From 98b303c9bf05dae932efbd71e18d81f6c64f20d8 Mon Sep 17 00:00:00 2001 From: Kenta Tada Date: Fri, 7 Jun 2024 20:17:04 +0900 Subject: bpftool: Query only cgroup-related attach types When CONFIG_NETKIT=y, bpftool-cgroup shows error even if the cgroup's path is correct: $ bpftool cgroup tree /sys/fs/cgroup CgroupPath ID AttachType AttachFlags Name Error: can't query bpf programs attached to /sys/fs/cgroup: No such device or address >From strace and kernel tracing, I found netkit returned ENXIO and this command failed. I think this AttachType(BPF_NETKIT_PRIMARY) is not relevant to cgroup. bpftool-cgroup should query just only cgroup-related attach types. v2->v3: - removed an unnecessary check v1->v2: - used an array of cgroup attach types Signed-off-by: Kenta Tada Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/r/20240607111704.6716-1-tadakentaso@gmail.com Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/cgroup.c | 40 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index af6898c0f388..9af426d43299 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -19,6 +19,38 @@ #include "main.h" +static const int cgroup_attach_types[] = { + BPF_CGROUP_INET_INGRESS, + BPF_CGROUP_INET_EGRESS, + BPF_CGROUP_INET_SOCK_CREATE, + BPF_CGROUP_INET_SOCK_RELEASE, + BPF_CGROUP_INET4_BIND, + BPF_CGROUP_INET6_BIND, + BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_INET4_CONNECT, + BPF_CGROUP_INET6_CONNECT, + BPF_CGROUP_UNIX_CONNECT, + BPF_CGROUP_INET4_GETPEERNAME, + BPF_CGROUP_INET6_GETPEERNAME, + BPF_CGROUP_UNIX_GETPEERNAME, + BPF_CGROUP_INET4_GETSOCKNAME, + BPF_CGROUP_INET6_GETSOCKNAME, + BPF_CGROUP_UNIX_GETSOCKNAME, + BPF_CGROUP_UDP4_SENDMSG, + BPF_CGROUP_UDP6_SENDMSG, + BPF_CGROUP_UNIX_SENDMSG, + BPF_CGROUP_UDP4_RECVMSG, + BPF_CGROUP_UDP6_RECVMSG, + BPF_CGROUP_UNIX_RECVMSG, + BPF_CGROUP_SOCK_OPS, + BPF_CGROUP_DEVICE, + BPF_CGROUP_SYSCTL, + BPF_CGROUP_GETSOCKOPT, + BPF_CGROUP_SETSOCKOPT, + BPF_LSM_CGROUP +}; + #define HELP_SPEC_ATTACH_FLAGS \ "ATTACH_FLAGS := { multi | override }" @@ -183,13 +215,13 @@ static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) static int cgroup_has_attached_progs(int cgroup_fd) { - enum bpf_attach_type type; + unsigned int i = 0; bool no_prog = true; - for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) { - int count = count_attached_bpf_progs(cgroup_fd, type); + for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) { + int count = count_attached_bpf_progs(cgroup_fd, cgroup_attach_types[i]); - if (count < 0 && errno != EINVAL) + if (count < 0) return -1; if (count > 0) { -- cgit v1.2.3 From ebb79e96f1ea454fbcc8fe27dfe44e751bd74b4b Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:25 -0600 Subject: kbuild: bpf: Tell pahole to DECL_TAG kfuncs With [0], pahole can now discover kfuncs and inject DECL_TAG into BTF. With this commit, we will start shipping said DECL_TAGs to downstream consumers if pahole supports it. This is useful for feature probing kfuncs as well as generating compilable prototypes. This is particularly important as kfuncs do not have stable ABI. 
[0]: https://git.kernel.org/pub/scm/devel/pahole/pahole.git/commit/?id=72e88f29c6f7e14201756e65bd66157427a61aaf Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/324aac5c627bddb80d9968c30df6382846994cc8.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- scripts/Makefile.btf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/Makefile.btf b/scripts/Makefile.btf index bca8a8f26ea4..2597e3d4d6e0 100644 --- a/scripts/Makefile.btf +++ b/scripts/Makefile.btf @@ -19,7 +19,7 @@ pahole-flags-$(call test-ge, $(pahole-ver), 125) += --skip_encoding_btf_inconsis else # Switch to using --btf_features for v1.26 and later. -pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func +pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs endif -- cgit v1.2.3 From 718135f5bd24ec10ff38aa0294a7da0a7b99fa89 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:26 -0600 Subject: bpf: selftests: Fix bpf_iter_task_vma_new() prototype bpf_iter_task_vma_new() is defined as taking a u64 as its 3rd argument. u64 is a unsigned long long. bpf_experimental.h was defining the prototype as unsigned long. Fix by using __u64. Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/fab4509bfee914f539166a91c3ff41e949f3df30.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/bpf_experimental.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 3d9e4b8c6b81..8ee7a00b7c82 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -163,7 +163,7 @@ struct bpf_iter_task_vma; extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it, struct task_struct *task, - unsigned long addr) __ksym; + __u64 addr) __ksym; extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym; extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym; -- cgit v1.2.3 From dff96e4f5078c6c61fc6c36dddf27b124c4318fc Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:27 -0600 Subject: bpf: selftests: Fix fentry test kfunc prototypes Some prototypes in progs/get_func_ip_test.c were not in line with how the actual kfuncs are defined in net/bpf/test_run.c. This causes compilation errors when kfunc prototypes are generated from BTF. Fix by aligning with actual kfunc definitions. Also remove two unused prototypes. 
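As a rough illustration of the mismatch being fixed (the file name and program skeleton below are hypothetical; only the two extern declarations mirror this patch and the kernel definitions in net/bpf/test_run.c):

/* example_fentry.c - sketch only */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Old style: an untyped, address-only ksym declaration. Once vmlinux.h
 * also carries the generated prototype
 *     extern int bpf_fentry_test1(int a) __weak __ksym;
 * this conflicting redeclaration stops the program from compiling.
 */
extern const void bpf_fentry_test1 __ksym;

/* Aligned style: mirror the actual kfunc definition. */
extern int bpf_modify_return_test(int a, int *b) __ksym;

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(trace_fentry_test1, int a)
{
	return 0;
}

char _license[] SEC("license") = "GPL";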
Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/1e68870e7626b7b9c6420e65076b307fc404a2f0.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/get_func_ip_test.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c index 8956eb78a226..2011cacdeb18 100644 --- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c +++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c @@ -5,13 +5,12 @@ char _license[] SEC("license") = "GPL"; -extern const void bpf_fentry_test1 __ksym; +extern int bpf_fentry_test1(int a) __ksym; +extern int bpf_modify_return_test(int a, int *b) __ksym; + extern const void bpf_fentry_test2 __ksym; extern const void bpf_fentry_test3 __ksym; extern const void bpf_fentry_test4 __ksym; -extern const void bpf_modify_return_test __ksym; -extern const void bpf_fentry_test6 __ksym; -extern const void bpf_fentry_test7 __ksym; extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak; -- cgit v1.2.3 From 89f0b1abac497c47d0851b780abecc756c1e8734 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:28 -0600 Subject: bpf: selftests: Fix bpf_cpumask_first_zero() kfunc prototype The prototype in progs/nested_trust_common.h is not in line with how the actual kfuncs are defined in kernel/bpf/cpumask.c. This causes compilation errors when kfunc prototypes are generated from BTF. Fix by aligning with actual kfunc definitions. Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/437936a4e554b02e04566dd6e3f0a5d08370cc8c.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/nested_trust_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/nested_trust_common.h b/tools/testing/selftests/bpf/progs/nested_trust_common.h index 83d33931136e..1784b496be2e 100644 --- a/tools/testing/selftests/bpf/progs/nested_trust_common.h +++ b/tools/testing/selftests/bpf/progs/nested_trust_common.h @@ -7,6 +7,6 @@ #include bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym; -bool bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; +__u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; #endif /* _NESTED_TRUST_COMMON_H */ -- cgit v1.2.3 From ac42f636dc11b2e8d6dea9dd5bb10a39c7bec342 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:29 -0600 Subject: bpf: selftests: Fix bpf_map_sum_elem_count() kfunc prototype The prototype in progs/map_percpu_stats.c is not in line with how the actual kfuncs are defined in kernel/bpf/map_iter.c. This causes compilation errors when kfunc prototypes are generated from BTF. Fix by aligning with actual kfunc definitions. 
Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/0497e11a71472dcb71ada7c90ad691523ae87c3b.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/map_percpu_stats.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/map_percpu_stats.c b/tools/testing/selftests/bpf/progs/map_percpu_stats.c index 10b2325c1720..63245785eb69 100644 --- a/tools/testing/selftests/bpf/progs/map_percpu_stats.c +++ b/tools/testing/selftests/bpf/progs/map_percpu_stats.c @@ -7,7 +7,7 @@ __u32 target_id; -__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym; +__s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym; SEC("iter/bpf_map") int dump_bpf_map(struct bpf_iter__bpf_map *ctx) -- cgit v1.2.3 From 2b8dd87332cd2782b5b3f0c423bd6693e487ed30 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:30 -0600 Subject: bpf: Make bpf_session_cookie() kfunc return long * We will soon be generating kfunc prototypes from BTF. As part of that, we need to align the manual signatures in bpf_kfuncs.h with the actual kfunc definitions. There is currently a conflicting signature for bpf_session_cookie() w.r.t. return type. The original intent was to return long * and not __u64 *. You can see evidence of that intent in a3a5113393cc ("selftests/bpf: Add kprobe session cookie test"). Fix conflict by changing kfunc definition. Fixes: 5c919acef851 ("bpf: Add support for kprobe session cookie") Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/7043e1c251ab33151d6e3830f8ea1902ed2604ac.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- kernel/trace/bpf_trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index d1daeab1bbc1..bc16e21a2a44 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3527,7 +3527,7 @@ __bpf_kfunc bool bpf_session_is_return(void) return session_ctx->is_return; } -__bpf_kfunc __u64 *bpf_session_cookie(void) +__bpf_kfunc long *bpf_session_cookie(void) { struct bpf_session_run_ctx *session_ctx; -- cgit v1.2.3 From 0ce089cbdc6a393bf9ad04964427852800503a58 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:31 -0600 Subject: bpf: selftests: Namespace struct_opt callbacks in bpf_dctcp With generated kfunc prototypes, the existing callback names will conflict. Fix by namespacing with a bpf_ prefix. 
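The clash is with the kernel's own DCTCP implementation: when DCTCP is built in, the callbacks in net/ipv4/tcp_dctcp.c are themselves registered as kfuncs for struct_ops writers (see the kfunc set in net/ipv4/bpf_tcp_ca.c), so a vmlinux.h with generated kfunc prototypes would carry declarations along the lines of the two below, colliding with the selftest's BPF_PROG(dctcp_init, ...) and friends of the same name. (The exact emitted text is an assumption; the signatures match tcp_dctcp.c.)

extern void dctcp_init(struct sock *sk) __weak __ksym;
extern u32 dctcp_ssthresh(struct sock *sk) __weak __ksym;

Renaming the selftest callbacks with a bpf_ prefix sidesteps the collision without changing behavior.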
Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/efe7aadad8a054e5aeeba94b1d2e4502eee09d7a.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_dctcp.c | 36 +++++++++++++-------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c index 3c9ffe340312..02f552e7fd4d 100644 --- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c +++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c @@ -65,7 +65,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca) } SEC("struct_ops") -void BPF_PROG(dctcp_init, struct sock *sk) +void BPF_PROG(bpf_dctcp_init, struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct bpf_dctcp *ca = inet_csk_ca(sk); @@ -77,7 +77,7 @@ void BPF_PROG(dctcp_init, struct sock *sk) (void *)fallback, sizeof(fallback)) == -EBUSY) ebusy_cnt++; - /* Switch back to myself and the recurred dctcp_init() + /* Switch back to myself and the recurred bpf_dctcp_init() * will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION), * except the last "cdg" one. */ @@ -112,7 +112,7 @@ void BPF_PROG(dctcp_init, struct sock *sk) } SEC("struct_ops") -__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk) +__u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk) { struct bpf_dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -122,7 +122,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk) } SEC("struct_ops") -void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags) +void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags) { const struct tcp_sock *tp = tcp_sk(sk); struct bpf_dctcp *ca = inet_csk_ca(sk); @@ -161,12 +161,12 @@ static void dctcp_react_to_loss(struct sock *sk) } SEC("struct_ops") -void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state) +void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state) { if (new_state == TCP_CA_Recovery && new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state)) dctcp_react_to_loss(sk); - /* We handle RTO in dctcp_cwnd_event to ensure that we perform only + /* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only * one loss-adjustment per RTT. 
*/ } @@ -208,7 +208,7 @@ static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt, } SEC("struct_ops") -void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) +void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) { struct bpf_dctcp *ca = inet_csk_ca(sk); @@ -227,7 +227,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) } SEC("struct_ops") -__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk) +__u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk) { const struct bpf_dctcp *ca = inet_csk_ca(sk); @@ -237,28 +237,28 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk) extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym; SEC("struct_ops") -void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) +void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) { tcp_reno_cong_avoid(sk, ack, acked); } SEC(".struct_ops") struct tcp_congestion_ops dctcp_nouse = { - .init = (void *)dctcp_init, - .set_state = (void *)dctcp_state, + .init = (void *)bpf_dctcp_init, + .set_state = (void *)bpf_dctcp_state, .flags = TCP_CONG_NEEDS_ECN, .name = "bpf_dctcp_nouse", }; SEC(".struct_ops") struct tcp_congestion_ops dctcp = { - .init = (void *)dctcp_init, - .in_ack_event = (void *)dctcp_update_alpha, - .cwnd_event = (void *)dctcp_cwnd_event, - .ssthresh = (void *)dctcp_ssthresh, - .cong_avoid = (void *)dctcp_cong_avoid, - .undo_cwnd = (void *)dctcp_cwnd_undo, - .set_state = (void *)dctcp_state, + .init = (void *)bpf_dctcp_init, + .in_ack_event = (void *)bpf_dctcp_update_alpha, + .cwnd_event = (void *)bpf_dctcp_cwnd_event, + .ssthresh = (void *)bpf_dctcp_ssthresh, + .cong_avoid = (void *)bpf_dctcp_cong_avoid, + .undo_cwnd = (void *)bpf_dctcp_cwnd_undo, + .set_state = (void *)bpf_dctcp_state, .flags = TCP_CONG_NEEDS_ECN, .name = "bpf_dctcp", }; -- cgit v1.2.3 From ec209ad86324de84ef66990f0e9df0851e45e054 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:32 -0600 Subject: bpf: verifier: Relax caller requirements for kfunc projection type args Currently, if a kfunc accepts a projection type as an argument (eg struct __sk_buff *), the caller must exactly provide exactly the same type with provable provenance. However in practice, kfuncs that accept projection types _must_ cast to the underlying type before use b/c projection type layouts are completely made up. Thus, it is ok to relax the verifier rules around implicit conversions. We will use this functionality in the next commit when we align kfuncs to user-facing types. 
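As a minimal sketch of what the relaxed rule permits (the program skeleton is hypothetical; the extern declaration matches how bpf_dynptr_from_skb() is redeclared later in this series):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define NF_DROP 0	/* verdict values, mirroring linux/netfilter.h */
#define NF_ACCEPT 1

/* The kfunc prototype uses the user-facing projection type ... */
extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
			       struct bpf_dynptr *ptr__uninit) __ksym;

SEC("netfilter")
int accept_all(struct bpf_nf_ctx *ctx)
{
	/* ... while the context hands out the underlying kernel type.
	 * With this patch the verifier accepts the cast below; before,
	 * it insisted on an argument with exact __sk_buff provenance.
	 */
	struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
	struct bpf_dynptr ptr;

	if (bpf_dynptr_from_skb(skb, 0, &ptr))
		return NF_DROP;
	return NF_ACCEPT;
}

char _license[] SEC("license") = "GPL";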
Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/e2c025cb09ccfd4af1ec9e18284dc3cecff7514d.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- include/linux/btf.h | 1 + kernel/bpf/btf.c | 13 ++++++++++--- kernel/bpf/verifier.c | 10 +++++++++- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/include/linux/btf.h b/include/linux/btf.h index f9e56fd12a9f..56d91daacdba 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -531,6 +531,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id); int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, struct module *owner); struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id); +bool btf_is_projection_of(const char *pname, const char *tname); bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, enum bpf_prog_type prog_type, int arg); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7928d920056f..ce4707968217 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5820,6 +5820,15 @@ static int find_kern_ctx_type_id(enum bpf_prog_type prog_type) return ctx_type->type; } +bool btf_is_projection_of(const char *pname, const char *tname) +{ + if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0) + return true; + if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0) + return true; + return false; +} + bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, enum bpf_prog_type prog_type, int arg) @@ -5882,9 +5891,7 @@ again: * int socket_filter_bpf_prog(struct __sk_buff *skb) * { // no fields of skb are ever used } */ - if (strcmp(ctx_tname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0) - return true; - if (strcmp(ctx_tname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0) + if (btf_is_projection_of(ctx_tname, tname)) return true; if (strcmp(ctx_tname, tname)) { /* bpf_user_pt_regs_t is a typedef, so resolve it to diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 20ac9cfd54dd..dcac6119d810 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -11265,6 +11265,8 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, bool strict_type_match = false; const struct btf *reg_btf; const char *reg_ref_tname; + bool taking_projection; + bool struct_same; u32 reg_ref_id; if (base_type(reg->type) == PTR_TO_BTF_ID) { @@ -11308,7 +11310,13 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, ®_ref_id); reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); - if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) { + struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match); + /* If kfunc is accepting a projection type (ie. __sk_buff), it cannot + * actually use it -- it must cast to the underlying type. So we allow + * caller to pass in the underlying type. 
+ */ + taking_projection = btf_is_projection_of(ref_tname, reg_ref_tname); + if (!taking_projection && !struct_same) { verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n", meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, btf_type_str(reg_ref_t), reg_ref_tname); -- cgit v1.2.3 From cce4c40b960673f9e020835def310f1e89d3a940 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:33 -0600 Subject: bpf: treewide: Align kfunc signatures to prog point-of-view Previously, kfunc declarations in bpf_kfuncs.h (and others) used "user facing" types for kfuncs prototypes while the actual kfunc definitions used "kernel facing" types. More specifically: bpf_dynptr vs bpf_dynptr_kern, __sk_buff vs sk_buff, and xdp_md vs xdp_buff. It wasn't an issue before, as the verifier allows aliased types. However, since we are now generating kfunc prototypes in vmlinux.h (in addition to keeping bpf_kfuncs.h around), this conflict creates compilation errors. Fix this conflict by using "user facing" types in kfunc definitions. This results in more casts, but otherwise has no additional runtime cost. Note, similar to 5b268d1ebcdc ("bpf: Have bpf_rdonly_cast() take a const pointer"), we also make kfuncs take const arguments where appropriate in order to make the kfunc more permissive. Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/b58346a63a0e66bc9b7504da751b526b0b189a67.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- fs/verity/measure.c | 5 +-- include/linux/bpf.h | 8 ++--- kernel/bpf/crypto.c | 24 ++++++++----- kernel/bpf/helpers.c | 39 +++++++++++++++------- kernel/bpf/verifier.c | 2 +- kernel/trace/bpf_trace.c | 15 +++++---- net/core/filter.c | 32 +++++++++++------- .../testing/selftests/bpf/progs/ip_check_defrag.c | 10 +++--- .../selftests/bpf/progs/verifier_netfilter_ctx.c | 6 ++-- 9 files changed, 88 insertions(+), 53 deletions(-) diff --git a/fs/verity/measure.c b/fs/verity/measure.c index 3969d54158d1..175d2f1bc089 100644 --- a/fs/verity/measure.c +++ b/fs/verity/measure.c @@ -111,14 +111,15 @@ __bpf_kfunc_start_defs(); /** * bpf_get_fsverity_digest: read fsverity digest of file * @file: file to get digest from - * @digest_ptr: (out) dynptr for struct fsverity_digest + * @digest_p: (out) dynptr for struct fsverity_digest * * Read fsverity_digest of *file* into *digest_ptr*. * * Return: 0 on success, a negative value on error. 
*/ -__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr_kern *digest_ptr) +__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_p) { + struct bpf_dynptr_kern *digest_ptr = (struct bpf_dynptr_kern *)digest_p; const struct inode *inode = file_inode(file); u32 dynptr_sz = __bpf_dynptr_size(digest_ptr); struct fsverity_digest *arg; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a834f4b761bc..f636b4998bf7 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -3265,8 +3265,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size); -int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr); +int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, + struct bpf_dynptr *ptr); #else static inline bool bpf_sock_common_is_valid_access(int off, int size, enum bpf_access_type type, @@ -3288,8 +3288,8 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, { return 0; } -static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr) +static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, + struct bpf_dynptr *ptr) { return -EOPNOTSUPP; } diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c index 2bee4af91e38..3c1de0e5c0bd 100644 --- a/kernel/bpf/crypto.c +++ b/kernel/bpf/crypto.c @@ -311,11 +311,15 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx, * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured. */ __bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, - const struct bpf_dynptr_kern *src, - const struct bpf_dynptr_kern *dst, - const struct bpf_dynptr_kern *siv) + const struct bpf_dynptr *src, + const struct bpf_dynptr *dst, + const struct bpf_dynptr *siv) { - return bpf_crypto_crypt(ctx, src, dst, siv, true); + const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src; + const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst; + const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv; + + return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, true); } /** @@ -328,11 +332,15 @@ __bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured. 
*/ __bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, - const struct bpf_dynptr_kern *src, - const struct bpf_dynptr_kern *dst, - const struct bpf_dynptr_kern *siv) + const struct bpf_dynptr *src, + const struct bpf_dynptr *dst, + const struct bpf_dynptr *siv) { - return bpf_crypto_crypt(ctx, src, dst, siv, false); + const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src; + const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst; + const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv; + + return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, false); } __bpf_kfunc_end_defs(); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 6f1abcb4b084..3ac521c48bba 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2459,9 +2459,10 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) * provided buffer, with its contents containing the data, if unable to obtain * direct pointer) */ -__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset, +__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, void *buffer__opt, u32 buffer__szk) { + const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; enum bpf_dynptr_type type; u32 len = buffer__szk; int err; @@ -2543,9 +2544,11 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset * provided buffer, with its contents containing the data, if unable to obtain * direct pointer) */ -__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset, +__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, void *buffer__opt, u32 buffer__szk) { + const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) return NULL; @@ -2571,11 +2574,12 @@ __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 o * will be copied out into the buffer and the user will need to call * bpf_dynptr_write() to commit changes. 
*/ - return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk); + return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk); } -__bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end) +__bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; u32 size; if (!ptr->data || start > end) @@ -2592,36 +2596,45 @@ __bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 en return 0; } -__bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr) +__bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + return !ptr->data; } -__bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr) +__bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + if (!ptr->data) return false; return __bpf_dynptr_is_rdonly(ptr); } -__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) +__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + if (!ptr->data) return -EINVAL; return __bpf_dynptr_size(ptr); } -__bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr, - struct bpf_dynptr_kern *clone__uninit) +__bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p, + struct bpf_dynptr *clone__uninit) { + struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit; + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + if (!ptr->data) { - bpf_dynptr_set_null(clone__uninit); + bpf_dynptr_set_null(clone); return -EINVAL; } - *clone__uninit = *ptr; + *clone = *ptr; return 0; } @@ -2986,7 +2999,9 @@ late_initcall(kfunc_init); */ const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) { - return bpf_dynptr_slice(ptr, 0, NULL, len); + const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr; + + return bpf_dynptr_slice(p, 0, NULL, len); } /* Get a pointer to dynptr data up to len bytes for read write access. If diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index dcac6119d810..acc9dd830807 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -10914,7 +10914,7 @@ enum { }; BTF_ID_LIST(kf_arg_btf_ids) -BTF_ID(struct, bpf_dynptr_kern) +BTF_ID(struct, bpf_dynptr) BTF_ID(struct, bpf_list_head) BTF_ID(struct, bpf_list_node) BTF_ID(struct, bpf_rb_root) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index bc16e21a2a44..4b3fda456299 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1369,8 +1369,8 @@ __bpf_kfunc void bpf_key_put(struct bpf_key *bkey) #ifdef CONFIG_SYSTEM_DATA_VERIFICATION /** * bpf_verify_pkcs7_signature - verify a PKCS#7 signature - * @data_ptr: data to verify - * @sig_ptr: signature of the data + * @data_p: data to verify + * @sig_p: signature of the data * @trusted_keyring: keyring with keys trusted for signature verification * * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr* @@ -1378,10 +1378,12 @@ __bpf_kfunc void bpf_key_put(struct bpf_key *bkey) * * Return: 0 on success, a negative value on error. 
*/ -__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, - struct bpf_dynptr_kern *sig_ptr, +__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p, + struct bpf_dynptr *sig_p, struct bpf_key *trusted_keyring) { + struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p; + struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p; const void *data, *sig; u32 data_len, sig_len; int ret; @@ -1444,7 +1446,7 @@ __bpf_kfunc_start_defs(); * bpf_get_file_xattr - get xattr of a file * @file: file to get xattr from * @name__str: name of the xattr - * @value_ptr: output buffer of the xattr value + * @value_p: output buffer of the xattr value * * Get xattr *name__str* of *file* and store the output in *value_ptr*. * @@ -1453,8 +1455,9 @@ __bpf_kfunc_start_defs(); * Return: 0 on success, a negative value on error. */ __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str, - struct bpf_dynptr_kern *value_ptr) + struct bpf_dynptr *value_p) { + struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p; struct dentry *dentry; u32 value_len; void *value; diff --git a/net/core/filter.c b/net/core/filter.c index 7c46ecba3b01..73722790cee3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -11859,28 +11859,34 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } __bpf_kfunc_start_defs(); -__bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr__uninit) +__bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags, + struct bpf_dynptr *ptr__uninit) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit; + struct sk_buff *skb = (struct sk_buff *)s; + if (flags) { - bpf_dynptr_set_null(ptr__uninit); + bpf_dynptr_set_null(ptr); return -EINVAL; } - bpf_dynptr_init(ptr__uninit, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len); + bpf_dynptr_init(ptr, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len); return 0; } -__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_buff *xdp, u64 flags, - struct bpf_dynptr_kern *ptr__uninit) +__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_md *x, u64 flags, + struct bpf_dynptr *ptr__uninit) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit; + struct xdp_buff *xdp = (struct xdp_buff *)x; + if (flags) { - bpf_dynptr_set_null(ptr__uninit); + bpf_dynptr_set_null(ptr); return -EINVAL; } - bpf_dynptr_init(ptr__uninit, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp)); + bpf_dynptr_init(ptr, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp)); return 0; } @@ -11906,10 +11912,11 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern, return 0; } -__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk, +__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct __sk_buff *s, struct sock *sk, struct bpf_tcp_req_attrs *attrs, int attrs__sz) { #if IS_ENABLED(CONFIG_SYN_COOKIES) + struct sk_buff *skb = (struct sk_buff *)s; const struct request_sock_ops *ops; struct inet_request_sock *ireq; struct tcp_request_sock *treq; @@ -12004,16 +12011,17 @@ __bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk, __bpf_kfunc_end_defs(); -int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr__uninit) +int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, + struct bpf_dynptr *ptr__uninit) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit; int err; err = bpf_dynptr_from_skb(skb, flags, ptr__uninit); 
if (err) return err; - bpf_dynptr_set_rdonly(ptr__uninit); + bpf_dynptr_set_rdonly(ptr); return 0; } diff --git a/tools/testing/selftests/bpf/progs/ip_check_defrag.c b/tools/testing/selftests/bpf/progs/ip_check_defrag.c index 1c2b6c1616b0..645b2c9f7867 100644 --- a/tools/testing/selftests/bpf/progs/ip_check_defrag.c +++ b/tools/testing/selftests/bpf/progs/ip_check_defrag.c @@ -12,7 +12,7 @@ #define IP_OFFSET 0x1FFF #define NEXTHDR_FRAGMENT 44 -extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags, +extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags, struct bpf_dynptr *ptr__uninit) __ksym; extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset, void *buffer, uint32_t buffer__sz) __ksym; @@ -42,7 +42,7 @@ static bool is_frag_v6(struct ipv6hdr *ip6h) return ip6h->nexthdr == NEXTHDR_FRAGMENT; } -static int handle_v4(struct sk_buff *skb) +static int handle_v4(struct __sk_buff *skb) { struct bpf_dynptr ptr; u8 iph_buf[20] = {}; @@ -64,7 +64,7 @@ static int handle_v4(struct sk_buff *skb) return NF_ACCEPT; } -static int handle_v6(struct sk_buff *skb) +static int handle_v6(struct __sk_buff *skb) { struct bpf_dynptr ptr; struct ipv6hdr *ip6h; @@ -89,9 +89,9 @@ static int handle_v6(struct sk_buff *skb) SEC("netfilter") int defrag(struct bpf_nf_ctx *ctx) { - struct sk_buff *skb = ctx->skb; + struct __sk_buff *skb = (struct __sk_buff *)ctx->skb; - switch (bpf_ntohs(skb->protocol)) { + switch (bpf_ntohs(ctx->skb->protocol)) { case ETH_P_IP: return handle_v4(skb); case ETH_P_IPV6: diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c index 65bba330e7e5..ab9f9f2620ed 100644 --- a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c @@ -79,7 +79,7 @@ int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx) return NF_ACCEPT; } -extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags, +extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags, struct bpf_dynptr *ptr__uninit) __ksym; extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset, void *buffer, uint32_t buffer__sz) __ksym; @@ -90,8 +90,8 @@ __success __failure_unpriv __retval(0) int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx) { + struct __sk_buff *skb = (struct __sk_buff *)ctx->skb; const struct nf_hook_state *state = ctx->state; - struct sk_buff *skb = ctx->skb; const struct iphdr *iph; const struct tcphdr *th; u8 buffer_iph[20] = {}; @@ -99,7 +99,7 @@ int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx) struct bpf_dynptr ptr; uint8_t ihl; - if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr)) + if (ctx->skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr)) return NF_ACCEPT; iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph)); -- cgit v1.2.3 From f709124dd72fe7a3f6ba7764b2ed145c55c33e47 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:34 -0600 Subject: bpf: selftests: nf: Opt out of using generated kfunc prototypes The bpf-nf selftests play various games with aliased types such that folks with CONFIG_NF_CONNTRACK=m/n configs can still build the selftests. See commits: 1058b6a78db2 ("selftests/bpf: Do not fail build if CONFIG_NF_CONNTRACK=m/n") 92afc5329a5b ("selftests/bpf: Fix build errors if CONFIG_NF_CONNTRACK=m") Thus, it is simpler if these selftests opt out of using generated kfunc prototypes. 
The preprocessor macro this commit uses will be introduced in the final commit. Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/044a5b10cb3abd0d71cb1c818ee0bfc4a2239332.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/test_bpf_nf.c | 1 + tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c | 1 + tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c | 1 + 3 files changed, 3 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c index 0289d8ce2b80..f7b330ddd007 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c index 0e4759ab38ff..a586f087ffeb 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c index 7ea9785738b5..f8f5dc9f72b8 100644 --- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c +++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ +#define BPF_NO_KFUNC_PROTOTYPES #include "vmlinux.h" #include -- cgit v1.2.3 From c567cba34585514f82600a10587c8813c50e3a7c Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:35 -0600 Subject: bpf: selftests: xfrm: Opt out of using generated kfunc prototypes The xfrm_info selftest locally defines an aliased type such that folks with CONFIG_XFRM_INTERFACE=m/n configs can still build the selftests. See commit aa67961f3243 ("selftests/bpf: Allow building bpf tests with CONFIG_XFRM_INTERFACE=[m|n]"). Thus, it is simpler if this selftest opts out of using enerated kfunc prototypes. The preprocessor macro this commit uses will be introduced in the final commit. Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/afe0bb1c50487f52542cdd5230c4aef9e36ce250.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/xfrm_info.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/bpf/progs/xfrm_info.c b/tools/testing/selftests/bpf/progs/xfrm_info.c index f6a501fbba2b..a1d9f106c3f0 100644 --- a/tools/testing/selftests/bpf/progs/xfrm_info.c +++ b/tools/testing/selftests/bpf/progs/xfrm_info.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define BPF_NO_KFUNC_PROTOTYPES #include "vmlinux.h" #include "bpf_tracing_net.h" #include -- cgit v1.2.3 From 770abbb5a25a5b767f1c60ba366aea503728e957 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Wed, 12 Jun 2024 09:58:36 -0600 Subject: bpftool: Support dumping kfunc prototypes from BTF This patch enables dumping kfunc prototypes from bpftool. This is useful b/c with this patch, end users will no longer have to manually define kfunc prototypes. 
For the kernel tree, this also means we can optionally drop kfunc prototypes from: tools/testing/selftests/bpf/bpf_kfuncs.h tools/testing/selftests/bpf/bpf_experimental.h Example usage: $ make PAHOLE=/home/dxu/dev/pahole/build/pahole -j30 vmlinux $ ./tools/bpf/bpftool/bpftool btf dump file ./vmlinux format c | rg "__ksym;" | head -3 extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __weak __ksym; extern void cgroup_rstat_flush(struct cgroup *cgrp) __weak __ksym; extern struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) __weak __ksym; Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/bf6c08f9263c4bd9d10a717de95199d766a13f61.1718207789.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/btf.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index af047dedde38..6789c7a4d5ca 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -20,6 +20,8 @@ #include "json_writer.h" #include "main.h" +#define KFUNC_DECL_TAG "bpf_kfunc" + static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_UNKN] = "UNKNOWN", [BTF_KIND_INT] = "INT", @@ -461,6 +463,49 @@ static int dump_btf_raw(const struct btf *btf, return 0; } +static int dump_btf_kfuncs(struct btf_dump *d, const struct btf *btf) +{ + LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts); + int cnt = btf__type_cnt(btf); + int i; + + printf("\n/* BPF kfuncs */\n"); + printf("#ifndef BPF_NO_KFUNC_PROTOTYPES\n"); + + for (i = 1; i < cnt; i++) { + const struct btf_type *t = btf__type_by_id(btf, i); + const char *name; + int err; + + if (!btf_is_decl_tag(t)) + continue; + + if (btf_decl_tag(t)->component_idx != -1) + continue; + + name = btf__name_by_offset(btf, t->name_off); + if (strncmp(name, KFUNC_DECL_TAG, sizeof(KFUNC_DECL_TAG))) + continue; + + t = btf__type_by_id(btf, t->type); + if (!btf_is_func(t)) + continue; + + printf("extern "); + + opts.field_name = btf__name_by_offset(btf, t->name_off); + err = btf_dump__emit_type_decl(d, t->type, &opts); + if (err) + return err; + + printf(" __weak __ksym;\n"); + } + + printf("#endif\n\n"); + + return 0; +} + static void __printf(2, 0) btf_dump_printf(void *ctx, const char *fmt, va_list args) { @@ -596,6 +641,12 @@ static int dump_btf_c(const struct btf *btf, printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n"); printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n"); printf("#endif\n\n"); + printf("#ifndef __ksym\n"); + printf("#define __ksym __attribute__((section(\".ksyms\")))\n"); + printf("#endif\n\n"); + printf("#ifndef __weak\n"); + printf("#define __weak __attribute__((weak))\n"); + printf("#endif\n\n"); if (root_type_cnt) { for (i = 0; i < root_type_cnt; i++) { @@ -615,6 +666,10 @@ static int dump_btf_c(const struct btf *btf, if (err) goto done; } + + err = dump_btf_kfuncs(d, btf); + if (err) + goto done; } printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n"); -- cgit v1.2.3 From a3cfe84cca28f205761a0450016593b0d728165e Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 6 Jun 2024 07:58:50 -0700 Subject: bpf: Add CHECKSUM_COMPLETE to bpf test progs Add special flag to validate that TC BPF program properly updates checksum information in skb. 
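From user space the flag goes in through the test-run options; a minimal sketch (prog_fd and the pkt_v4 packet template are assumed to come from the surrounding selftest harness):

LIBBPF_OPTS(bpf_test_run_opts, topts,
	.data_in = &pkt_v4,
	.data_size_in = sizeof(pkt_v4),
	.repeat = 1,
	/* test_run computes CHECKSUM_COMPLETE from the network header to
	 * the end of the packet before invoking the program, and
	 * re-validates skb->csum afterwards; a mismatch fails the run
	 * with -EBADMSG.
	 */
	.flags = BPF_F_TEST_SKB_CHECKSUM_COMPLETE,
);
int err = bpf_prog_test_run_opts(prog_fd, &topts);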
Signed-off-by: Vadim Fedorenko Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Kicinski Acked-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240606145851.229116-1-vadfed@meta.com --- include/uapi/linux/bpf.h | 2 ++ net/bpf/test_run.c | 28 +++++++++++++++++++++++++++- tools/include/uapi/linux/bpf.h | 2 ++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 25ea393cf084..35bcf52dbc65 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1425,6 +1425,8 @@ enum { #define BPF_F_TEST_RUN_ON_CPU (1U << 0) /* If set, XDP frames will be transmitted after processing */ #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) +/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */ +#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 36ae54f57bf5..3c965e32fc33 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -983,7 +983,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, void *data; int ret; - if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) + if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) || + kattr->test.cpu || kattr->test.batch_size) return -EINVAL; data = bpf_test_init(kattr, kattr->test.data_size_in, @@ -1031,6 +1032,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); __skb_put(skb, size); + if (ctx && ctx->ifindex > 1) { dev = dev_get_by_index(net, ctx->ifindex); if (!dev) { @@ -1066,9 +1068,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, __skb_push(skb, hh_len); if (is_direct_pkt_access) bpf_compute_data_pointers(skb); + ret = convert___skb_to_skb(skb, ctx); if (ret) goto out; + + if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) { + const int off = skb_network_offset(skb); + int len = skb->len - off; + + skb->csum = skb_checksum(skb, off, len, 0); + skb->ip_summed = CHECKSUM_COMPLETE; + } + ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false); if (ret) goto out; @@ -1083,6 +1095,20 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, } memset(__skb_push(skb, hh_len), 0, hh_len); } + + if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) { + const int off = skb_network_offset(skb); + int len = skb->len - off; + __wsum csum; + + csum = skb_checksum(skb, off, len, 0); + + if (csum_fold(skb->csum) != csum_fold(csum)) { + ret = -EBADMSG; + goto out; + } + } + convert_skb_to___skb(skb, ctx); size = skb->len; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 25ea393cf084..35bcf52dbc65 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1425,6 +1425,8 @@ enum { #define BPF_F_TEST_RUN_ON_CPU (1U << 0) /* If set, XDP frames will be transmitted after processing */ #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) +/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */ +#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { -- cgit v1.2.3 From 041c1dc988fdffd5eb0c13f1ce5d1b3b0125f208 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 6 Jun 2024 07:58:51 -0700 Subject: selftests/bpf: Validate CHECKSUM_COMPLETE option Adjust skb program test to run with checksum validation. 
Signed-off-by: Vadim Fedorenko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240606145851.229116-2-vadfed@meta.com --- tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c | 1 + tools/testing/selftests/bpf/progs/skb_pkt_end.c | 11 ++++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c index ae93411fd582..09ca13bdf6ca 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c +++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c @@ -11,6 +11,7 @@ static int sanity_run(struct bpf_program *prog) .data_in = &pkt_v4, .data_size_in = sizeof(pkt_v4), .repeat = 1, + .flags = BPF_F_TEST_SKB_CHECKSUM_COMPLETE, ); prog_fd = bpf_program__fd(prog); diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c index db4abd2682fc..3bb4451524a1 100644 --- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c +++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c @@ -33,6 +33,8 @@ int main_prog(struct __sk_buff *skb) struct iphdr *ip = NULL; struct tcphdr *tcp; __u8 proto = 0; + int urg_ptr; + u32 offset; if (!(ip = get_iphdr(skb))) goto out; @@ -48,7 +50,14 @@ int main_prog(struct __sk_buff *skb) if (!tcp) goto out; - return tcp->urg_ptr; + urg_ptr = tcp->urg_ptr; + + /* Checksum validation part */ + proto++; + offset = sizeof(struct ethhdr) + offsetof(struct iphdr, protocol); + bpf_skb_store_bytes(skb, offset, &proto, sizeof(proto), BPF_F_RECOMPUTE_CSUM); + + return urg_ptr; out: return -1; } -- cgit v1.2.3 From 78746f93e903d022c692b9bb3a3e2570167b2dc2 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Thu, 13 Jun 2024 10:19:25 -0600 Subject: bpf: Fix bpf_dynptr documentation comments The function argument names were changed but the doc comment was not. Fix htmldocs build warning by updating doc comments. Fixes: cce4c40b9606 ("bpf: treewide: Align kfunc signatures to prog point-of-view") Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/d0b0eb05f91e12e5795966153b11998d3fc1d433.1718295425.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 3ac521c48bba..229396172026 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2433,7 +2433,7 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) /** * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. - * @ptr: The dynptr whose data slice to retrieve + * @p: The dynptr whose data slice to retrieve * @offset: Offset into the dynptr * @buffer__opt: User-provided buffer to copy contents into. May be NULL * @buffer__szk: Size (in bytes) of the buffer if present. This is the @@ -2504,7 +2504,7 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, /** * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. - * @ptr: The dynptr whose data slice to retrieve + * @p: The dynptr whose data slice to retrieve * @offset: Offset into the dynptr * @buffer__opt: User-provided buffer to copy contents into. May be NULL * @buffer__szk: Size (in bytes) of the buffer if present. 
This is the -- cgit v1.2.3 From 6a8260147745fe493d733d4e5f9b327da3720905 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Thu, 13 Jun 2024 10:19:26 -0600 Subject: bpf: selftests: Do not use generated kfunc prototypes for arena progs When selftests are built with a new enough clang, the arena selftests opt-in to use LLVM address_space attribute annotations for arena pointers. These annotations are not emitted by kfunc prototype generation. This causes compilation errors when clang sees conflicting prototypes. Fix by opting arena selftests out of using generated kfunc prototypes. Fixes: 770abbb5a25a ("bpftool: Support dumping kfunc prototypes from BTF") Reported-by: kernel test robot Closes: https://lore.kernel.org/r/202406131810.c1B8hTm8-lkp@intel.com/ Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/fc59a617439ceea9ad8dfbb4786843c2169496ae.1718295425.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/arena_htab.c | 1 + tools/testing/selftests/bpf/progs/arena_list.c | 1 + tools/testing/selftests/bpf/progs/verifier_arena.c | 1 + tools/testing/selftests/bpf/progs/verifier_arena_large.c | 1 + 4 files changed, 4 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c index 1e6ac187a6a0..cf938cf8c11e 100644 --- a/tools/testing/selftests/bpf/progs/arena_htab.c +++ b/tools/testing/selftests/bpf/progs/arena_htab.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c index 93bd0600eba0..3a2ddcacbea6 100644 --- a/tools/testing/selftests/bpf/progs/arena_list.c +++ b/tools/testing/selftests/bpf/progs/arena_list.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c index 93144ae6df74..67509c5d3982 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c index ef66ea460264..6065f862d964 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include -- cgit v1.2.3 From a90797993afcb0eaf6bf47a062ff47eb3810a6d5 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 13 Jun 2024 14:18:13 -0700 Subject: bpf: verifier: make kfuncs args nullalble Some arguments to kfuncs might be NULL in some cases. But currently it's not possible to pass NULL to any BTF structures because the check for the suffix is located after all type checks. Move it to earlier place to allow nullable args. 
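On the kfunc side the opt-in is expressed through an argument-name suffix; a hypothetical definition (the name and body are invented for illustration, the naming convention itself is what the next patch applies to the crypto kfuncs):

__bpf_kfunc int bpf_example_probe(const struct bpf_dynptr *data,
				  const struct bpf_dynptr *opts__nullable)
{
	/* The __nullable suffix on the parameter name is what allows BPF
	 * callers to pass NULL here, so the kfunc body must tolerate it.
	 */
	if (!opts__nullable)
		return 0;	/* no options supplied */

	return 1;	/* options present */
}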
Acked-by: Eduard Zingerman Signed-off-by: Vadim Fedorenko Link: https://lore.kernel.org/r/20240613211817.1551967-2-vadfed@meta.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index acc9dd830807..e857b08e1f2d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -11187,6 +11187,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) return KF_ARG_PTR_TO_CTX; + if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) + return KF_ARG_PTR_TO_NULL; + if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) return KF_ARG_PTR_TO_ALLOC_BTF_ID; @@ -11232,9 +11235,6 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) return KF_ARG_PTR_TO_CALLBACK; - if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) - return KF_ARG_PTR_TO_NULL; - if (argno + 1 < nargs && (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]) || is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]))) -- cgit v1.2.3 From 65d6d61d25968d1f13a478a6f303ed8d6b978a77 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 13 Jun 2024 14:18:14 -0700 Subject: bpf: crypto: make state and IV dynptr nullable Some ciphers do not require state and IV buffer, but with current implementation 0-sized dynptr is always needed. With adjustment to verifier we can provide NULL instead of 0-sized dynptr. Make crypto kfuncs ready for this. Reviewed-by: Eduard Zingerman Signed-off-by: Vadim Fedorenko Link: https://lore.kernel.org/r/20240613211817.1551967-3-vadfed@meta.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/crypto.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c index 3c1de0e5c0bd..94854cd9c4cc 100644 --- a/kernel/bpf/crypto.c +++ b/kernel/bpf/crypto.c @@ -275,7 +275,7 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx, if (__bpf_dynptr_is_rdonly(dst)) return -EINVAL; - siv_len = __bpf_dynptr_size(siv); + siv_len = siv ? __bpf_dynptr_size(siv) : 0; src_len = __bpf_dynptr_size(src); dst_len = __bpf_dynptr_size(dst); if (!src_len || !dst_len) @@ -303,42 +303,42 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx, /** * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided. - * @ctx: The crypto context being used. The ctx must be a trusted pointer. - * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer. - * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer. - * @siv: bpf_dynptr to IV data and state data to be used by decryptor. + * @ctx: The crypto context being used. The ctx must be a trusted pointer. + * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer. + * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer. + * @siv__nullable: bpf_dynptr to IV data and state data to be used by decryptor. May be NULL. * * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured. 
*/ __bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src, const struct bpf_dynptr *dst, - const struct bpf_dynptr *siv) + const struct bpf_dynptr *siv__nullable) { const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src; const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst; - const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv; + const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv__nullable; return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, true); } /** * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided. - * @ctx: The crypto context being used. The ctx must be a trusted pointer. - * @src: bpf_dynptr to the plain data. Must be a trusted pointer. - * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer. - * @siv: bpf_dynptr to IV data and state data to be used by decryptor. + * @ctx: The crypto context being used. The ctx must be a trusted pointer. + * @src: bpf_dynptr to the plain data. Must be a trusted pointer. + * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer. + * @siv__nullable: bpf_dynptr to IV data and state data to be used by decryptor. May be NULL. * * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured. */ __bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src, const struct bpf_dynptr *dst, - const struct bpf_dynptr *siv) + const struct bpf_dynptr *siv__nullable) { const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src; const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst; - const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv; + const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv__nullable; return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, false); } -- cgit v1.2.3 From 9363dc8ddc4e222c4259013ae5428070712910b9 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 13 Jun 2024 14:18:15 -0700 Subject: selftests: bpf: crypto: use NULL instead of 0-sized dynptr Adjust selftests to use nullable option for state and IV arg. Reviewed-by: Eduard Zingerman Signed-off-by: Vadim Fedorenko Link: https://lore.kernel.org/r/20240613211817.1551967-4-vadfed@meta.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/crypto_sanity.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c index 1be0a3fa5efd..645be6cddf36 100644 --- a/tools/testing/selftests/bpf/progs/crypto_sanity.c +++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c @@ -89,7 +89,7 @@ int decrypt_sanity(struct __sk_buff *skb) { struct __crypto_ctx_value *v; struct bpf_crypto_ctx *ctx; - struct bpf_dynptr psrc, pdst, iv; + struct bpf_dynptr psrc, pdst; int err; err = skb_dynptr_validate(skb, &psrc); @@ -114,12 +114,8 @@ int decrypt_sanity(struct __sk_buff *skb) * production code, a percpu map should be used to store the result. 
*/ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst); - /* iv dynptr has to be initialized with 0 size, but proper memory region - * has to be provided anyway - */ - bpf_dynptr_from_mem(dst, 0, 0, &iv); - status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv); + status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL); return TC_ACT_SHOT; } @@ -129,7 +125,7 @@ int encrypt_sanity(struct __sk_buff *skb) { struct __crypto_ctx_value *v; struct bpf_crypto_ctx *ctx; - struct bpf_dynptr psrc, pdst, iv; + struct bpf_dynptr psrc, pdst; int err; status = 0; @@ -156,12 +152,8 @@ int encrypt_sanity(struct __sk_buff *skb) * production code, a percpu map should be used to store the result. */ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst); - /* iv dynptr has to be initialized with 0 size, but proper memory region - * has to be provided anyway - */ - bpf_dynptr_from_mem(dst, 0, 0, &iv); - status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv); + status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL); return TC_ACT_SHOT; } -- cgit v1.2.3 From 9b560751f75f7b2484fa22c781be68f4f9fec2b0 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 13 Jun 2024 14:18:16 -0700 Subject: selftests: bpf: crypto: adjust bench to use nullable IV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bench shows some improvements, around 4% faster on decrypt. Before: Benchmark 'crypto-decrypt' started. Iter 0 (325.719us): hits 5.105M/s ( 5.105M/prod), drops 0.000M/s, total operations 5.105M/s Iter 1 (-17.295us): hits 5.224M/s ( 5.224M/prod), drops 0.000M/s, total operations 5.224M/s Iter 2 ( 5.504us): hits 4.630M/s ( 4.630M/prod), drops 0.000M/s, total operations 4.630M/s Iter 3 ( 9.239us): hits 5.148M/s ( 5.148M/prod), drops 0.000M/s, total operations 5.148M/s Iter 4 ( 37.885us): hits 5.198M/s ( 5.198M/prod), drops 0.000M/s, total operations 5.198M/s Iter 5 (-53.282us): hits 5.167M/s ( 5.167M/prod), drops 0.000M/s, total operations 5.167M/s Iter 6 (-17.809us): hits 5.186M/s ( 5.186M/prod), drops 0.000M/s, total operations 5.186M/s Summary: hits 5.092 ± 0.228M/s ( 5.092M/prod), drops 0.000 ±0.000M/s, total operations 5.092 ± 0.228M/s After: Benchmark 'crypto-decrypt' started. 
Iter 0 (268.912us): hits 5.312M/s ( 5.312M/prod), drops 0.000M/s, total operations 5.312M/s Iter 1 (124.869us): hits 5.354M/s ( 5.354M/prod), drops 0.000M/s, total operations 5.354M/s Iter 2 (-36.801us): hits 5.334M/s ( 5.334M/prod), drops 0.000M/s, total operations 5.334M/s Iter 3 (254.628us): hits 5.334M/s ( 5.334M/prod), drops 0.000M/s, total operations 5.334M/s Iter 4 (-77.691us): hits 5.275M/s ( 5.275M/prod), drops 0.000M/s, total operations 5.275M/s Iter 5 (-164.510us): hits 5.313M/s ( 5.313M/prod), drops 0.000M/s, total operations 5.313M/s Iter 6 (-81.376us): hits 5.346M/s ( 5.346M/prod), drops 0.000M/s, total operations 5.346M/s Summary: hits 5.326 ± 0.029M/s ( 5.326M/prod), drops 0.000 ±0.000M/s, total operations 5.326 ± 0.029M/s Reviewed-by: Eduard Zingerman Signed-off-by: Vadim Fedorenko Link: https://lore.kernel.org/r/20240613211817.1551967-5-vadfed@meta.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/crypto_bench.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/crypto_bench.c b/tools/testing/selftests/bpf/progs/crypto_bench.c index e61fe0882293..4ac956b26240 100644 --- a/tools/testing/selftests/bpf/progs/crypto_bench.c +++ b/tools/testing/selftests/bpf/progs/crypto_bench.c @@ -57,7 +57,7 @@ int crypto_encrypt(struct __sk_buff *skb) { struct __crypto_ctx_value *v; struct bpf_crypto_ctx *ctx; - struct bpf_dynptr psrc, pdst, iv; + struct bpf_dynptr psrc, pdst; v = crypto_ctx_value_lookup(); if (!v) { @@ -73,9 +73,8 @@ int crypto_encrypt(struct __sk_buff *skb) bpf_dynptr_from_skb(skb, 0, &psrc); bpf_dynptr_from_mem(dst, len, 0, &pdst); - bpf_dynptr_from_mem(dst, 0, 0, &iv); - status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv); + status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL); __sync_add_and_fetch(&hits, 1); return 0; @@ -84,7 +83,7 @@ int crypto_encrypt(struct __sk_buff *skb) SEC("tc") int crypto_decrypt(struct __sk_buff *skb) { - struct bpf_dynptr psrc, pdst, iv; + struct bpf_dynptr psrc, pdst; struct __crypto_ctx_value *v; struct bpf_crypto_ctx *ctx; @@ -98,9 +97,8 @@ int crypto_decrypt(struct __sk_buff *skb) bpf_dynptr_from_skb(skb, 0, &psrc); bpf_dynptr_from_mem(dst, len, 0, &pdst); - bpf_dynptr_from_mem(dst, 0, 0, &iv); - status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv); + status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL); __sync_add_and_fetch(&hits, 1); return 0; -- cgit v1.2.3 From 2d45ab1eda469c802728d0a74e1601de5e71c098 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 13 Jun 2024 14:18:17 -0700 Subject: selftests: bpf: add testmod kfunc for nullable params Add special test to be sure that only __nullable BTF params can be replaced by NULL. This patch adds fake kfuncs in bpf_testmod to properly test different params. 
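For reference, the pattern being exercised looks like this (mirroring the new
progs/test_kfunc_param_nullable.c below; bpf_kfunc_dynptr_test() is the fake
testmod kfunc whose second parameter carries the __nullable suffix):

	bpf_dynptr_from_skb(skb, 0, &data);
	bpf_kfunc_dynptr_test(&data, NULL);	/* ok: second param is __nullable */
	bpf_kfunc_dynptr_test(NULL, &data);	/* rejected: first param is not */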
Acked-by: Eduard Zingerman Signed-off-by: Vadim Fedorenko Link: https://lore.kernel.org/r/20240613211817.1551967-6-vadfed@meta.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 6 +++ .../selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h | 1 + .../bpf/prog_tests/kfunc_param_nullable.c | 11 ++++++ .../bpf/progs/test_kfunc_param_nullable.c | 43 ++++++++++++++++++++++ 4 files changed, 61 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c create mode 100644 tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 0a09732cde4b..49f9a311e49b 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -154,6 +154,11 @@ __bpf_kfunc void bpf_kfunc_common_test(void) { } +__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, + struct bpf_dynptr *ptr__nullable) +{ +} + struct bpf_testmod_btf_type_tag_1 { int a; }; @@ -363,6 +368,7 @@ BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_kfunc_common_test) +BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test) BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h index b0d586a6751f..f9809517e7fa 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h @@ -134,4 +134,5 @@ int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) __ksym; int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym; int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym; +void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nullable) __ksym; #endif /* _BPF_TESTMOD_KFUNC_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c new file mode 100644 index 000000000000..c8f4dcaac7c7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2024 Meta Platforms, Inc */ + +#include +#include "test_kfunc_param_nullable.skel.h" + +void test_kfunc_param_nullable(void) +{ + RUN_TESTS(test_kfunc_param_nullable); +} diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c new file mode 100644 index 000000000000..7c75e9b8f455 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc */ +#include +#include +#include "bpf_misc.h" +#include "bpf_kfuncs.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +SEC("tc") +int kfunc_dynptr_nullable_test1(struct __sk_buff *skb) +{ + struct bpf_dynptr data; + + bpf_dynptr_from_skb(skb, 0, &data); + bpf_kfunc_dynptr_test(&data, NULL); + + return 0; +} + +SEC("tc") +int kfunc_dynptr_nullable_test2(struct __sk_buff *skb) +{ + struct bpf_dynptr data; + + bpf_dynptr_from_skb(skb, 0, &data); + 
bpf_kfunc_dynptr_test(&data, &data); + + return 0; +} + +SEC("tc") +__failure __msg("expected pointer to stack or dynptr_ptr") +int kfunc_dynptr_nullable_test3(struct __sk_buff *skb) +{ + struct bpf_dynptr data; + + bpf_dynptr_from_skb(skb, 0, &data); + bpf_kfunc_dynptr_test(NULL, &data); + + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 124e8c2b1b5d08a10d3a44ed082eaaf98a78c91f Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 12 Jun 2024 18:38:12 -0700 Subject: bpf: Relax tuple len requirement for sk helpers. __bpf_skc_lookup() safely handles incorrect values of tuple len, hence we can allow zero to be passed as tuple len. This patch alone doesn't make an observable verifier difference. It's a trivial improvement that might simplify bpf programs. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240613013815.953-2-alexei.starovoitov@gmail.com --- net/core/filter.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 73722790cee3..f1c37c85b858 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6815,7 +6815,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6834,7 +6834,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6853,7 +6853,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6877,7 +6877,7 @@ static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6901,7 +6901,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6925,7 +6925,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6963,7 +6963,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6987,7 +6987,7 @@ static const struct bpf_func_proto 
bpf_xdp_skc_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7011,7 +7011,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7031,7 +7031,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7050,7 +7050,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7069,7 +7069,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; -- cgit v1.2.3 From 98d7ca374ba4b39e7535613d40e159f09ca14da2 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 12 Jun 2024 18:38:13 -0700 Subject: bpf: Track delta between "linked" registers. Compilers can generate the code r1 = r2 r1 += 0x1 if r2 < 1000 goto ... use knowledge of r2 range in subsequent r1 operations So remember constant delta between r2 and r1 and update r1 after 'if' condition. Unfortunately LLVM still uses this pattern for loops with 'can_loop' construct: for (i = 0; i < 1000 && can_loop; i++) The "undo" pass was introduced in LLVM https://reviews.llvm.org/D121937 to prevent this optimization, but it cannot cover all cases. Instead of fighting middle end optimizer in BPF backend teach the verifier about this pattern. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240613013815.953-3-alexei.starovoitov@gmail.com --- include/linux/bpf_verifier.h | 12 +++- kernel/bpf/log.c | 4 +- kernel/bpf/verifier.c | 95 +++++++++++++++++++++++--- tools/testing/selftests/bpf/verifier/precise.c | 22 +++--- 4 files changed, 109 insertions(+), 24 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 50aa87f8d77f..2b54e25d2364 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -73,7 +73,10 @@ enum bpf_iter_state { struct bpf_reg_state { /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; - /* Fixed part of pointer offset, pointer types only */ + /* + * Fixed part of pointer offset, pointer types only. + * Or constant delta between "linked" scalars with the same ID. + */ s32 off; union { /* valid when type == PTR_TO_PACKET */ @@ -167,6 +170,13 @@ struct bpf_reg_state { * Similarly to dynptrs, we use ID to track "belonging" of a reference * to a specific instance of bpf_iter. 
*/ + /* + * Upper bit of ID is used to remember relationship between "linked" + * registers. Example: + * r1 = r2; both will have r1->id == r2->id == N + * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10 + */ +#define BPF_ADD_CONST (1U << 31) u32 id; /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned * from a pointer-cast helper, bpf_sk_fullsock() and diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c index 4bd8f17a9f24..3f4ae92e549f 100644 --- a/kernel/bpf/log.c +++ b/kernel/bpf/log.c @@ -708,7 +708,9 @@ static void print_reg_state(struct bpf_verifier_env *env, verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id)); verbose(env, "("); if (reg->id) - verbose_a("id=%d", reg->id); + verbose_a("id=%d", reg->id & ~BPF_ADD_CONST); + if (reg->id & BPF_ADD_CONST) + verbose(env, "%+d", reg->off); if (reg->ref_obj_id) verbose_a("ref_obj_id=%d", reg->ref_obj_id); if (type_is_non_owning_ref(reg->type)) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e857b08e1f2d..dcbbf5f64c5d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3991,7 +3991,7 @@ static bool idset_contains(struct bpf_idset *s, u32 id) u32 i; for (i = 0; i < s->count; ++i) - if (s->ids[i] == id) + if (s->ids[i] == (id & ~BPF_ADD_CONST)) return true; return false; @@ -4001,7 +4001,7 @@ static int idset_push(struct bpf_idset *s, u32 id) { if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids))) return -EFAULT; - s->ids[s->count++] = id; + s->ids[s->count++] = id & ~BPF_ADD_CONST; return 0; } @@ -4438,8 +4438,20 @@ static bool __is_pointer_value(bool allow_ptr_leaks, static void assign_scalar_id_before_mov(struct bpf_verifier_env *env, struct bpf_reg_state *src_reg) { - if (src_reg->type == SCALAR_VALUE && !src_reg->id && - !tnum_is_const(src_reg->var_off)) + if (src_reg->type != SCALAR_VALUE) + return; + + if (src_reg->id & BPF_ADD_CONST) { + /* + * The verifier is processing rX = rY insn and + * rY->id has special linked register already. + * Cleared it, since multiple rX += const are not supported. + */ + src_reg->id = 0; + src_reg->off = 0; + } + + if (!src_reg->id && !tnum_is_const(src_reg->var_off)) /* Ensure that src_reg has a valid ID that will be copied to * dst_reg and then will be used by find_equal_scalars() to * propagate min/max range. @@ -14042,6 +14054,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; + bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); u8 opcode = BPF_OP(insn->code); int err; @@ -14064,11 +14077,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; - else - /* Make sure ID is cleared otherwise dst_reg min/max could be - * incorrectly propagated into other registers by find_equal_scalars() - */ - dst_reg->id = 0; + if (BPF_SRC(insn->code) == BPF_X) { src_reg = ®s[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { @@ -14132,7 +14141,43 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } - return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); + err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); + if (err) + return err; + /* + * Compilers can generate the code + * r1 = r2 + * r1 += 0x1 + * if r2 < 1000 goto ... 
+ * use r1 in memory access + * So remember constant delta between r2 and r1 and update r1 after + * 'if' condition. + */ + if (env->bpf_capable && BPF_OP(insn->code) == BPF_ADD && + dst_reg->id && is_reg_const(src_reg, alu32)) { + u64 val = reg_const_value(src_reg, alu32); + + if ((dst_reg->id & BPF_ADD_CONST) || + /* prevent overflow in find_equal_scalars() later */ + val > (u32)S32_MAX) { + /* + * If the register already went through rX += val + * we cannot accumulate another val into rx->off. + */ + dst_reg->off = 0; + dst_reg->id = 0; + } else { + dst_reg->id |= BPF_ADD_CONST; + dst_reg->off = val; + } + } else { + /* + * Make sure ID is cleared otherwise dst_reg min/max could be + * incorrectly propagated into other registers by find_equal_scalars() + */ + dst_reg->id = 0; + } + return 0; } /* check validity of 32-bit and 64-bit arithmetic operations */ @@ -15104,12 +15149,36 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, static void find_equal_scalars(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg) { + struct bpf_reg_state fake_reg; struct bpf_func_state *state; struct bpf_reg_state *reg; bpf_for_each_reg_in_vstate(vstate, state, reg, ({ - if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) + if (reg->type != SCALAR_VALUE || reg == known_reg) + continue; + if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) + continue; + if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || + reg->off == known_reg->off) { copy_register_state(reg, known_reg); + } else { + s32 saved_off = reg->off; + + fake_reg.type = SCALAR_VALUE; + __mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off); + + /* reg = known_reg; reg += delta */ + copy_register_state(reg, known_reg); + /* + * Must preserve off, id and add_const flag, + * otherwise another find_equal_scalars() will be incorrect. + */ + reg->off = saved_off; + + scalar32_min_max_add(reg, &fake_reg); + scalar_min_max_add(reg, &fake_reg); + reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); + } })); } @@ -16738,6 +16807,10 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, } if (!rold->precise && exact == NOT_EXACT) return true; + if ((rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST)) + return false; + if ((rold->id & BPF_ADD_CONST) && (rold->off != rcur->off)) + return false; /* Why check_ids() for scalar registers? 
* * Consider the following BPF code: diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c index 0a9293a57211..90643ccc221d 100644 --- a/tools/testing/selftests/bpf/verifier/precise.c +++ b/tools/testing/selftests/bpf/verifier/precise.c @@ -39,12 +39,12 @@ .result = VERBOSE_ACCEPT, .errstr = "mark_precise: frame0: last_idx 26 first_idx 20\ - mark_precise: frame0: regs=r2 stack= before 25\ - mark_precise: frame0: regs=r2 stack= before 24\ - mark_precise: frame0: regs=r2 stack= before 23\ - mark_precise: frame0: regs=r2 stack= before 22\ - mark_precise: frame0: regs=r2 stack= before 20\ - mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: regs=r2,r9 stack= before 25\ + mark_precise: frame0: regs=r2,r9 stack= before 24\ + mark_precise: frame0: regs=r2,r9 stack= before 23\ + mark_precise: frame0: regs=r2,r9 stack= before 22\ + mark_precise: frame0: regs=r2,r9 stack= before 20\ + mark_precise: frame0: parent state regs=r2,r9 stack=:\ mark_precise: frame0: last_idx 19 first_idx 10\ mark_precise: frame0: regs=r2,r9 stack= before 19\ mark_precise: frame0: regs=r9 stack= before 18\ @@ -100,11 +100,11 @@ .errstr = "26: (85) call bpf_probe_read_kernel#113\ mark_precise: frame0: last_idx 26 first_idx 22\ - mark_precise: frame0: regs=r2 stack= before 25\ - mark_precise: frame0: regs=r2 stack= before 24\ - mark_precise: frame0: regs=r2 stack= before 23\ - mark_precise: frame0: regs=r2 stack= before 22\ - mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: regs=r2,r9 stack= before 25\ + mark_precise: frame0: regs=r2,r9 stack= before 24\ + mark_precise: frame0: regs=r2,r9 stack= before 23\ + mark_precise: frame0: regs=r2,r9 stack= before 22\ + mark_precise: frame0: parent state regs=r2,r9 stack=:\ mark_precise: frame0: last_idx 20 first_idx 20\ mark_precise: frame0: regs=r2,r9 stack= before 20\ mark_precise: frame0: parent state regs=r2,r9 stack=:\ -- cgit v1.2.3 From 6870bdb3f4f2991193449f9de57109b3e263f55c Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 12 Jun 2024 18:38:14 -0700 Subject: bpf: Support can_loop/cond_break on big endian Add big endian support for can_loop/cond_break macros. 
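The byte order only affects how the inline-asm-encoded may_goto instruction is
laid out; BPF program source using the macros stays the same, e.g. the bounded
loop form already used in the selftests (a minimal sketch, arr is illustrative):

	for (i = 0; i < 1000 && can_loop; i++)
		arr[i] = i;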
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20240613013815.953-4-alexei.starovoitov@gmail.com --- tools/testing/selftests/bpf/bpf_experimental.h | 28 ++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 8ee7a00b7c82..eede6fc2ccb4 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -351,6 +351,7 @@ l_true: \ l_continue:; \ }) #else +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define can_loop \ ({ __label__ l_break, l_continue; \ bool ret = true; \ @@ -376,6 +377,33 @@ l_true: \ l_break: break; \ l_continue:; \ }) +#else +#define can_loop \ + ({ __label__ l_break, l_continue; \ + bool ret = true; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: ret = false; \ + l_continue:; \ + ret; \ + }) + +#define cond_break \ + ({ __label__ l_break, l_continue; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: break; \ + l_continue:; \ + }) +#endif #endif #ifndef bpf_nop_mov -- cgit v1.2.3 From dedf56d775c0bebbc3003bfb988dddaf0a583c28 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 12 Jun 2024 18:38:15 -0700 Subject: selftests/bpf: Add tests for add_const Improve arena based tests and add several C and asm tests with specific pattern. These tests would have failed without add_const verifier support. Also add several loop_inside_iter*() tests that are not related to add_const, but nice to have. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240613013815.953-5-alexei.starovoitov@gmail.com --- tools/testing/selftests/bpf/progs/arena_htab.c | 16 +- .../bpf/progs/verifier_iterating_callbacks.c | 236 +++++++++++++++++++++ 2 files changed, 249 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c index cf938cf8c11e..81eaa94afeb0 100644 --- a/tools/testing/selftests/bpf/progs/arena_htab.c +++ b/tools/testing/selftests/bpf/progs/arena_htab.c @@ -19,25 +19,35 @@ void __arena *htab_for_user; bool skip = false; int zero = 0; +char __arena arr1[100000]; +char arr2[1000]; SEC("syscall") int arena_htab_llvm(void *ctx) { #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM) struct htab __arena *htab; + char __arena *arr = arr1; __u64 i; htab = bpf_alloc(sizeof(*htab)); cast_kern(htab); htab_init(htab); + cast_kern(arr); + /* first run. 
No old elems in the table */ - for (i = zero; i < 1000; i++) + for (i = zero; i < 100000 && can_loop; i++) { htab_update_elem(htab, i, i); + arr[i] = i; + } - /* should replace all elems with new ones */ - for (i = zero; i < 1000; i++) + /* should replace some elems with new ones */ + for (i = zero; i < 1000 && can_loop; i++) { htab_update_elem(htab, i, i); + /* Access mem to make the verifier use bounded loop logic */ + arr2[i] = i; + } cast_user(htab); htab_for_user = htab; #else diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c index bd676d7e615f..53679252e8a1 100644 --- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c +++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c @@ -405,4 +405,240 @@ int cond_break5(const void *ctx) return cnt1 > 1 && cnt2 > 1 ? 1 : 0; } +#define ARR2_SZ 1000 +SEC(".data.arr2") +char arr2[ARR2_SZ]; + +SEC("socket") +__success __flag(BPF_F_TEST_STATE_FREQ) +int loop_inside_iter(const void *ctx) +{ + struct bpf_iter_num it; + int *v, sum = 0; + __u64 i = 0; + + bpf_iter_num_new(&it, 0, ARR2_SZ); + while ((v = bpf_iter_num_next(&it))) { + if (i < ARR2_SZ) + sum += arr2[i++]; + } + bpf_iter_num_destroy(&it); + return sum; +} + +SEC("socket") +__success __flag(BPF_F_TEST_STATE_FREQ) +int loop_inside_iter_signed(const void *ctx) +{ + struct bpf_iter_num it; + int *v, sum = 0; + long i = 0; + + bpf_iter_num_new(&it, 0, ARR2_SZ); + while ((v = bpf_iter_num_next(&it))) { + if (i < ARR2_SZ && i >= 0) + sum += arr2[i++]; + } + bpf_iter_num_destroy(&it); + return sum; +} + +volatile const int limit = ARR2_SZ; + +SEC("socket") +__success __flag(BPF_F_TEST_STATE_FREQ) +int loop_inside_iter_volatile_limit(const void *ctx) +{ + struct bpf_iter_num it; + int *v, sum = 0; + __u64 i = 0; + + bpf_iter_num_new(&it, 0, ARR2_SZ); + while ((v = bpf_iter_num_next(&it))) { + if (i < limit) + sum += arr2[i++]; + } + bpf_iter_num_destroy(&it); + return sum; +} + +#define ARR_LONG_SZ 1000 + +SEC(".data.arr_long") +long arr_long[ARR_LONG_SZ]; + +SEC("socket") +__success +int test1(const void *ctx) +{ + long i; + + for (i = 0; i < ARR_LONG_SZ && can_loop; i++) + arr_long[i] = i; + return 0; +} + +SEC("socket") +__success +int test2(const void *ctx) +{ + __u64 i; + + for (i = zero; i < ARR_LONG_SZ && can_loop; i++) { + barrier_var(i); + arr_long[i] = i; + } + return 0; +} + +SEC(".data.arr_foo") +struct { + int a; + int b; +} arr_foo[ARR_LONG_SZ]; + +SEC("socket") +__success +int test3(const void *ctx) +{ + __u64 i; + + for (i = zero; i < ARR_LONG_SZ && can_loop; i++) { + barrier_var(i); + arr_foo[i].a = i; + arr_foo[i].b = i; + } + return 0; +} + +SEC("socket") +__success +int test4(const void *ctx) +{ + long i; + + for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) { + barrier_var(i); + arr_foo[i].a = i; + arr_foo[i].b = i; + } + return 0; +} + +char buf[10] SEC(".data.buf"); + +SEC("socket") +__description("check add const") +__success +__naked void check_add_const(void) +{ + /* typical LLVM generated loop with may_goto */ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 > 9 goto l1_%=; \ +l0_%=: r1 = %[buf]; \ + r2 = r0; \ + r1 += r2; \ + r3 = *(u8 *)(r1 +0); \ + .byte 0xe5; /* may_goto */ \ + .byte 0; /* regs */ \ + .short 4; /* off of l1_%=: */ \ + .long 0; /* imm */ \ + r0 = r2; \ + r0 += 1; \ + if r2 < 9 goto l0_%=; \ + exit; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm_ptr(buf) + : 
__clobber_common); +} + +SEC("socket") +__failure +__msg("*(u8 *)(r7 +0) = r0") +__msg("invalid access to map value, value_size=10 off=10 size=1") +__naked void check_add_const_3regs(void) +{ + asm volatile ( + "r6 = %[buf];" + "r7 = %[buf];" + "call %[bpf_ktime_get_ns];" + "r1 = r0;" /* link r0.id == r1.id == r2.id */ + "r2 = r0;" + "r1 += 1;" /* r1 == r0+1 */ + "r2 += 2;" /* r2 == r0+2 */ + "if r0 > 8 goto 1f;" /* r0 range [0, 8] */ + "r6 += r1;" /* r1 range [1, 9] */ + "r7 += r2;" /* r2 range [2, 10] */ + "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */ + "*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */ + "1: exit;" + : + : __imm(bpf_ktime_get_ns), + __imm_ptr(buf) + : __clobber_common); +} + +SEC("socket") +__failure +__msg("*(u8 *)(r8 -1) = r0") +__msg("invalid access to map value, value_size=10 off=10 size=1") +__naked void check_add_const_3regs_2if(void) +{ + asm volatile ( + "r6 = %[buf];" + "r7 = %[buf];" + "r8 = %[buf];" + "call %[bpf_ktime_get_ns];" + "if r0 < 2 goto 1f;" + "r1 = r0;" /* link r0.id == r1.id == r2.id */ + "r2 = r0;" + "r1 += 1;" /* r1 == r0+1 */ + "r2 += 2;" /* r2 == r0+2 */ + "if r2 > 11 goto 1f;" /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */ + "if r0 s< 0 goto 1f;" /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */ + "r6 += r0;" /* r0 range [0, 9] */ + "r7 += r1;" /* r1 range [1, 10] */ + "r8 += r2;" /* r2 range [2, 11] */ + "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */ + "*(u8 *)(r7 -1) = r0;" /* safe */ + "*(u8 *)(r8 -1) = r0;" /* unsafe */ + "1: exit;" + : + : __imm(bpf_ktime_get_ns), + __imm_ptr(buf) + : __clobber_common); +} + +SEC("socket") +__failure +__flag(BPF_F_TEST_STATE_FREQ) +__naked void check_add_const_regsafe_off(void) +{ + asm volatile ( + "r8 = %[buf];" + "call %[bpf_ktime_get_ns];" + "r6 = r0;" + "call %[bpf_ktime_get_ns];" + "r7 = r0;" + "call %[bpf_ktime_get_ns];" + "r1 = r0;" /* same ids for r1 and r0 */ + "if r6 > r7 goto 1f;" /* this jump can't be predicted */ + "r1 += 1;" /* r1.off == +1 */ + "goto 2f;" + "1: r1 += 100;" /* r1.off == +100 */ + "goto +0;" /* verify r1.off in regsafe() after this insn */ + "2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/ + "r8 += r1;" + "*(u8 *)(r8 +0) = r0;" /* potentially unsafe, buf size is 10 */ + "3: exit;" + : + : __imm(bpf_ktime_get_ns), + __imm_ptr(buf) + : __clobber_common); +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 58e185a0dc359a6c1c9eff348d7badfc9f722159 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 13 Jun 2024 10:50:06 +0100 Subject: libbpf: Add btf__distill_base() creating split BTF with distilled base BTF To support more robust split BTF, adding supplemental context for the base BTF type ids that split BTF refers to is required. Without such references, a simple shuffling of base BTF type ids (without any other significant change) invalidates the split BTF. Here the attempt is made to store additional context to make split BTF more robust. This context comes in the form of distilled base BTF providing minimal information (name and - in some cases - size) for base INTs, FLOATs, STRUCTs, UNIONs, ENUMs and ENUM64s along with modified split BTF that points at that base and contains any additional types needed (such as TYPEDEF, PTR and anonymous STRUCT/UNION declarations). This information constitutes the minimal BTF representation needed to disambiguate or remove split BTF references to base BTF. The rules are as follows: - INT, FLOAT, FWD are recorded in full. 
- if a named base BTF STRUCT or UNION is referred to from split BTF, it will be encoded as a zero-member sized STRUCT/UNION (preserving size for later relocation checks). Only base BTF STRUCT/UNIONs that are either embedded in split BTF STRUCT/UNIONs or that have multiple STRUCT/UNION instances of the same name will _need_ size checks at relocation time, but as it is possible a different set of types will be duplicates in the later to-be-resolved base BTF, we preserve size information for all named STRUCT/UNIONs. - if an ENUM[64] is named, a ENUM forward representation (an ENUM with no values) of the same size is used. - in all other cases, the type is added to the new split BTF. Avoiding struct/union/enum/enum64 expansion is important to keep the distilled base BTF representation to a minimum size. When successful, new representations of the distilled base BTF and new split BTF that refers to it are returned. Both need to be freed by the caller. So to take a simple example, with split BTF with a type referring to "struct sk_buff", we will generate distilled base BTF with a 0-member STRUCT sk_buff of the appropriate size, and the split BTF will refer to it instead. Tools like pahole can utilize such split BTF to populate the .BTF section (split BTF) and an additional .BTF.base section. Then when the split BTF is loaded, the distilled base BTF can be used to relocate split BTF to reference the current (and possibly changed) base BTF. So for example if "struct sk_buff" was id 502 when the split BTF was originally generated, we can use the distilled base BTF to see that id 502 refers to a "struct sk_buff" and replace instances of id 502 with the current (relocated) base BTF sk_buff type id. Distilled base BTF is small; when building a kernel with all modules using distilled base BTF as a test, overall module size grew by only 5.3Mb total across ~2700 modules. 
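A caller would use the new API roughly as follows (a minimal sketch based on
the documentation added to btf.h below; split_btf is some existing split BTF
and error handling is trimmed):

	struct btf *new_base = NULL, *new_split = NULL;
	int err;

	err = btf__distill_base(split_btf, &new_base, &new_split);
	if (!err) {
		/* new_split now refers to new_base (the distilled base);
		 * both must be freed by the caller.
		 */
		btf__free(new_split);
		btf__free(new_base);
	}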
Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240613095014.357981-2-alan.maguire@oracle.com --- tools/lib/bpf/btf.c | 319 ++++++++++++++++++++++++++++++++++++++++++++++- tools/lib/bpf/btf.h | 21 ++++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 335 insertions(+), 6 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 775ca55a541c..407ed92b4134 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -1770,9 +1770,8 @@ static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off) return 0; } -int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) +static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type) { - struct btf_pipe p = { .src = src_btf, .dst = btf }; struct btf_field_iter it; struct btf_type *t; __u32 *str_off; @@ -1783,10 +1782,10 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t return libbpf_err(sz); /* deconstruct BTF, if necessary, and invalidate raw_data */ - if (btf_ensure_modifiable(btf)) + if (btf_ensure_modifiable(p->dst)) return libbpf_err(-ENOMEM); - t = btf_add_type_mem(btf, sz); + t = btf_add_type_mem(p->dst, sz); if (!t) return libbpf_err(-ENOMEM); @@ -1797,12 +1796,19 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t return libbpf_err(err); while ((str_off = btf_field_iter_next(&it))) { - err = btf_rewrite_str(&p, str_off); + err = btf_rewrite_str(p, str_off); if (err) return libbpf_err(err); } - return btf_commit_type(btf, sz); + return btf_commit_type(p->dst, sz); +} + +int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) +{ + struct btf_pipe p = { .src = src_btf, .dst = btf }; + + return btf_add_type(&p, src_type); } static size_t btf_dedup_identity_hash_fn(long key, void *ctx); @@ -5276,3 +5282,304 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void return 0; } + +struct btf_distill { + struct btf_pipe pipe; + int *id_map; + unsigned int split_start_id; + unsigned int split_start_str; + int diff_id; +}; + +static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i) +{ + struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i); + struct btf_field_iter it; + __u32 *id; + int err; + + err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS); + if (err) + return err; + while ((id = btf_field_iter_next(&it))) { + struct btf_type *base_t; + + if (!*id) + continue; + /* split BTF id, not needed */ + if (*id >= dist->split_start_id) + continue; + /* already added ? */ + if (dist->id_map[*id] > 0) + continue; + + /* only a subset of base BTF types should be referenced from + * split BTF; ensure nothing unexpected is referenced. 
+ */ + base_t = btf_type_by_id(dist->pipe.src, *id); + switch (btf_kind(base_t)) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_FWD: + case BTF_KIND_ARRAY: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_TYPEDEF: + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + case BTF_KIND_PTR: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_VOLATILE: + case BTF_KIND_FUNC_PROTO: + case BTF_KIND_TYPE_TAG: + dist->id_map[*id] = *id; + break; + default: + pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n", + *id, btf_kind(base_t)); + return -EINVAL; + } + /* If a base type is used, ensure types it refers to are + * marked as used also; so for example if we find a PTR to INT + * we need both the PTR and INT. + * + * The only exception is named struct/unions, since distilled + * base BTF composite types have no members. + */ + if (btf_is_composite(base_t) && base_t->name_off) + continue; + err = btf_add_distilled_type_ids(dist, *id); + if (err) + return err; + } + return 0; +} + +static int btf_add_distilled_types(struct btf_distill *dist) +{ + bool adding_to_base = dist->pipe.dst->start_id == 1; + int id = btf__type_cnt(dist->pipe.dst); + struct btf_type *t; + int i, err = 0; + + + /* Add types for each of the required references to either distilled + * base or split BTF, depending on type characteristics. + */ + for (i = 1; i < dist->split_start_id; i++) { + const char *name; + int kind; + + if (!dist->id_map[i]) + continue; + t = btf_type_by_id(dist->pipe.src, i); + kind = btf_kind(t); + name = btf__name_by_offset(dist->pipe.src, t->name_off); + + switch (kind) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_FWD: + /* Named int, float, fwd are added to base. */ + if (!adding_to_base) + continue; + err = btf_add_type(&dist->pipe, t); + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + /* Named struct/union are added to base as 0-vlen + * struct/union of same size. Anonymous struct/unions + * are added to split BTF as-is. + */ + if (adding_to_base) { + if (!t->name_off) + continue; + err = btf_add_composite(dist->pipe.dst, kind, name, t->size); + } else { + if (t->name_off) + continue; + err = btf_add_type(&dist->pipe, t); + } + break; + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + /* Named enum[64]s are added to base as a sized + * enum; relocation will match with appropriately-named + * and sized enum or enum64. + * + * Anonymous enums are added to split BTF as-is. + */ + if (adding_to_base) { + if (!t->name_off) + continue; + err = btf__add_enum(dist->pipe.dst, name, t->size); + } else { + if (t->name_off) + continue; + err = btf_add_type(&dist->pipe, t); + } + break; + case BTF_KIND_ARRAY: + case BTF_KIND_TYPEDEF: + case BTF_KIND_PTR: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_VOLATILE: + case BTF_KIND_FUNC_PROTO: + case BTF_KIND_TYPE_TAG: + /* All other types are added to split BTF. */ + if (adding_to_base) + continue; + err = btf_add_type(&dist->pipe, t); + break; + default: + pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n", + name, i, kind); + return -EINVAL; + + } + if (err < 0) + break; + dist->id_map[i] = id++; + } + return err; +} + +/* Split BTF ids without a mapping will be shifted downwards since distilled + * base BTF is smaller than the original base BTF. For those that have a + * mapping (either to base or updated split BTF), update the id based on + * that mapping. 
+ */ +static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i) +{ + struct btf_type *t = btf_type_by_id(dist->pipe.dst, i); + struct btf_field_iter it; + __u32 *id; + int err; + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); + if (err) + return err; + while ((id = btf_field_iter_next(&it))) { + if (dist->id_map[*id]) + *id = dist->id_map[*id]; + else if (*id >= dist->split_start_id) + *id -= dist->diff_id; + } + return 0; +} + +/* Create updated split BTF with distilled base BTF; distilled base BTF + * consists of BTF information required to clarify the types that split + * BTF refers to, omitting unneeded details. Specifically it will contain + * base types and memberless definitions of named structs, unions and enumerated + * types. Associated reference types like pointers, arrays and anonymous + * structs, unions and enumerated types will be added to split BTF. + * Size is recorded for named struct/unions to help guide matching to the + * target base BTF during later relocation. + * + * The only case where structs, unions or enumerated types are fully represented + * is when they are anonymous; in such cases, the anonymous type is added to + * split BTF in full. + * + * We return newly-created split BTF where the split BTF refers to a newly-created + * distilled base BTF. Both must be freed separately by the caller. + */ +int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, + struct btf **new_split_btf) +{ + struct btf *new_base = NULL, *new_split = NULL; + const struct btf *old_base; + unsigned int n = btf__type_cnt(src_btf); + struct btf_distill dist = {}; + struct btf_type *t; + int i, err = 0; + + /* src BTF must be split BTF. */ + old_base = btf__base_btf(src_btf); + if (!new_base_btf || !new_split_btf || !old_base) + return libbpf_err(-EINVAL); + + new_base = btf__new_empty(); + if (!new_base) + return libbpf_err(-ENOMEM); + dist.id_map = calloc(n, sizeof(*dist.id_map)); + if (!dist.id_map) { + err = -ENOMEM; + goto done; + } + dist.pipe.src = src_btf; + dist.pipe.dst = new_base; + dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); + if (IS_ERR(dist.pipe.str_off_map)) { + err = -ENOMEM; + goto done; + } + dist.split_start_id = btf__type_cnt(old_base); + dist.split_start_str = old_base->hdr->str_len; + + /* Pass over src split BTF; generate the list of base BTF type ids it + * references; these will constitute our distilled BTF set to be + * distributed over base and split BTF as appropriate. + */ + for (i = src_btf->start_id; i < n; i++) { + err = btf_add_distilled_type_ids(&dist, i); + if (err < 0) + goto done; + } + /* Next add types for each of the required references to base BTF and split BTF + * in turn. + */ + err = btf_add_distilled_types(&dist); + if (err < 0) + goto done; + + /* Create new split BTF with distilled base BTF as its base; the final + * state is split BTF with distilled base BTF that represents enough + * about its base references to allow it to be relocated with the base + * BTF available. + */ + new_split = btf__new_empty_split(new_base); + if (!new_split_btf) { + err = -errno; + goto done; + } + dist.pipe.dst = new_split; + /* First add all split types */ + for (i = src_btf->start_id; i < n; i++) { + t = btf_type_by_id(src_btf, i); + err = btf_add_type(&dist.pipe, t); + if (err < 0) + goto done; + } + /* Now add distilled types to split BTF that are not added to base. 
*/ + err = btf_add_distilled_types(&dist); + if (err < 0) + goto done; + + /* All split BTF ids will be shifted downwards since there are less base + * BTF ids in distilled base BTF. + */ + dist.diff_id = dist.split_start_id - btf__type_cnt(new_base); + + n = btf__type_cnt(new_split); + /* Now update base/split BTF ids. */ + for (i = 1; i < n; i++) { + err = btf_update_distilled_type_ids(&dist, i); + if (err < 0) + break; + } +done: + free(dist.id_map); + hashmap__free(dist.pipe.str_off_map); + if (err) { + btf__free(new_split); + btf__free(new_base); + return libbpf_err(err); + } + *new_base_btf = new_base; + *new_split_btf = new_split; + + return 0; +} diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 8e6880d91c84..cb08ee9a5a10 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -107,6 +107,27 @@ LIBBPF_API struct btf *btf__new_empty(void); */ LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf); +/** + * @brief **btf__distill_base()** creates new versions of the split BTF + * *src_btf* and its base BTF. The new base BTF will only contain the types + * needed to improve robustness of the split BTF to small changes in base BTF. + * When that split BTF is loaded against a (possibly changed) base, this + * distilled base BTF will help update references to that (possibly changed) + * base BTF. + * + * Both the new split and its associated new base BTF must be freed by + * the caller. + * + * If successful, 0 is returned and **new_base_btf** and **new_split_btf** + * will point at new base/split BTF. Both the new split and its associated + * new base BTF must be freed by the caller. + * + * A negative value is returned on error and the thread-local `errno` variable + * is set to the error code as well. + */ +LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, + struct btf **new_split_btf); + LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf); LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 40595233dc7f..175d1536e070 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -419,6 +419,7 @@ LIBBPF_1.4.0 { LIBBPF_1.5.0 { global: + btf__distill_base; bpf_map__autoattach; bpf_map__set_autoattach; bpf_program__attach_sockmap; -- cgit v1.2.3 From eb20e727c4343ad591cff2bef243590c77f62cf1 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 13 Jun 2024 10:50:07 +0100 Subject: selftests/bpf: Test distilled base, split BTF generation Test generation of split+distilled base BTF, ensuring that - named base BTF STRUCTs and UNIONs are represented as 0-vlen sized STRUCT/UNIONs - named ENUM[64]s are represented as 0-vlen named ENUM[64]s - anonymous struct/unions are represented in full in split BTF - anonymous enums are represented in full in split BTF - types unreferenced from split BTF are not present in distilled base BTF Also test that with vmlinux BTF and split BTF based upon it, we only represent needed base types referenced from split BTF in distilled base. 
Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240613095014.357981-3-alan.maguire@oracle.com --- .../testing/selftests/bpf/prog_tests/btf_distill.c | 274 +++++++++++++++++++++ 1 file changed, 274 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/btf_distill.c diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c new file mode 100644 index 000000000000..5c3a38747962 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Oracle and/or its affiliates. */ + +#include +#include +#include "btf_helpers.h" + +/* Fabricate base, split BTF with references to base types needed; then create + * split BTF with distilled base BTF and ensure expectations are met: + * - only referenced base types from split BTF are present + * - struct/union/enum are represented as empty unless anonymous, when they + * are represented in full in split BTF + */ +static void test_distilled_base(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(btf1, 1); /* [2] ptr to int */ + btf__add_struct(btf1, "s1", 8); /* [3] struct s1 { */ + btf__add_field(btf1, "f1", 2, 0, 0); /* int *f1; */ + /* } */ + btf__add_struct(btf1, "", 12); /* [4] struct { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + btf__add_field(btf1, "f2", 3, 32, 0); /* struct s1 f2; */ + /* } */ + btf__add_int(btf1, "unsigned int", 4, 0); /* [5] unsigned int */ + btf__add_union(btf1, "u1", 12); /* [6] union u1 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + btf__add_field(btf1, "f2", 2, 0, 0); /* int *f2; */ + /* } */ + btf__add_union(btf1, "", 4); /* [7] union { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + btf__add_enum(btf1, "e1", 4); /* [8] enum e1 { */ + btf__add_enum_value(btf1, "v1", 1); /* v1 = 1; */ + /* } */ + btf__add_enum(btf1, "", 4); /* [9] enum { */ + btf__add_enum_value(btf1, "av1", 2); /* av1 = 2; */ + /* } */ + btf__add_enum64(btf1, "e641", 8, true); /* [10] enum64 { */ + btf__add_enum64_value(btf1, "v1", 1024); /* v1 = 1024; */ + /* } */ + btf__add_enum64(btf1, "", 8, true); /* [11] enum64 { */ + btf__add_enum64_value(btf1, "v1", 1025); /* v1 = 1025; */ + /* } */ + btf__add_struct(btf1, "unneeded", 4); /* [12] struct unneeded { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + btf__add_struct(btf1, "embedded", 4); /* [13] struct embedded { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + btf__add_func_proto(btf1, 1); /* [14] int (*)(int *p1); */ + btf__add_func_param(btf1, "p1", 1); + + btf__add_array(btf1, 1, 1, 3); /* [15] int [3]; */ + + btf__add_struct(btf1, "from_proto", 4); /* [16] struct from_proto { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + btf__add_union(btf1, "u1", 4); /* [17] union u1 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=8 vlen=1\n" + "\t'f1' type_id=2 bits_offset=0", + "[4] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=32", + "[5] INT 'unsigned int' 
size=4 bits_offset=0 nr_bits=32 encoding=(none)", + "[6] UNION 'u1' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=2 bits_offset=0", + "[7] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n" + "\t'v1' val=1", + "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1024", + "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[12] STRUCT 'unneeded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[13] STRUCT 'embedded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3", + "[16] STRUCT 'from_proto' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[17] UNION 'u1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0"); + + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf__add_ptr(btf2, 3); /* [18] ptr to struct s1 */ + /* add ptr to struct anon */ + btf__add_ptr(btf2, 4); /* [19] ptr to struct (anon) */ + btf__add_const(btf2, 6); /* [20] const union u1 */ + btf__add_restrict(btf2, 7); /* [21] restrict union (anon) */ + btf__add_volatile(btf2, 8); /* [22] volatile enum e1 */ + btf__add_typedef(btf2, "et", 9); /* [23] typedef enum (anon) */ + btf__add_const(btf2, 10); /* [24] const enum64 e641 */ + btf__add_ptr(btf2, 11); /* [25] restrict enum64 (anon) */ + btf__add_struct(btf2, "with_embedded", 4); /* [26] struct with_embedded { */ + btf__add_field(btf2, "f1", 13, 0, 0); /* struct embedded f1; */ + /* } */ + btf__add_func(btf2, "fn", BTF_FUNC_STATIC, 14); /* [27] int fn(int p1); */ + btf__add_typedef(btf2, "arraytype", 15); /* [28] typedef int[3] foo; */ + btf__add_func_proto(btf2, 1); /* [29] int (*)(struct from proto p1); */ + btf__add_func_param(btf2, "p1", 16); + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=8 vlen=1\n" + "\t'f1' type_id=2 bits_offset=0", + "[4] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=32", + "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)", + "[6] UNION 'u1' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=2 bits_offset=0", + "[7] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n" + "\t'v1' val=1", + "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1024", + "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[12] STRUCT 'unneeded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[13] STRUCT 'embedded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3", + "[16] STRUCT 'from_proto' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[17] UNION 'u1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[18] PTR '(anon)' type_id=3", + "[19] PTR '(anon)' type_id=4", + "[20] CONST '(anon)' type_id=6", + "[21] RESTRICT '(anon)' type_id=7", + "[22] VOLATILE '(anon)' type_id=8", + "[23] TYPEDEF 'et' type_id=9", + "[24] CONST 
'(anon)' type_id=10", + "[25] PTR '(anon)' type_id=11", + "[26] STRUCT 'with_embedded' size=4 vlen=1\n" + "\t'f1' type_id=13 bits_offset=0", + "[27] FUNC 'fn' type_id=14 linkage=static", + "[28] TYPEDEF 'arraytype' type_id=15", + "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=16"); + + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(8, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf4, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's1' size=8 vlen=0", + "[3] UNION 'u1' size=12 vlen=0", + "[4] ENUM 'e1' encoding=UNSIGNED size=4 vlen=0", + "[5] ENUM 'e641' encoding=UNSIGNED size=8 vlen=0", + "[6] STRUCT 'embedded' size=4 vlen=0", + "[7] STRUCT 'from_proto' size=4 vlen=0", + /* split BTF; these types should match split BTF above from 17-28, with + * updated type id references + */ + "[8] PTR '(anon)' type_id=2", + "[9] PTR '(anon)' type_id=20", + "[10] CONST '(anon)' type_id=3", + "[11] RESTRICT '(anon)' type_id=21", + "[12] VOLATILE '(anon)' type_id=4", + "[13] TYPEDEF 'et' type_id=22", + "[14] CONST '(anon)' type_id=5", + "[15] PTR '(anon)' type_id=23", + "[16] STRUCT 'with_embedded' size=4 vlen=1\n" + "\t'f1' type_id=6 bits_offset=0", + "[17] FUNC 'fn' type_id=24 linkage=static", + "[18] TYPEDEF 'arraytype' type_id=25", + "[19] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=7", + /* split BTF types added from original base BTF below */ + "[20] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=2 bits_offset=32", + "[21] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[22] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[23] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[24] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[25] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3"); + +cleanup: + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* create split reference BTF from vmlinux + split BTF with a few type references; + * ensure the resultant split reference BTF is as expected, containing only types + * needed to disambiguate references from split BTF. 
+ */ +static void test_distilled_base_vmlinux(void) +{ + struct btf *split_btf = NULL, *vmlinux_btf = btf__load_vmlinux_btf(); + struct btf *split_dist = NULL, *base_dist = NULL; + __s32 int_id, myint_id; + + if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux")) + return; + int_id = btf__find_by_name_kind(vmlinux_btf, "int", BTF_KIND_INT); + if (!ASSERT_GT(int_id, 0, "find_int")) + goto cleanup; + split_btf = btf__new_empty_split(vmlinux_btf); + if (!ASSERT_OK_PTR(split_btf, "new_split")) + goto cleanup; + myint_id = btf__add_typedef(split_btf, "myint", int_id); + btf__add_ptr(split_btf, myint_id); + + if (!ASSERT_EQ(btf__distill_base(split_btf, &base_dist, &split_dist), 0, + "distill_vmlinux_base")) + goto cleanup; + + if (!ASSERT_OK_PTR(split_dist, "split_distilled") || + !ASSERT_OK_PTR(base_dist, "base_dist")) + goto cleanup; + VALIDATE_RAW_BTF( + split_dist, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] TYPEDEF 'myint' type_id=1", + "[3] PTR '(anon)' type_id=2"); + +cleanup: + btf__free(split_dist); + btf__free(base_dist); + btf__free(split_btf); + btf__free(vmlinux_btf); +} + +void test_btf_distill(void) +{ + if (test__start_subtest("distilled_base")) + test_distilled_base(); + if (test__start_subtest("distilled_base_vmlinux")) + test_distilled_base_vmlinux(); +} -- cgit v1.2.3 From 19e00c897d5031bed969dd79af28e899e038009f Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 13 Jun 2024 10:50:08 +0100 Subject: libbpf: Split BTF relocation Map distilled base BTF type ids referenced in split BTF and their references to the base BTF passed in, and if the mapping succeeds, reparent the split BTF to the base BTF. Relocation is done by first verifying that distilled base BTF only consists of named INT, FLOAT, ENUM, FWD, STRUCT and UNION kinds; then we sort these to speed lookups. Once sorted, the base BTF is iterated, and for each relevant kind we check for an equivalent in distilled base BTF. When found, the mapping from distilled -> base BTF id and string offset is recorded. In establishing mappings, we need to ensure we check STRUCT/UNION size when the STRUCT/UNION is embedded in a split BTF STRUCT/UNION, and when duplicate names exist for the same STRUCT/UNION. Otherwise size is ignored in matching STRUCT/UNIONs. Once all mappings are established, we can update type ids and string offsets in split BTF and reparent it to the new base. 
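For illustration only (not part of this patch), a minimal consumer-side sketch of the distill/relocate flow described above; the helper name and error handling are hypothetical, and it assumes the btf__distill_base() API from the earlier patch together with the btf__relocate() API added here:

  #include <bpf/btf.h>

  /* Hypothetical helper: distill a base from split BTF, then relocate the
   * distilled split BTF against a (possibly different) base such as the
   * running kernel's vmlinux BTF.
   */
  static int distill_and_relocate(struct btf *split_btf, const struct btf *base_btf)
  {
          struct btf *dist_base = NULL, *dist_split = NULL;
          int err;

          err = btf__distill_base(split_btf, &dist_base, &dist_split);
          if (err)
                  return err;

          /* dist_base/dist_split would normally be written out as the
           * .BTF.base/.BTF sections; here the distilled split BTF is
           * relocated directly against base_btf.
           */
          err = btf__relocate(dist_split, base_btf);

          btf__free(dist_split);
          btf__free(dist_base);
          return err;
  }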
Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240613095014.357981-4-alan.maguire@oracle.com --- tools/lib/bpf/Build | 2 +- tools/lib/bpf/btf.c | 17 ++ tools/lib/bpf/btf.h | 14 ++ tools/lib/bpf/btf_relocate.c | 506 ++++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.map | 1 + tools/lib/bpf/libbpf_internal.h | 3 + 6 files changed, 542 insertions(+), 1 deletion(-) create mode 100644 tools/lib/bpf/btf_relocate.c diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index b6619199a706..336da6844d42 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1,4 +1,4 @@ libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \ netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \ - usdt.o zip.o elf.o features.o + usdt.o zip.o elf.o features.o btf_relocate.o diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 407ed92b4134..5e20354fbcfa 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -5583,3 +5583,20 @@ done: return 0; } + +const struct btf_header *btf_header(const struct btf *btf) +{ + return btf->hdr; +} + +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) +{ + btf->base_btf = (struct btf *)base_btf; + btf->start_id = btf__type_cnt(base_btf); + btf->start_str_off = base_btf->hdr->str_len; +} + +int btf__relocate(struct btf *btf, const struct btf *base_btf) +{ + return libbpf_err(btf_relocate(btf, base_btf, NULL)); +} diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index cb08ee9a5a10..8a93120b7385 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -252,6 +252,20 @@ struct btf_dedup_opts { LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts); +/** + * @brief **btf__relocate()** will check the split BTF *btf* for references + * to base BTF kinds, and verify those references are compatible with + * *base_btf*; if they are, *btf* is adjusted such that is re-parented to + * *base_btf* and type ids and strings are adjusted to accommodate this. + * + * If successful, 0 is returned and **btf** now has **base_btf** as its + * base. + * + * A negative value is returned on error and the thread-local `errno` variable + * is set to the error code as well. + */ +LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf); + struct btf_dump; struct btf_dump_opts { diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c new file mode 100644 index 000000000000..eabb8755f662 --- /dev/null +++ b/tools/lib/bpf/btf_relocate.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Oracle and/or its affiliates. */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "btf.h" +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" + +struct btf; + +struct btf_relocate { + struct btf *btf; + const struct btf *base_btf; + const struct btf *dist_base_btf; + unsigned int nr_base_types; + unsigned int nr_split_types; + unsigned int nr_dist_base_types; + int dist_str_len; + int base_str_len; + __u32 *id_map; + __u32 *str_map; +}; + +/* Set temporarily in relocation id_map if distilled base struct/union is + * embedded in a split BTF struct/union; in such a case, size information must + * match between distilled base BTF and base BTF representation of type. + */ +#define BTF_IS_EMBEDDED ((__u32)-1) + +/* triple used in sorting/searching distilled base BTF. 
*/ +struct btf_name_info { + const char *name; + /* set when search requires a size match */ + int needs_size:1, + size:31; + __u32 id; +}; + +static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i) +{ + struct btf_type *t = btf_type_by_id(r->btf, i); + struct btf_field_iter it; + __u32 *id; + int err; + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); + if (err) + return err; + + while ((id = btf_field_iter_next(&it))) + *id = r->id_map[*id]; + return 0; +} + +/* Simple string comparison used for sorting within BTF, since all distilled + * types are named. If strings match, and size is non-zero for both elements + * fall back to using size for ordering. + */ +static int cmp_btf_name_size(const void *n1, const void *n2) +{ + const struct btf_name_info *ni1 = n1; + const struct btf_name_info *ni2 = n2; + int name_diff = strcmp(ni1->name, ni2->name); + + if (!name_diff && ni1->needs_size && ni2->needs_size) + return ni2->size - ni1->size; + return name_diff; +} + +/* Binary search with a small twist; find leftmost element that matches + * so that we can then iterate through all exact matches. So for example + * searching { "a", "bb", "bb", "c" } we would always match on the + * leftmost "bb". + */ +static struct btf_name_info *search_btf_name_size(struct btf_name_info *key, + struct btf_name_info *vals, + int nelems) +{ + struct btf_name_info *ret = NULL; + int high = nelems - 1; + int low = 0; + + while (low <= high) { + int mid = (low + high)/2; + struct btf_name_info *val = &vals[mid]; + int diff = cmp_btf_name_size(key, val); + + if (diff == 0) + ret = val; + /* even if found, keep searching for leftmost match */ + if (diff <= 0) + high = mid - 1; + else + low = mid + 1; + } + return ret; +} + +/* If a member of a split BTF struct/union refers to a base BTF + * struct/union, mark that struct/union id temporarily in the id_map + * with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef + * reference types, but if a pointer is encountered, the type is no longer + * considered embedded. + */ +static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i) +{ + struct btf_type *t = btf_type_by_id(r->btf, i); + struct btf_field_iter it; + __u32 *id; + int err; + + if (!btf_is_composite(t)) + return 0; + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); + if (err) + return err; + + while ((id = btf_field_iter_next(&it))) { + __u32 next_id = *id; + + while (next_id) { + t = btf_type_by_id(r->btf, next_id); + switch (btf_kind(t)) { + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_VOLATILE: + case BTF_KIND_TYPEDEF: + case BTF_KIND_TYPE_TAG: + next_id = t->type; + break; + case BTF_KIND_ARRAY: { + struct btf_array *a = btf_array(t); + + next_id = a->type; + break; + } + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + if (next_id < r->nr_dist_base_types) + r->id_map[next_id] = BTF_IS_EMBEDDED; + next_id = 0; + break; + default: + next_id = 0; + break; + } + } + } + + return 0; +} + +/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate + * through base BTF looking up distilled type (using binary search) equivalents. + */ +static int btf_relocate_map_distilled_base(struct btf_relocate *r) +{ + struct btf_name_info *dist_base_info_sorted, *dist_base_info_sorted_end; + struct btf_type *base_t, *dist_t; + __u8 *base_name_cnt = NULL; + int err = 0; + __u32 id; + + /* generate a sort index array of name/type ids sorted by name for + * distilled base BTF to speed name-based lookups. 
+ */ + dist_base_info_sorted = calloc(r->nr_dist_base_types, sizeof(*dist_base_info_sorted)); + if (!dist_base_info_sorted) { + err = -ENOMEM; + goto done; + } + dist_base_info_sorted_end = dist_base_info_sorted + r->nr_dist_base_types; + for (id = 0; id < r->nr_dist_base_types; id++) { + dist_t = btf_type_by_id(r->dist_base_btf, id); + dist_base_info_sorted[id].name = btf__name_by_offset(r->dist_base_btf, + dist_t->name_off); + dist_base_info_sorted[id].id = id; + dist_base_info_sorted[id].size = dist_t->size; + dist_base_info_sorted[id].needs_size = true; + } + qsort(dist_base_info_sorted, r->nr_dist_base_types, sizeof(*dist_base_info_sorted), + cmp_btf_name_size); + + /* Mark distilled base struct/union members of split BTF structs/unions + * in id_map with BTF_IS_EMBEDDED; this signals that these types + * need to match both name and size, otherwise embeddding the base + * struct/union in the split type is invalid. + */ + for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) { + err = btf_mark_embedded_composite_type_ids(r, id); + if (err) + goto done; + } + + /* Collect name counts for composite types in base BTF. If multiple + * instances of a struct/union of the same name exist, we need to use + * size to determine which to map to since name alone is ambiguous. + */ + base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt)); + if (!base_name_cnt) { + err = -ENOMEM; + goto done; + } + for (id = 1; id < r->nr_base_types; id++) { + base_t = btf_type_by_id(r->base_btf, id); + if (!btf_is_composite(base_t) || !base_t->name_off) + continue; + if (base_name_cnt[base_t->name_off] < 255) + base_name_cnt[base_t->name_off]++; + } + + /* Now search base BTF for matching distilled base BTF types. */ + for (id = 1; id < r->nr_base_types; id++) { + struct btf_name_info *dist_name_info, *dist_name_info_next = NULL; + struct btf_name_info base_name_info = {}; + int dist_kind, base_kind; + + base_t = btf_type_by_id(r->base_btf, id); + /* distilled base consists of named types only. */ + if (!base_t->name_off) + continue; + base_kind = btf_kind(base_t); + base_name_info.id = id; + base_name_info.name = btf__name_by_offset(r->base_btf, base_t->name_off); + switch (base_kind) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + /* These types should match both name and size */ + base_name_info.needs_size = true; + base_name_info.size = base_t->size; + break; + case BTF_KIND_FWD: + /* No size considerations for fwds. */ + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + /* Size only needs to be used for struct/union if there + * are multiple types in base BTF with the same name. + * If there are multiple _distilled_ types with the same + * name (a very unlikely scenario), that doesn't matter + * unless corresponding _base_ types to match them are + * missing. + */ + base_name_info.needs_size = base_name_cnt[base_t->name_off] > 1; + base_name_info.size = base_t->size; + break; + default: + continue; + } + /* iterate over all matching distilled base types */ + for (dist_name_info = search_btf_name_size(&base_name_info, dist_base_info_sorted, + r->nr_dist_base_types); + dist_name_info != NULL; dist_name_info = dist_name_info_next) { + /* Are there more distilled matches to process after + * this one? 
+ */ + dist_name_info_next = dist_name_info + 1; + if (dist_name_info_next >= dist_base_info_sorted_end || + cmp_btf_name_size(&base_name_info, dist_name_info_next)) + dist_name_info_next = NULL; + + if (!dist_name_info->id || dist_name_info->id > r->nr_dist_base_types) { + pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n", + id, dist_name_info->id); + err = -EINVAL; + goto done; + } + dist_t = btf_type_by_id(r->dist_base_btf, dist_name_info->id); + dist_kind = btf_kind(dist_t); + + /* Validate that the found distilled type is compatible. + * Do not error out on mismatch as another match may + * occur for an identically-named type. + */ + switch (dist_kind) { + case BTF_KIND_FWD: + switch (base_kind) { + case BTF_KIND_FWD: + if (btf_kflag(dist_t) != btf_kflag(base_t)) + continue; + break; + case BTF_KIND_STRUCT: + if (btf_kflag(base_t)) + continue; + break; + case BTF_KIND_UNION: + if (!btf_kflag(base_t)) + continue; + break; + default: + continue; + } + break; + case BTF_KIND_INT: + if (dist_kind != base_kind || + btf_int_encoding(base_t) != btf_int_encoding(dist_t)) + continue; + break; + case BTF_KIND_FLOAT: + if (dist_kind != base_kind) + continue; + break; + case BTF_KIND_ENUM: + /* ENUM and ENUM64 are encoded as sized ENUM in + * distilled base BTF. + */ + if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64) + continue; + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + /* size verification is required for embedded + * struct/unions. + */ + if (r->id_map[dist_name_info->id] == BTF_IS_EMBEDDED && + base_t->size != dist_t->size) + continue; + break; + default: + continue; + } + if (r->id_map[dist_name_info->id] && + r->id_map[dist_name_info->id] != BTF_IS_EMBEDDED) { + /* we already have a match; this tells us that + * multiple base types of the same name + * have the same size, since for cases where + * multiple types have the same name we match + * on name and size. In this case, we have + * no way of determining which to relocate + * to in base BTF, so error out. + */ + pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n", + base_name_info.name, dist_name_info->id, + base_t->size, id, r->id_map[dist_name_info->id]); + err = -EINVAL; + goto done; + } + /* map id and name */ + r->id_map[dist_name_info->id] = id; + r->str_map[dist_t->name_off] = base_t->name_off; + } + } + /* ensure all distilled BTF ids now have a mapping... */ + for (id = 1; id < r->nr_dist_base_types; id++) { + const char *name; + + if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED) + continue; + dist_t = btf_type_by_id(r->dist_base_btf, id); + name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off); + pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n", + name, id); + err = -EINVAL; + break; + } +done: + free(base_name_cnt); + free(dist_base_info_sorted); + return err; +} + +/* distilled base should only have named int/float/enum/fwd/struct/union types. 
*/ +static int btf_relocate_validate_distilled_base(struct btf_relocate *r) +{ + unsigned int i; + + for (i = 1; i < r->nr_dist_base_types; i++) { + struct btf_type *t = btf_type_by_id(r->dist_base_btf, i); + int kind = btf_kind(t); + + switch (kind) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_FWD: + if (t->name_off) + break; + pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n", + i, kind); + return -EINVAL; + default: + pr_warn("type [%d] in distilled based BTF has unexpected kind [%d]\n", + i, kind); + return -EINVAL; + } + } + return 0; +} + +static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i) +{ + struct btf_type *t = btf_type_by_id(r->btf, i); + struct btf_field_iter it; + __u32 *str_off; + int off, err; + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); + if (err) + return err; + + while ((str_off = btf_field_iter_next(&it))) { + if (!*str_off) + continue; + if (*str_off >= r->dist_str_len) { + *str_off += r->base_str_len - r->dist_str_len; + } else { + off = r->str_map[*str_off]; + if (!off) { + pr_warn("string '%s' [offset %u] is not mapped to base BTF", + btf__str_by_offset(r->btf, off), *str_off); + return -ENOENT; + } + *str_off = off; + } + } + return 0; +} + +/* If successful, output of relocation is updated BTF with base BTF pointing + * at base_btf, and type ids, strings adjusted accordingly. + */ +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map) +{ + unsigned int nr_types = btf__type_cnt(btf); + const struct btf_header *dist_base_hdr; + const struct btf_header *base_hdr; + struct btf_relocate r = {}; + int err = 0; + __u32 id, i; + + r.dist_base_btf = btf__base_btf(btf); + if (!base_btf || r.dist_base_btf == base_btf) + return -EINVAL; + + r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf); + r.nr_base_types = btf__type_cnt(base_btf); + r.nr_split_types = nr_types - r.nr_dist_base_types; + r.btf = btf; + r.base_btf = base_btf; + + r.id_map = calloc(nr_types, sizeof(*r.id_map)); + r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map)); + dist_base_hdr = btf_header(r.dist_base_btf); + base_hdr = btf_header(r.base_btf); + r.dist_str_len = dist_base_hdr->str_len; + r.base_str_len = base_hdr->str_len; + if (!r.id_map || !r.str_map) { + err = -ENOMEM; + goto err_out; + } + + err = btf_relocate_validate_distilled_base(&r); + if (err) + goto err_out; + + /* Split BTF ids need to be adjusted as base and distilled base + * have different numbers of types, changing the start id of split + * BTF. + */ + for (id = r.nr_dist_base_types; id < nr_types; id++) + r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types; + + /* Build a map from distilled base ids to actual base BTF ids; it is used + * to update split BTF id references. Also build a str_map mapping from + * distilled base BTF names to base BTF names. + */ + err = btf_relocate_map_distilled_base(&r); + if (err) + goto err_out; + + /* Next, rewrite type ids in split BTF, replacing split ids with updated + * ids based on number of types in base BTF, and base ids with + * relocated ids from base_btf. + */ + for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) { + err = btf_relocate_rewrite_type_id(&r, id); + if (err) + goto err_out; + } + /* String offsets now need to be updated using the str_map. 
*/ + for (i = 0; i < r.nr_split_types; i++) { + err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types); + if (err) + goto err_out; + } + /* Finally reset base BTF to be base_btf */ + btf_set_base_btf(btf, base_btf); + + if (id_map) { + *id_map = r.id_map; + r.id_map = NULL; + } +err_out: + free(r.id_map); + free(r.str_map); + return err; +} diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 175d1536e070..8f0d9ea3b1b4 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -420,6 +420,7 @@ LIBBPF_1.4.0 { LIBBPF_1.5.0 { global: btf__distill_base; + btf__relocate; bpf_map__autoattach; bpf_map__set_autoattach; bpf_program__attach_sockmap; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index e2f06609c624..408df59e0771 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -234,6 +234,9 @@ struct btf_type; struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id); const char *btf_kind_str(const struct btf_type *t); const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); +const struct btf_header *btf_header(const struct btf *btf); +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf); +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map); static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t) { -- cgit v1.2.3 From affdeb50616b190c3236cc2bf116e1b931a43be2 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 13 Jun 2024 10:50:09 +0100 Subject: selftests/bpf: Extend distilled BTF tests to cover BTF relocation Ensure relocated BTF looks as expected; in this case identical to original split BTF, with a few duplicate anonymous types added to split BTF by the relocation process. Also add relocation tests for edge cases like missing type in base BTF and multiple types of the same name. 
Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240613095014.357981-5-alan.maguire@oracle.com --- .../testing/selftests/bpf/prog_tests/btf_distill.c | 278 +++++++++++++++++++++ 1 file changed, 278 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c index 5c3a38747962..bfbe795823a2 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_distill.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c @@ -217,7 +217,277 @@ static void test_distilled_base(void) "\t'p1' type_id=1", "[25] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3"); + if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf4, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=8 vlen=1\n" + "\t'f1' type_id=2 bits_offset=0", + "[4] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=32", + "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)", + "[6] UNION 'u1' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=2 bits_offset=0", + "[7] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n" + "\t'v1' val=1", + "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1024", + "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[12] STRUCT 'unneeded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[13] STRUCT 'embedded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3", + "[16] STRUCT 'from_proto' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[17] UNION 'u1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[18] PTR '(anon)' type_id=3", + "[19] PTR '(anon)' type_id=30", + "[20] CONST '(anon)' type_id=6", + "[21] RESTRICT '(anon)' type_id=31", + "[22] VOLATILE '(anon)' type_id=8", + "[23] TYPEDEF 'et' type_id=32", + "[24] CONST '(anon)' type_id=10", + "[25] PTR '(anon)' type_id=33", + "[26] STRUCT 'with_embedded' size=4 vlen=1\n" + "\t'f1' type_id=13 bits_offset=0", + "[27] FUNC 'fn' type_id=34 linkage=static", + "[28] TYPEDEF 'arraytype' type_id=35", + "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=16", + /* below here are (duplicate) anon base types added by distill + * process to split BTF. + */ + "[30] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=32", + "[31] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[32] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[33] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[34] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[35] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3"); + +cleanup: + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* ensure we can cope with multiple types with the same name in + * distilled base BTF. In this case because sizes are different, + * we can still disambiguate them. 
+ */ +static void test_distilled_base_multi(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + btf__add_ptr(btf2, 1); + btf__add_const(btf2, 2); + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] CONST '(anon)' type_id=2"); + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + btf3, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); + if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf4, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] CONST '(anon)' type_id=2"); + +cleanup: + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* If a needed type is not present in the base BTF we wish to relocate + * with, btf__relocate() should error our. 
+ */ +static void test_distilled_base_missing_err(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + btf__add_ptr(btf2, 1); + btf__add_const(btf2, 2); + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] CONST '(anon)' type_id=2"); + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + btf3, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); + btf5 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) + return; + btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ + VALIDATE_RAW_BTF( + btf5, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split"); + +cleanup: + btf__free(btf5); + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* With 2 types of same size in distilled base BTF, relocation should + * fail as we have no means to choose between them. + */ +static void test_distilled_base_multi_err(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [2] int */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + btf__add_ptr(btf2, 1); + btf__add_const(btf2, 2); + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] CONST '(anon)' type_id=2"); + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + btf3, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + ASSERT_EQ(btf__relocate(btf4, btf1), -EINVAL, "relocate_split"); +cleanup: + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* With 2 types of same size in base BTF, relocation should + * fail as we have no means to choose between them. 
+ */ +static void test_distilled_base_multi_err2(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + btf__add_ptr(btf2, 1); + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1"); + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + btf3, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + btf5 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) + return; + btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [2] int */ + VALIDATE_RAW_BTF( + btf5, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split"); cleanup: + btf__free(btf5); btf__free(btf4); btf__free(btf3); btf__free(btf2); @@ -269,6 +539,14 @@ void test_btf_distill(void) { if (test__start_subtest("distilled_base")) test_distilled_base(); + if (test__start_subtest("distilled_base_multi")) + test_distilled_base_multi(); + if (test__start_subtest("distilled_base_missing_err")) + test_distilled_base_missing_err(); + if (test__start_subtest("distilled_base_multi_err")) + test_distilled_base_multi_err(); + if (test__start_subtest("distilled_base_multi_err2")) + test_distilled_base_multi_err2(); if (test__start_subtest("distilled_base_vmlinux")) test_distilled_base_vmlinux(); } -- cgit v1.2.3 From c86f180ffc993975fed5907a869fc9b1555d0cfb Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Thu, 13 Jun 2024 10:50:10 +0100 Subject: libbpf: Make btf_parse_elf process .BTF.base transparently Update btf_parse_elf() to check if .BTF.base section is present. The logic is as follows: if .BTF.base section exists: distilled_base := btf_new(.BTF.base) if distilled_base: btf := btf_new(.BTF, .base_btf=distilled_base) if base_btf: btf_relocate(btf, base_btf) else: btf := btf_new(.BTF) return btf In other words: - if .BTF.base section exists, load BTF from it and use it as a base for .BTF load; - if base_btf is specified and .BTF.base section exist, relocate newly loaded .BTF against base_btf. 
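For illustration only (not part of this patch), how a caller might rely on this behaviour; the object path and helper name are hypothetical and error handling is trimmed:

  #include <bpf/btf.h>
  #include <bpf/libbpf.h>

  static struct btf *load_object_btf(const char *path)
  {
          struct btf *vmlinux_btf, *btf;

          vmlinux_btf = btf__load_vmlinux_btf();
          if (!vmlinux_btf)
                  return NULL;

          /* If the ELF at 'path' carries a .BTF.base section, its .BTF is
           * parsed against that distilled base and then relocated against
           * vmlinux_btf; otherwise vmlinux_btf is used as the base directly.
           * Either way vmlinux_btf must outlive the returned BTF.
           */
          btf = btf__parse_split(path, vmlinux_btf);
          if (!btf)
                  btf__free(vmlinux_btf);
          return btf;
  }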
Signed-off-by: Eduard Zingerman Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240613095014.357981-6-alan.maguire@oracle.com --- tools/lib/bpf/btf.c | 164 +++++++++++++++++++++++++++++++++++----------------- tools/lib/bpf/btf.h | 1 + 2 files changed, 111 insertions(+), 54 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 5e20354fbcfa..ef1b2f573c1b 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -116,6 +116,9 @@ struct btf { /* whether strings are already deduplicated */ bool strs_deduped; + /* whether base_btf should be freed in btf_free for this instance */ + bool owns_base; + /* BTF object FD, if loaded into kernel */ int fd; @@ -969,6 +972,8 @@ void btf__free(struct btf *btf) free(btf->raw_data); free(btf->raw_data_swapped); free(btf->type_offs); + if (btf->owns_base) + btf__free(btf->base_btf); free(btf); } @@ -1084,53 +1089,38 @@ struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf) return libbpf_ptr(btf_new(data, size, base_btf)); } -static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, - struct btf_ext **btf_ext) +struct btf_elf_secs { + Elf_Data *btf_data; + Elf_Data *btf_ext_data; + Elf_Data *btf_base_data; +}; + +static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs) { - Elf_Data *btf_data = NULL, *btf_ext_data = NULL; - int err = 0, fd = -1, idx = 0; - struct btf *btf = NULL; Elf_Scn *scn = NULL; - Elf *elf = NULL; + Elf_Data *data; GElf_Ehdr ehdr; size_t shstrndx; + int idx = 0; - if (elf_version(EV_CURRENT) == EV_NONE) { - pr_warn("failed to init libelf for %s\n", path); - return ERR_PTR(-LIBBPF_ERRNO__LIBELF); - } - - fd = open(path, O_RDONLY | O_CLOEXEC); - if (fd < 0) { - err = -errno; - pr_warn("failed to open %s: %s\n", path, strerror(errno)); - return ERR_PTR(err); - } - - err = -LIBBPF_ERRNO__FORMAT; - - elf = elf_begin(fd, ELF_C_READ, NULL); - if (!elf) { - pr_warn("failed to open %s as ELF file\n", path); - goto done; - } if (!gelf_getehdr(elf, &ehdr)) { pr_warn("failed to get EHDR from %s\n", path); - goto done; + goto err; } if (elf_getshdrstrndx(elf, &shstrndx)) { pr_warn("failed to get section names section index for %s\n", path); - goto done; + goto err; } if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) { pr_warn("failed to get e_shstrndx from %s\n", path); - goto done; + goto err; } while ((scn = elf_nextscn(elf, scn)) != NULL) { + Elf_Data **field; GElf_Shdr sh; char *name; @@ -1138,42 +1128,102 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, if (gelf_getshdr(scn, &sh) != &sh) { pr_warn("failed to get section(%d) header from %s\n", idx, path); - goto done; + goto err; } name = elf_strptr(elf, shstrndx, sh.sh_name); if (!name) { pr_warn("failed to get section(%d) name from %s\n", idx, path); - goto done; + goto err; } - if (strcmp(name, BTF_ELF_SEC) == 0) { - btf_data = elf_getdata(scn, 0); - if (!btf_data) { - pr_warn("failed to get section(%d, %s) data from %s\n", - idx, name, path); - goto done; - } - continue; - } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) { - btf_ext_data = elf_getdata(scn, 0); - if (!btf_ext_data) { - pr_warn("failed to get section(%d, %s) data from %s\n", - idx, name, path); - goto done; - } + + if (strcmp(name, BTF_ELF_SEC) == 0) + field = &secs->btf_data; + else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) + field = &secs->btf_ext_data; + else if (strcmp(name, BTF_BASE_ELF_SEC) == 0) + field = &secs->btf_base_data; + else continue; + + 
data = elf_getdata(scn, 0); + if (!data) { + pr_warn("failed to get section(%d, %s) data from %s\n", + idx, name, path); + goto err; } + *field = data; + } + + return 0; + +err: + return -LIBBPF_ERRNO__FORMAT; +} + +static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, + struct btf_ext **btf_ext) +{ + struct btf_elf_secs secs = {}; + struct btf *dist_base_btf = NULL; + struct btf *btf = NULL; + int err = 0, fd = -1; + Elf *elf = NULL; + + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warn("failed to init libelf for %s\n", path); + return ERR_PTR(-LIBBPF_ERRNO__LIBELF); + } + + fd = open(path, O_RDONLY | O_CLOEXEC); + if (fd < 0) { + err = -errno; + pr_warn("failed to open %s: %s\n", path, strerror(errno)); + return ERR_PTR(err); } - if (!btf_data) { + elf = elf_begin(fd, ELF_C_READ, NULL); + if (!elf) { + pr_warn("failed to open %s as ELF file\n", path); + goto done; + } + + err = btf_find_elf_sections(elf, path, &secs); + if (err) + goto done; + + if (!secs.btf_data) { pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path); err = -ENODATA; goto done; } - btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf); - err = libbpf_get_error(btf); - if (err) + + if (secs.btf_base_data) { + dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size, + NULL); + if (IS_ERR(dist_base_btf)) { + err = PTR_ERR(dist_base_btf); + dist_base_btf = NULL; + goto done; + } + } + + btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size, + dist_base_btf ?: base_btf); + if (IS_ERR(btf)) { + err = PTR_ERR(btf); goto done; + } + if (dist_base_btf && base_btf) { + err = btf__relocate(btf, base_btf); + if (err) + goto done; + btf__free(dist_base_btf); + dist_base_btf = NULL; + } + + if (dist_base_btf) + btf->owns_base = true; switch (gelf_getclass(elf)) { case ELFCLASS32: @@ -1187,11 +1237,12 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, break; } - if (btf_ext && btf_ext_data) { - *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); - err = libbpf_get_error(*btf_ext); - if (err) + if (btf_ext && secs.btf_ext_data) { + *btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size); + if (IS_ERR(*btf_ext)) { + err = PTR_ERR(*btf_ext); goto done; + } } else if (btf_ext) { *btf_ext = NULL; } @@ -1205,6 +1256,7 @@ done: if (btf_ext) btf_ext__free(*btf_ext); + btf__free(dist_base_btf); btf__free(btf); return ERR_PTR(err); @@ -5598,5 +5650,9 @@ void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) int btf__relocate(struct btf *btf, const struct btf *base_btf) { - return libbpf_err(btf_relocate(btf, base_btf, NULL)); + int err = btf_relocate(btf, base_btf, NULL); + + if (!err) + btf->owns_base = false; + return libbpf_err(err); } diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 8a93120b7385..b68d216837a9 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -18,6 +18,7 @@ extern "C" { #define BTF_ELF_SEC ".BTF" #define BTF_EXT_ELF_SEC ".BTF.ext" +#define BTF_BASE_ELF_SEC ".BTF.base" #define MAPS_ELF_SEC ".maps" struct btf; -- cgit v1.2.3 From 6ba77385f386053cea2a1cad33717de74a26db4e Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 13 Jun 2024 10:50:11 +0100 Subject: resolve_btfids: Handle presence of .BTF.base section Now that btf_parse_elf() handles .BTF.base section presence, we need to ensure that resolve_btfids uses .BTF.base when present rather than the vmlinux base BTF passed in via the -B option. 
Detect .BTF.base section presence and unset the base BTF path to ensure that BTF ELF parsing will do the right thing. Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Reviewed-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240613095014.357981-7-alan.maguire@oracle.com --- tools/bpf/resolve_btfids/main.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index af393c7dee1f..936ef95c3d32 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -409,6 +409,14 @@ static int elf_collect(struct object *obj) obj->efile.idlist = data; obj->efile.idlist_shndx = idx; obj->efile.idlist_addr = sh.sh_addr; + } else if (!strcmp(name, BTF_BASE_ELF_SEC)) { + /* If a .BTF.base section is found, do not resolve + * BTF ids relative to vmlinux; resolve relative + * to the .BTF.base section instead. btf__parse_split() + * will take care of this once the base BTF it is + * passed is NULL. + */ + obj->base_btf_path = NULL; } if (compressed_section_fix(elf, scn, &sh)) -- cgit v1.2.3 From 01793ed86b5d7df1e956520b5474940743eb7ed8 Mon Sep 17 00:00:00 2001 From: Leon Hwang Date: Mon, 10 Jun 2024 20:42:23 +0800 Subject: bpf, verifier: Correct tail_call_reachable for bpf prog It's confusing to inspect 'prog->aux->tail_call_reachable' with drgn[0], when bpf prog has tail call but 'tail_call_reachable' is false. This patch corrects 'tail_call_reachable' when bpf prog has tail call. Signed-off-by: Leon Hwang Link: https://lore.kernel.org/r/20240610124224.34673-2-hffilwlqm@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index dcbbf5f64c5d..ffe98a788c33 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2982,8 +2982,10 @@ static int check_subprogs(struct bpf_verifier_env *env) if (code == (BPF_JMP | BPF_CALL) && insn[i].src_reg == 0 && - insn[i].imm == BPF_FUNC_tail_call) + insn[i].imm == BPF_FUNC_tail_call) { subprog[cur_subprog].has_tail_call = true; + subprog[cur_subprog].tail_call_reachable = true; + } if (BPF_CLASS(code) == BPF_LD && (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) subprog[cur_subprog].has_ld_abs = true; -- cgit v1.2.3 From f663a03c8e35c5156bad073a4a8f5e673d656e3f Mon Sep 17 00:00:00 2001 From: Leon Hwang Date: Mon, 10 Jun 2024 20:42:24 +0800 Subject: bpf, x64: Remove tail call detection As 'prog->aux->tail_call_reachable' is correct for tail call present, it's unnecessary to detect tail call in x86 jit. Therefore, let's remove it. 
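For illustration only (not part of these patches), a minimal tail-call program; with the verifier change above, check_subprogs() marks it tail_call_reachable, which is why the JIT-side scan is now redundant. Map and section names are arbitrary:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
          __uint(max_entries, 1);
          __uint(key_size, sizeof(__u32));
          __uint(value_size, sizeof(__u32));
  } jmp_table SEC(".maps");

  SEC("tc")
  int entry(struct __sk_buff *skb)
  {
          /* presence of bpf_tail_call() makes this program tail_call_reachable */
          bpf_tail_call(skb, &jmp_table, 0);
          return 0;
  }

  char _license[] SEC("license") = "GPL";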
Signed-off-by: Leon Hwang Link: https://lore.kernel.org/r/20240610124224.34673-3-hffilwlqm@gmail.com Signed-off-by: Alexei Starovoitov --- arch/x86/net/bpf_jit_comp.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5159c7a22922..7c130001fbfe 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1234,13 +1234,11 @@ bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) } static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, - bool *regs_used, bool *tail_call_seen) + bool *regs_used) { int i; for (i = 1; i <= insn_cnt; i++, insn++) { - if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) - *tail_call_seen = true; if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) regs_used[0] = true; if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) @@ -1324,7 +1322,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image struct bpf_insn *insn = bpf_prog->insnsi; bool callee_regs_used[4] = {}; int insn_cnt = bpf_prog->len; - bool tail_call_seen = false; bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; u64 arena_vm_start, user_vm_start; @@ -1336,11 +1333,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena); user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena); - detect_reg_usage(insn, insn_cnt, callee_regs_used, - &tail_call_seen); - - /* tail call's presence in current prog implies it is reachable */ - tail_call_reachable |= tail_call_seen; + detect_reg_usage(insn, insn_cnt, callee_regs_used); emit_prologue(&prog, bpf_prog->aux->stack_depth, bpf_prog_was_classic(bpf_prog), tail_call_reachable, -- cgit v1.2.3 From 9919c5c98cb25dbf7e76aadb9beab55a2a25f830 Mon Sep 17 00:00:00 2001 From: Rafael Passos Date: Fri, 14 Jun 2024 23:24:08 -0300 Subject: bpf: remove unused parameter in bpf_jit_binary_pack_finalize Fixes a compiler warning. the bpf_jit_binary_pack_finalize function was taking an extra bpf_prog parameter that went unused. This removves it and updates the callers accordingly. 
Signed-off-by: Rafael Passos Link: https://lore.kernel.org/r/20240615022641.210320-2-rafael@rcpassos.me Signed-off-by: Alexei Starovoitov --- arch/arm64/net/bpf_jit_comp.c | 3 +-- arch/powerpc/net/bpf_jit_comp.c | 4 ++-- arch/riscv/net/bpf_jit_core.c | 5 ++--- arch/x86/net/bpf_jit_comp.c | 4 ++-- include/linux/filter.h | 3 +-- kernel/bpf/core.c | 3 +-- 6 files changed, 9 insertions(+), 13 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 720336d28856..6edaeafd1499 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1829,8 +1829,7 @@ skip_init_ctx: prog->jited_len = 0; goto out_free_hdr; } - if (WARN_ON(bpf_jit_binary_pack_finalize(prog, ro_header, - header))) { + if (WARN_ON(bpf_jit_binary_pack_finalize(ro_header, header))) { /* ro_header has been freed */ ro_header = NULL; prog = orig_prog; diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 984655419da5..2a36cc2e7e9e 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -225,7 +225,7 @@ skip_init_ctx: fp->jited_len = proglen + FUNCTION_DESCR_SIZE; if (!fp->is_func || extra_pass) { - if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) { + if (bpf_jit_binary_pack_finalize(fhdr, hdr)) { fp = org_fp; goto out_addrs; } @@ -348,7 +348,7 @@ void bpf_jit_free(struct bpf_prog *fp) * before freeing it. */ if (jit_data) { - bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr); + bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr); kvfree(jit_data->addrs); kfree(jit_data); } diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index 0a96abdaca65..6de753c667f4 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -178,8 +178,7 @@ skip_init_ctx: prog->jited_len = prog_size - cfi_get_offset(); if (!prog->is_func || extra_pass) { - if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, - jit_data->header))) { + if (WARN_ON(bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header))) { /* ro_header has been freed */ jit_data->ro_header = NULL; prog = orig_prog; @@ -258,7 +257,7 @@ void bpf_jit_free(struct bpf_prog *prog) * before freeing it. */ if (jit_data) { - bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header); + bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header); kfree(jit_data); } hdr = bpf_jit_binary_pack_hdr(prog); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 7c130001fbfe..d25d81c8ecc0 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -3356,7 +3356,7 @@ out_image: * * Both cases are serious bugs and justify WARN_ON. */ - if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { + if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) { /* header has been freed */ header = NULL; goto out_image; @@ -3435,7 +3435,7 @@ void bpf_jit_free(struct bpf_prog *prog) * before freeing it. 
*/ if (jit_data) { - bpf_jit_binary_pack_finalize(prog, jit_data->header, + bpf_jit_binary_pack_finalize(jit_data->header, jit_data->rw_header); kvfree(jit_data->addrs); kfree(jit_data); diff --git a/include/linux/filter.h b/include/linux/filter.h index b02aea291b7e..dd41a93f06b2 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1129,8 +1129,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image, struct bpf_binary_header **rw_hdr, u8 **rw_image, bpf_jit_fill_hole_t bpf_fill_ill_insns); -int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, - struct bpf_binary_header *ro_header, +int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header); void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 1a6c3faa6e4a..f6951c33790d 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1174,8 +1174,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr, } /* Copy JITed text from rw_header to its final location, the ro_header. */ -int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, - struct bpf_binary_header *ro_header, +int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header) { void *ptr; -- cgit v1.2.3 From ab224b9ef7c4eaa752752455ea79bd7022209d5d Mon Sep 17 00:00:00 2001 From: Rafael Passos Date: Fri, 14 Jun 2024 23:24:09 -0300 Subject: bpf: remove unused parameter in __bpf_free_used_btfs Fixes a compiler warning. The __bpf_free_used_btfs function was taking an extra unused struct bpf_prog_aux *aux param Signed-off-by: Rafael Passos Link: https://lore.kernel.org/r/20240615022641.210320-3-rafael@rcpassos.me Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 3 +-- kernel/bpf/core.c | 5 ++--- kernel/bpf/verifier.c | 3 +-- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f636b4998bf7..960780ef04e1 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2933,8 +2933,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) return ret; } -void __bpf_free_used_btfs(struct bpf_prog_aux *aux, - struct btf_mod_pair *used_btfs, u32 len); +void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len); static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index f6951c33790d..ae2e1eeda0d4 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2742,8 +2742,7 @@ static void bpf_free_used_maps(struct bpf_prog_aux *aux) kfree(aux->used_maps); } -void __bpf_free_used_btfs(struct bpf_prog_aux *aux, - struct btf_mod_pair *used_btfs, u32 len) +void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len) { #ifdef CONFIG_BPF_SYSCALL struct btf_mod_pair *btf_mod; @@ -2760,7 +2759,7 @@ void __bpf_free_used_btfs(struct bpf_prog_aux *aux, static void bpf_free_used_btfs(struct bpf_prog_aux *aux) { - __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt); + __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt); kfree(aux->used_btfs); } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ffe98a788c33..3f6be4923655 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -18694,8 +18694,7 @@ static void release_maps(struct bpf_verifier_env *env) /* drop refcnt of maps used by the rejected program */ static void release_btfs(struct bpf_verifier_env *env) { - 
__bpf_free_used_btfs(env->prog->aux, env->used_btfs, - env->used_btf_cnt); + __bpf_free_used_btfs(env->used_btfs, env->used_btf_cnt); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ -- cgit v1.2.3 From 21ab4980e02d495174bc64c00ceb4d3cf87fadb1 Mon Sep 17 00:00:00 2001 From: Rafael Passos Date: Fri, 14 Jun 2024 23:24:10 -0300 Subject: bpf: remove redeclaration of new_n in bpf_verifier_vlog This new_n is defined at the start of this function. Its value is overwritten by `new_n = min(n, log->len_total);` a couple of lines before my change, rendering the shadow declaration unnecessary. Signed-off-by: Rafael Passos Link: https://lore.kernel.org/r/20240615022641.210320-4-rafael@rcpassos.me Signed-off-by: Alexei Starovoitov --- kernel/bpf/log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c index 3f4ae92e549f..5aebfc3051e3 100644 --- a/kernel/bpf/log.c +++ b/kernel/bpf/log.c @@ -91,7 +91,7 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, goto fail; } else { u64 new_end, new_start; - u32 buf_start, buf_end, new_n; + u32 buf_start, buf_end; new_end = log->end_pos + n; if (new_end - log->start_pos >= log->len_total) -- cgit v1.2.3 From 34ad6ec972525b903d4680202d7b8360f71d0d89 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 21 Jun 2024 10:15:58 +0800 Subject: selftests/bpf: Drop type from network_helper_opts The opts.{type, noconnect} fields are at least a bit non-intuitive or unnecessary. The only use case now is in test_bpf_ip_check_defrag_ok, which ends up bypassing most (or at least some) of the connect_to_fd_opts() logic. It's much better for that test to have its own connect_to_fd_opts() call instead. This patch adds a new "type" parameter to connect_to_fd_opts(), so that opts->type and getsockopt(SO_TYPE) can be replaced by the "type" parameter in it. In connect_to_fd(), use getsockopt(SO_TYPE) to get the "type" value and pass it to connect_to_fd_opts(). In bpf_tcp_ca.c and cgroup_v1v2.c, "SOCK_STREAM" types are passed to connect_to_fd_opts(), and in ip_check_defrag.c, different types "SOCK_RAW" and "SOCK_DGRAM" are passed to it. With these changes, the struct member "type" of network_helper_opts can be dropped now. 
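For illustration only (not part of this patch), a caller after this change passes the socket type explicitly; this assumes network_helpers.h is included and server_fd is an already-listening socket:

  struct network_helper_opts opts = {
          .timeout_ms = 1000,
  };
  /* socket type is now an explicit argument rather than opts->type */
  int client_fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts);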
Acked-by: Eduard Zingerman Signed-off-by: Geliang Tang Link: https://lore.kernel.org/r/cfd20b5ad4085c1d1af5e79df3b09013a407199f.1718932493.git.tanggeliang@kylinos.cn Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/network_helpers.c | 45 ++++++++++------------ tools/testing/selftests/bpf/network_helpers.h | 3 +- .../testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 2 +- .../testing/selftests/bpf/prog_tests/cgroup_v1v2.c | 4 +- .../selftests/bpf/prog_tests/ip_check_defrag.c | 5 +-- 5 files changed, 26 insertions(+), 33 deletions(-) diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index e20caef06aae..c0646d5a4283 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -303,36 +303,16 @@ error_close: return -1; } -int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts) +int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts) { struct sockaddr_storage addr; struct sockaddr_in *addr_in; - socklen_t addrlen, optlen; - int fd, type, protocol; + socklen_t addrlen; + int fd; if (!opts) opts = &default_opts; - optlen = sizeof(type); - - if (opts->type) { - type = opts->type; - } else { - if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { - log_err("getsockopt(SOL_TYPE)"); - return -1; - } - } - - if (opts->proto) { - protocol = opts->proto; - } else { - if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) { - log_err("getsockopt(SOL_PROTOCOL)"); - return -1; - } - } - addrlen = sizeof(addr); if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) { log_err("Failed to get server addr"); @@ -340,7 +320,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts) } addr_in = (struct sockaddr_in *)&addr; - fd = socket(addr_in->sin_family, type, protocol); + fd = socket(addr_in->sin_family, type, opts->proto); if (fd < 0) { log_err("Failed to create client socket"); return -1; @@ -369,8 +349,23 @@ int connect_to_fd(int server_fd, int timeout_ms) struct network_helper_opts opts = { .timeout_ms = timeout_ms, }; + int type, protocol; + socklen_t optlen; + + optlen = sizeof(type); + if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { + log_err("getsockopt(SOL_TYPE)"); + return -1; + } + + optlen = sizeof(protocol); + if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) { + log_err("getsockopt(SOL_PROTOCOL)"); + return -1; + } + opts.proto = protocol; - return connect_to_fd_opts(server_fd, &opts); + return connect_to_fd_opts(server_fd, type, &opts); } int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 11eea8e2e4f1..c92bed35dfe2 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -25,7 +25,6 @@ struct network_helper_opts { int timeout_ms; bool must_fail; bool noconnect; - int type; int proto; int (*post_socket_cb)(int fd, void *opts); void *cb_opts; @@ -61,7 +60,7 @@ void free_fds(int *fds, unsigned int nr_close_fds); int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t len, const struct network_helper_opts *opts); int connect_to_fd(int server_fd, int timeout_ms); -int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts); +int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts 
*opts); int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms); int fastopen_connect(int server_fd, const char *data, unsigned int data_len, int timeout_ms); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 67358adf5db3..164f237b24dd 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -49,7 +49,7 @@ static bool start_test(char *addr_str, goto err; /* connect to server */ - *cli_fd = connect_to_fd_opts(*srv_fd, cli_opts); + *cli_fd = connect_to_fd_opts(*srv_fd, SOCK_STREAM, cli_opts); if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts")) goto err; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c index addf720428f7..9709c8db7275 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -32,7 +32,7 @@ static int run_test(int cgroup_fd, int server_fd, bool classid) goto out; } - fd = connect_to_fd_opts(server_fd, &opts); + fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts); if (fd < 0) err = -1; else @@ -52,7 +52,7 @@ void test_cgroup_v1v2(void) server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); if (!ASSERT_GE(server_fd, 0, "server_fd")) return; - client_fd = connect_to_fd_opts(server_fd, &opts); + client_fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts); if (!ASSERT_GE(client_fd, 0, "client_fd")) { close(server_fd); return; diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c index 284764e7179f..1607a05bf2c2 100644 --- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c +++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c @@ -164,7 +164,6 @@ void test_bpf_ip_check_defrag_ok(bool ipv6) }; struct network_helper_opts tx_ops = { .timeout_ms = 1000, - .type = SOCK_RAW, .proto = IPPROTO_RAW, .noconnect = true, }; @@ -201,7 +200,7 @@ void test_bpf_ip_check_defrag_ok(bool ipv6) nstoken = open_netns(NS0); if (!ASSERT_OK_PTR(nstoken, "setns ns0")) goto out; - client_tx_fd = connect_to_fd_opts(srv_fd, &tx_ops); + client_tx_fd = connect_to_fd_opts(srv_fd, SOCK_RAW, &tx_ops); close_netns(nstoken); if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts")) goto out; @@ -210,7 +209,7 @@ void test_bpf_ip_check_defrag_ok(bool ipv6) nstoken = open_netns(NS0); if (!ASSERT_OK_PTR(nstoken, "setns ns0")) goto out; - client_rx_fd = connect_to_fd_opts(srv_fd, &rx_opts); + client_rx_fd = connect_to_fd_opts(srv_fd, SOCK_DGRAM, &rx_opts); close_netns(nstoken); if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts")) goto out; -- cgit v1.2.3 From 08a5206240d3763e0c6d91a9a4a9bfbb8fc9600c Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 21 Jun 2024 10:15:59 +0800 Subject: selftests/bpf: Use connect_to_addr in connect_to_fd_opt This patch moves "post_socket_cb" and "noconnect" into connect_to_addr(), then connect_to_fd_opts() can be implemented by getsockname() and connect_to_addr(). This change makes connect_to_* interfaces more unified. 
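A minimal caller-side sketch of the reworked helper (illustrative only, not part of this patch; the SO_REUSEADDR callback stands in for whatever per-socket setup a test needs): connect_to_addr() now applies the timeout and runs post_socket_cb itself before connecting.

static int reuseaddr_cb(int fd, void *opts)
{
	int on = 1;

	return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
}

static int connect_with_cb(const struct sockaddr_storage *addr, socklen_t len)
{
	struct network_helper_opts opts = {
		.timeout_ms = 1000,
		.post_socket_cb = reuseaddr_cb,
	};

	/* timeout + callback are handled inside connect_to_addr() now */
	return connect_to_addr(SOCK_STREAM, addr, len, &opts);
}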
Acked-by: Eduard Zingerman Signed-off-by: Geliang Tang Link: https://lore.kernel.org/r/4569c30533e14c22fae6c05070aad809720551c1.1718932493.git.tanggeliang@kylinos.cn Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/network_helpers.c | 33 ++++++--------------------- 1 file changed, 7 insertions(+), 26 deletions(-) diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index c0646d5a4283..5d1b4f165def 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -293,9 +293,14 @@ int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t add if (settimeo(fd, opts->timeout_ms)) goto error_close; - if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail)) + if (opts->post_socket_cb && + opts->post_socket_cb(fd, opts->cb_opts)) goto error_close; + if (!opts->noconnect) + if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail)) + goto error_close; + return fd; error_close: @@ -306,9 +311,7 @@ error_close: int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts) { struct sockaddr_storage addr; - struct sockaddr_in *addr_in; socklen_t addrlen; - int fd; if (!opts) opts = &default_opts; @@ -319,29 +322,7 @@ int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts return -1; } - addr_in = (struct sockaddr_in *)&addr; - fd = socket(addr_in->sin_family, type, opts->proto); - if (fd < 0) { - log_err("Failed to create client socket"); - return -1; - } - - if (settimeo(fd, opts->timeout_ms)) - goto error_close; - - if (opts->post_socket_cb && - opts->post_socket_cb(fd, opts->cb_opts)) - goto error_close; - - if (!opts->noconnect) - if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail)) - goto error_close; - - return fd; - -error_close: - save_errno_close(fd); - return -1; + return connect_to_addr(type, &addr, addrlen, opts); } int connect_to_fd(int server_fd, int timeout_ms) -- cgit v1.2.3 From bbca57aa378b43d25af2ec360b3e8bc4185d65cf Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 21 Jun 2024 10:16:00 +0800 Subject: selftests/bpf: Add client_socket helper This patch extracts a new helper client_socket() from connect_to_fd_opts() to create the client socket, but don't connect to the server. Then connect_to_fd_opts() can be implemented using client_socket() and connect_fd_to_addr(). This helper can be used in connect_to_addr() too, and make "noconnect" opts useless. 
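As a rough usage sketch (values mirror the defrag test later in this series, but the wrapper function is illustrative), the new helper hands back a configured yet still unconnected socket:

static int open_raw_tx_socket(void)
{
	struct network_helper_opts opts = {
		.timeout_ms = 1000,
		.proto = IPPROTO_RAW,
	};

	/* socket() + settimeo() + optional post_socket_cb, but no connect() */
	return client_socket(AF_INET, SOCK_RAW, &opts);
}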
Acked-by: Eduard Zingerman Signed-off-by: Geliang Tang Link: https://lore.kernel.org/r/4169c554e1cee79223feea49a1adc459d55e1ffe.1718932493.git.tanggeliang@kylinos.cn Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/network_helpers.c | 37 +++++++++++++++++++++------ tools/testing/selftests/bpf/network_helpers.h | 2 ++ 2 files changed, 31 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 5d1b4f165def..5f8214e2880d 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -249,6 +249,34 @@ error_close: return -1; } +int client_socket(int family, int type, + const struct network_helper_opts *opts) +{ + int fd; + + if (!opts) + opts = &default_opts; + + fd = socket(family, type, opts->proto); + if (fd < 0) { + log_err("Failed to create client socket"); + return -1; + } + + if (settimeo(fd, opts->timeout_ms)) + goto error_close; + + if (opts->post_socket_cb && + opts->post_socket_cb(fd, opts->cb_opts)) + goto error_close; + + return fd; + +error_close: + save_errno_close(fd); + return -1; +} + static int connect_fd_to_addr(int fd, const struct sockaddr_storage *addr, socklen_t addrlen, const bool must_fail) @@ -284,19 +312,12 @@ int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t add if (!opts) opts = &default_opts; - fd = socket(addr->ss_family, type, opts->proto); + fd = client_socket(addr->ss_family, type, opts); if (fd < 0) { log_err("Failed to create client socket"); return -1; } - if (settimeo(fd, opts->timeout_ms)) - goto error_close; - - if (opts->post_socket_cb && - opts->post_socket_cb(fd, opts->cb_opts)) - goto error_close; - if (!opts->noconnect) if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail)) goto error_close; diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index c92bed35dfe2..e89eadfb02d6 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -57,6 +57,8 @@ int *start_reuseport_server(int family, int type, const char *addr_str, int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t len, const struct network_helper_opts *opts); void free_fds(int *fds, unsigned int nr_close_fds); +int client_socket(int family, int type, + const struct network_helper_opts *opts); int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t len, const struct network_helper_opts *opts); int connect_to_fd(int server_fd, int timeout_ms); -- cgit v1.2.3 From 7f0d5140a6d69d3e63467a220a2a1e0c9ec1463a Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 21 Jun 2024 10:16:01 +0800 Subject: selftests/bpf: Drop noconnect from network_helper_opts In test_bpf_ip_check_defrag_ok(), the new helper client_socket() can be used to replace connect_to_fd_opts() with "noconnect" opts, and the strcut member "noconnect" of network_helper_opts can be dropped now, always connect to server in connect_to_fd_opts(). 
Acked-by: Eduard Zingerman Signed-off-by: Geliang Tang Link: https://lore.kernel.org/r/f45760becce51986e4e08283c7df0f933eb0da14.1718932493.git.tanggeliang@kylinos.cn Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/network_helpers.c | 5 ++--- tools/testing/selftests/bpf/network_helpers.h | 1 - tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c | 13 ++++++------- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 5f8214e2880d..44c2c8fa542a 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -318,9 +318,8 @@ int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t add return -1; } - if (!opts->noconnect) - if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail)) - goto error_close; + if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail)) + goto error_close; return fd; diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index e89eadfb02d6..9ea36524b9db 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -24,7 +24,6 @@ typedef __u16 __sum16; struct network_helper_opts { int timeout_ms; bool must_fail; - bool noconnect; int proto; int (*post_socket_cb)(int fd, void *opts); void *cb_opts; diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c index 1607a05bf2c2..4ddb8a5fece8 100644 --- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c +++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c @@ -158,14 +158,13 @@ static int send_frags6(int client) void test_bpf_ip_check_defrag_ok(bool ipv6) { + int family = ipv6 ? AF_INET6 : AF_INET; struct network_helper_opts rx_opts = { .timeout_ms = 1000, - .noconnect = true, }; struct network_helper_opts tx_ops = { .timeout_ms = 1000, .proto = IPPROTO_RAW, - .noconnect = true, }; struct sockaddr_storage caddr; struct ip_check_defrag *skel; @@ -191,7 +190,7 @@ void test_bpf_ip_check_defrag_ok(bool ipv6) nstoken = open_netns(NS1); if (!ASSERT_OK_PTR(nstoken, "setns ns1")) goto out; - srv_fd = start_server(ipv6 ? 
AF_INET6 : AF_INET, SOCK_DGRAM, NULL, SERVER_PORT, 0); + srv_fd = start_server(family, SOCK_DGRAM, NULL, SERVER_PORT, 0); close_netns(nstoken); if (!ASSERT_GE(srv_fd, 0, "start_server")) goto out; @@ -200,18 +199,18 @@ void test_bpf_ip_check_defrag_ok(bool ipv6) nstoken = open_netns(NS0); if (!ASSERT_OK_PTR(nstoken, "setns ns0")) goto out; - client_tx_fd = connect_to_fd_opts(srv_fd, SOCK_RAW, &tx_ops); + client_tx_fd = client_socket(family, SOCK_RAW, &tx_ops); close_netns(nstoken); - if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts")) + if (!ASSERT_GE(client_tx_fd, 0, "client_socket")) goto out; /* Open rx socket in ns0 */ nstoken = open_netns(NS0); if (!ASSERT_OK_PTR(nstoken, "setns ns0")) goto out; - client_rx_fd = connect_to_fd_opts(srv_fd, SOCK_DGRAM, &rx_opts); + client_rx_fd = client_socket(family, SOCK_DGRAM, &rx_opts); close_netns(nstoken); - if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts")) + if (!ASSERT_GE(client_rx_fd, 0, "client_socket")) goto out; /* Bind rx socket to a premeditated port */ -- cgit v1.2.3 From fb69f71cf585aabb2f59c6d7958bccfaebe64f5d Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 21 Jun 2024 10:16:02 +0800 Subject: selftests/bpf: Use start_server_str in mptcp Since start_server_str() is added now, it can be used in mptcp.c in start_mptcp_server() instead of using helpers make_sockaddr() and start_server_addr() to simplify the code. Acked-by: Eduard Zingerman Signed-off-by: Geliang Tang Link: https://lore.kernel.org/r/16fb3e2cd60b64b5470b0e69f1aa233feaf2717c.1718932493.git.tanggeliang@kylinos.cn Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/mptcp.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index 274d2e033e39..d2ca32fa3b21 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -89,13 +89,8 @@ static int start_mptcp_server(int family, const char *addr_str, __u16 port, .timeout_ms = timeout_ms, .proto = IPPROTO_MPTCP, }; - struct sockaddr_storage addr; - socklen_t addrlen; - if (make_sockaddr(family, addr_str, port, &addr, &addrlen)) - return -1; - - return start_server_addr(SOCK_STREAM, &addr, addrlen, &opts); + return start_server_str(family, SOCK_STREAM, addr_str, port, &opts); } static int verify_tsk(int map_fd, int client_fd) -- cgit v1.2.3 From 8cab7cdcf5aebec354ede98bca28c08dd9df924c Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 21 Jun 2024 10:16:03 +0800 Subject: selftests/bpf: Use start_server_str in test_tcp_check_syncookie_user Since start_server_str() is added now, it can be used in script test_tcp_check_syncookie_user.c instead of start_server_addr() to simplify the code. 
Acked-by: Eduard Zingerman Signed-off-by: Geliang Tang Link: https://lore.kernel.org/r/5d2f442261d37cff16c1f1b21a2b188508ab67fa.1718932493.git.tanggeliang@kylinos.cn Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/test_tcp_check_syncookie_user.c | 29 +++------------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c index aebc58c24dc5..3844f9b8232a 100644 --- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c +++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c @@ -156,10 +156,6 @@ static int v6only_false(int fd, void *opts) int main(int argc, char **argv) { struct network_helper_opts opts = { 0 }; - struct sockaddr_in addr4; - struct sockaddr_in6 addr6; - struct sockaddr_in addr4dual; - struct sockaddr_in6 addr6dual; int server = -1; int server_v6 = -1; int server_dual = -1; @@ -181,36 +177,17 @@ int main(int argc, char **argv) goto err; } - memset(&addr4, 0, sizeof(addr4)); - addr4.sin_family = AF_INET; - addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - addr4.sin_port = 0; - memcpy(&addr4dual, &addr4, sizeof(addr4dual)); - - memset(&addr6, 0, sizeof(addr6)); - addr6.sin6_family = AF_INET6; - addr6.sin6_addr = in6addr_loopback; - addr6.sin6_port = 0; - - memset(&addr6dual, 0, sizeof(addr6dual)); - addr6dual.sin6_family = AF_INET6; - addr6dual.sin6_addr = in6addr_any; - addr6dual.sin6_port = 0; - - server = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr4, - sizeof(addr4), NULL); + server = start_server_str(AF_INET, SOCK_STREAM, "127.0.0.1", 0, NULL); if (server == -1) goto err; opts.post_socket_cb = v6only_true; - server_v6 = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr6, - sizeof(addr6), &opts); + server_v6 = start_server_str(AF_INET6, SOCK_STREAM, "::1", 0, &opts); if (server_v6 == -1) goto err; opts.post_socket_cb = v6only_false; - server_dual = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr6dual, - sizeof(addr6dual), &opts); + server_dual = start_server_str(AF_INET6, SOCK_STREAM, "::0", 0, &opts); if (server_dual == -1) goto err; -- cgit v1.2.3 From 717d6313bba1b3179f0bf1026aaec6b7e26f484e Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 19 Jun 2024 10:16:24 +0200 Subject: bpf: Change bpf_session_cookie return value to __u64 * This reverts [1] and changes return value for bpf_session_cookie in bpf selftests. Having long * might lead to problems on 32-bit architectures. 
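For reference, a minimal session program using the changed signature might look like the sketch below (attach target, stored value, and program name are illustrative; the real usage is in the selftest touched by this patch):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_kfuncs.h"

char _license[] SEC("license") = "GPL";

SEC("kprobe.session/bpf_fentry_test1")
int BPF_PROG(handle_session)
{
	__u64 *cookie = bpf_session_cookie();	/* was long *, now __u64 * */

	if (!cookie)
		return 0;

	if (!bpf_session_is_return())
		*cookie = bpf_ktime_get_ns();	/* stash a value on entry */
	else
		bpf_printk("entry stamp: %llu", *cookie);	/* read it back on return */

	return 0;
}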
Fixes: 2b8dd87332cd ("bpf: Make bpf_session_cookie() kfunc return long *") Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240619081624.1620152-1-jolsa@kernel.org --- kernel/trace/bpf_trace.c | 2 +- tools/testing/selftests/bpf/bpf_kfuncs.h | 2 +- tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 4b3fda456299..cd098846e251 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3530,7 +3530,7 @@ __bpf_kfunc bool bpf_session_is_return(void) return session_ctx->is_return; } -__bpf_kfunc long *bpf_session_cookie(void) +__bpf_kfunc __u64 *bpf_session_cookie(void) { struct bpf_session_run_ctx *session_ctx; diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h index be91a6919315..3b6675ab4086 100644 --- a/tools/testing/selftests/bpf/bpf_kfuncs.h +++ b/tools/testing/selftests/bpf/bpf_kfuncs.h @@ -77,5 +77,5 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr, struct bpf_key *trusted_keyring) __ksym; extern bool bpf_session_is_return(void) __ksym __weak; -extern long *bpf_session_cookie(void) __ksym __weak; +extern __u64 *bpf_session_cookie(void) __ksym __weak; #endif diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c index d49070803e22..0835b5edf685 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c @@ -25,7 +25,7 @@ int BPF_PROG(trigger) static int check_cookie(__u64 val, __u64 *result) { - long *cookie; + __u64 *cookie; if (bpf_get_current_pid_tgid() >> 32 != pid) return 1; -- cgit v1.2.3 From 651337c7ca82c259bf5c8fe9beda9673531a0031 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 18 Jun 2024 11:38:32 -0700 Subject: bpftool: Allow compile-time checks of BPF map auto-attach support in skeleton New versions of bpftool now emit additional link placeholders for BPF maps (struct_ops maps are the only maps right now that support attachment), and set up BPF skeleton in such a way that libbpf will auto-attach BPF maps automatically, assumming libbpf is recent enough (v1.5+). Old libbpf will do nothing with those links and won't attempt to auto-attach maps. This allows user code to handle both pre-v1.5 and v1.5+ versions of libbpf at runtime, if necessary. But if users don't have (or don't want to) control bpftool version that generates skeleton, then they can't just assume that skeleton will have link placeholders. To make this detection possible and easy, let's add the following to generated skeleton header file: #define BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH 1 This can be used during compilation time to guard code that accesses skel->links. slots. Note, if auto-attachment is undesirable, libbpf allows to disable this through bpf_map__set_autoattach(map, false). This is necessary only on libbpf v1.5+, older libbpf doesn't support map auto-attach anyways. Libbpf version can be detected at compilation time using LIBBPF_MAJOR_VERSION and LIBBPF_MINOR_VERSION macros, or at runtime with libbpf_major_version() and libbpf_minor_version() APIs. 
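A compile-time guard in user code might then look like this sketch (the skeleton type "my_skel" and the struct_ops map names "ops" and "ops_nouse" are hypothetical):

#include <stdio.h>
#include <bpf/libbpf.h>
#include "my_skel.skel.h"

static void tune_autoattach(struct my_skel *skel)
{
#if LIBBPF_MAJOR_VERSION > 1 || (LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION >= 5)
	/* opt the unused map out of auto-attach; only libbpf v1.5+ has this API */
	bpf_map__set_autoattach(skel->maps.ops_nouse, false);
#endif

#ifdef BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH
	/* only skeletons from a new-enough bpftool have link slots for maps */
	if (skel->links.ops)
		fprintf(stderr, "struct_ops map 'ops' was auto-attached\n");
#endif
}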
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20240618183832.2535876-1-andrii@kernel.org --- tools/bpf/bpftool/gen.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 4a4eedfcd479..51eaed76db97 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -1272,6 +1272,8 @@ static int do_skeleton(int argc, char **argv) #include \n\ #include \n\ \n\ + #define BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH 1 \n\ + \n\ struct %1$s { \n\ struct bpf_object_skeleton *skeleton; \n\ struct bpf_object *obj; \n\ -- cgit v1.2.3 From 6ddf3a9abd9fdfdd63d8c906fc1393f7950c23f4 Mon Sep 17 00:00:00 2001 From: Matt Bobrowski Date: Tue, 18 Jun 2024 19:29:22 +0000 Subject: bpf: Add security_file_post_open() LSM hook to sleepable_lsm_hooks The new generic LSM hook security_file_post_open() was recently added to the LSM framework in commit 8f46ff5767b0b ("security: Introduce file_post_open hook"). Let's proactively add this generic LSM hook to the sleepable_lsm_hooks BTF ID set, because I can't see there being any strong reasons not to, and it's only a matter of time before someone else comes around and asks for it to be there. security_file_post_open() is inherently sleepable as it's purposely situated in the kernel that allows LSMs to directly read out the contents of the backing file if need be. Additionally, it's called directly after security_file_open(), and that LSM hook in itself already exists in the sleepable_lsm_hooks BTF ID set. Signed-off-by: Matt Bobrowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240618192923.379852-1-mattbobrowski@google.com --- kernel/bpf/bpf_lsm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 68240c3c6e7d..08a338e1f231 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -280,6 +280,7 @@ BTF_ID(func, bpf_lsm_cred_prepare) BTF_ID(func, bpf_lsm_file_ioctl) BTF_ID(func, bpf_lsm_file_lock) BTF_ID(func, bpf_lsm_file_open) +BTF_ID(func, bpf_lsm_file_post_open) BTF_ID(func, bpf_lsm_file_receive) BTF_ID(func, bpf_lsm_inode_create) -- cgit v1.2.3 From cc5083d1f3881624ad2de1f3cbb3a07e152cb254 Mon Sep 17 00:00:00 2001 From: Donglin Peng Date: Wed, 19 Jun 2024 05:23:55 -0700 Subject: libbpf: Checking the btf_type kind when fixing variable offsets I encountered an issue when building the test_progs from the repository [1]: $ pwd /work/Qemu/x86_64/linux-6.10-rc2/tools/testing/selftests/bpf/ $ make test_progs V=1 [...] ./tools/sbin/bpftool gen object ./ip_check_defrag.bpf.linked2.o ./ip_check_defrag.bpf.linked1.o libbpf: failed to find symbol for variable 'bpf_dynptr_slice' in section '.ksyms' Error: failed to link './ip_check_defrag.bpf.linked1.o': No such file or directory (2) [...] Upon investigation, I discovered that the btf_types referenced in the '.ksyms' section had a kind of BTF_KIND_FUNC instead of BTF_KIND_VAR: $ bpftool btf dump file ./ip_check_defrag.bpf.linked1.o [...] [2] DATASEC '.ksyms' size=0 vlen=2 type_id=16 offset=0 size=0 (FUNC 'bpf_dynptr_from_skb') type_id=17 offset=0 size=0 (FUNC 'bpf_dynptr_slice') [...] [16] FUNC 'bpf_dynptr_from_skb' type_id=82 linkage=extern [17] FUNC 'bpf_dynptr_slice' type_id=85 linkage=extern [...] For a detailed analysis, please refer to [2]. We can add a kind checking to fix the issue. 
[1] https://github.com/eddyz87/bpf/tree/binsort-btf-dedup [2] https://lore.kernel.org/all/0c0ef20c-c05e-4db9-bad7-2cbc0d6dfae7@oracle.com/ Fixes: 8fd27bf69b86 ("libbpf: Add BPF static linker BTF and BTF.ext support") Signed-off-by: Donglin Peng Signed-off-by: Daniel Borkmann Reviewed-by: Alan Maguire Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240619122355.426405-1-dolinux.peng@gmail.com --- tools/lib/bpf/linker.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index fa11a671da3e..9cd3d4109788 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -2227,10 +2227,17 @@ static int linker_fixup_btf(struct src_obj *obj) vi = btf_var_secinfos(t); for (j = 0, m = btf_vlen(t); j < m; j++, vi++) { const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type); - const char *var_name = btf__str_by_offset(obj->btf, vt->name_off); - int var_linkage = btf_var(vt)->linkage; + const char *var_name; + int var_linkage; Elf64_Sym *sym; + /* could be a variable or function */ + if (!btf_is_var(vt)) + continue; + + var_name = btf__str_by_offset(obj->btf, vt->name_off); + var_linkage = btf_var(vt)->linkage; + /* no need to patch up static or extern vars */ if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED) continue; -- cgit v1.2.3 From f06ae6194f278444201e0b041a00192d794f83b6 Mon Sep 17 00:00:00 2001 From: Cupertino Miranda Date: Mon, 17 Jun 2024 15:14:57 +0100 Subject: selftests/bpf: Support checks against a regular expression Add support for __regex and __regex_unpriv macros to check the test execution output against a regular expression. This is similar to __msg and __msg_unpriv, however those expect do substring matching. Signed-off-by: Cupertino Miranda Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240617141458.471620-2-cupertino.miranda@oracle.com --- tools/testing/selftests/bpf/progs/bpf_misc.h | 11 ++- tools/testing/selftests/bpf/test_loader.c | 115 ++++++++++++++++++++------- 2 files changed, 96 insertions(+), 30 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index fb2f5513e29e..c0280bd2f340 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -7,9 +7,9 @@ * * The test_loader sequentially loads each program in a skeleton. * Programs could be loaded in privileged and unprivileged modes. - * - __success, __failure, __msg imply privileged mode; - * - __success_unpriv, __failure_unpriv, __msg_unpriv imply - * unprivileged mode. + * - __success, __failure, __msg, __regex imply privileged mode; + * - __success_unpriv, __failure_unpriv, __msg_unpriv, __regex_unpriv + * imply unprivileged mode. * If combination of privileged and unprivileged attributes is present * both modes are used. If none are present privileged mode is implied. * @@ -24,6 +24,9 @@ * Multiple __msg attributes could be specified. * __msg_unpriv Same as __msg but for unprivileged mode. * + * __regex Same as __msg, but using a regular expression. + * __regex_unpriv Same as __msg_unpriv but using a regular expression. + * * __success Expect program load success in privileged mode. * __success_unpriv Expect program load success in unprivileged mode. * @@ -59,10 +62,12 @@ * __auxiliary_unpriv Same, but load program in unprivileged mode. 
*/ #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) +#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) #define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) #define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc))) #define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg))) +#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex))) #define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv"))) #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 524c38e9cde4..f14e10b0de96 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -2,6 +2,7 @@ /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ #include #include +#include #include #include @@ -17,9 +18,11 @@ #define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure" #define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success" #define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg=" +#define TEST_TAG_EXPECT_REGEX_PFX "comment:test_expect_regex=" #define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv" #define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv" #define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv=" +#define TEST_TAG_EXPECT_REGEX_PFX_UNPRIV "comment:test_expect_regex_unpriv=" #define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level=" #define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags=" #define TEST_TAG_DESCRIPTION_PFX "comment:test_description=" @@ -46,10 +49,16 @@ enum mode { UNPRIV = 2 }; +struct expect_msg { + const char *substr; /* substring match */ + const char *regex_str; /* regex-based match */ + regex_t regex; +}; + struct test_subspec { char *name; bool expect_failure; - const char **expect_msgs; + struct expect_msg *expect_msgs; size_t expect_msg_cnt; int retval; bool execute; @@ -89,6 +98,16 @@ void test_loader_fini(struct test_loader *tester) static void free_test_spec(struct test_spec *spec) { + int i; + + /* Deallocate expect_msgs arrays. 
*/ + for (i = 0; i < spec->priv.expect_msg_cnt; i++) + if (spec->priv.expect_msgs[i].regex_str) + regfree(&spec->priv.expect_msgs[i].regex); + for (i = 0; i < spec->unpriv.expect_msg_cnt; i++) + if (spec->unpriv.expect_msgs[i].regex_str) + regfree(&spec->unpriv.expect_msgs[i].regex); + free(spec->priv.name); free(spec->unpriv.name); free(spec->priv.expect_msgs); @@ -100,18 +119,38 @@ static void free_test_spec(struct test_spec *spec) spec->unpriv.expect_msgs = NULL; } -static int push_msg(const char *msg, struct test_subspec *subspec) +static int push_msg(const char *substr, const char *regex_str, struct test_subspec *subspec) { void *tmp; + int regcomp_res; + char error_msg[100]; + struct expect_msg *msg; - tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *)); + tmp = realloc(subspec->expect_msgs, + (1 + subspec->expect_msg_cnt) * sizeof(struct expect_msg)); if (!tmp) { ASSERT_FAIL("failed to realloc memory for messages\n"); return -ENOMEM; } subspec->expect_msgs = tmp; - subspec->expect_msgs[subspec->expect_msg_cnt++] = msg; + msg = &subspec->expect_msgs[subspec->expect_msg_cnt]; + + if (substr) { + msg->substr = substr; + msg->regex_str = NULL; + } else { + msg->regex_str = regex_str; + msg->substr = NULL; + regcomp_res = regcomp(&msg->regex, regex_str, REG_EXTENDED|REG_NEWLINE); + if (regcomp_res != 0) { + regerror(regcomp_res, &msg->regex, error_msg, sizeof(error_msg)); + PRINT_FAIL("Regexp compilation error in '%s': '%s'\n", + regex_str, error_msg); + return -EINVAL; + } + } + subspec->expect_msg_cnt += 1; return 0; } @@ -233,13 +272,25 @@ static int parse_test_spec(struct test_loader *tester, spec->mode_mask |= UNPRIV; } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; - err = push_msg(msg, &spec->priv); + err = push_msg(msg, NULL, &spec->priv); if (err) goto cleanup; spec->mode_mask |= PRIV; } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) { msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1; - err = push_msg(msg, &spec->unpriv); + err = push_msg(msg, NULL, &spec->unpriv); + if (err) + goto cleanup; + spec->mode_mask |= UNPRIV; + } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX)) { + msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX) - 1; + err = push_msg(NULL, msg, &spec->priv); + if (err) + goto cleanup; + spec->mode_mask |= PRIV; + } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV)) { + msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX_UNPRIV) - 1; + err = push_msg(NULL, msg, &spec->unpriv); if (err) goto cleanup; spec->mode_mask |= UNPRIV; @@ -337,16 +388,13 @@ static int parse_test_spec(struct test_loader *tester, } if (!spec->unpriv.expect_msgs) { - size_t sz = spec->priv.expect_msg_cnt * sizeof(void *); + for (i = 0; i < spec->priv.expect_msg_cnt; i++) { + struct expect_msg *msg = &spec->priv.expect_msgs[i]; - spec->unpriv.expect_msgs = malloc(sz); - if (!spec->unpriv.expect_msgs) { - PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n"); - err = -ENOMEM; - goto cleanup; + err = push_msg(msg->substr, msg->regex_str, &spec->unpriv); + if (err) + goto cleanup; } - memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz); - spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt; } } @@ -402,27 +450,40 @@ static void validate_case(struct test_loader *tester, struct bpf_program *prog, int load_err) { - int i, j; + int i, j, err; + char *match; + regmatch_t reg_match[1]; for (i = 0; i < subspec->expect_msg_cnt; i++) { - char *match; - const char *expect_msg; - - expect_msg = 
subspec->expect_msgs[i]; + struct expect_msg *msg = &subspec->expect_msgs[i]; + + if (msg->substr) { + match = strstr(tester->log_buf + tester->next_match_pos, msg->substr); + if (match) + tester->next_match_pos = match - tester->log_buf + strlen(msg->substr); + } else { + err = regexec(&msg->regex, + tester->log_buf + tester->next_match_pos, 1, reg_match, 0); + if (err == 0) { + match = tester->log_buf + tester->next_match_pos + reg_match[0].rm_so; + tester->next_match_pos += reg_match[0].rm_eo; + } else { + match = NULL; + } + } - match = strstr(tester->log_buf + tester->next_match_pos, expect_msg); if (!ASSERT_OK_PTR(match, "expect_msg")) { - /* if we are in verbose mode, we've already emitted log */ if (env.verbosity == VERBOSE_NONE) emit_verifier_log(tester->log_buf, true /*force*/); - for (j = 0; j < i; j++) - fprintf(stderr, - "MATCHED MSG: '%s'\n", subspec->expect_msgs[j]); - fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg); + for (j = 0; j <= i; j++) { + msg = &subspec->expect_msgs[j]; + fprintf(stderr, "%s %s: '%s'\n", + j < i ? "MATCHED " : "EXPECTED", + msg->substr ? "SUBSTR" : " REGEX", + msg->substr ?: msg->regex_str); + } return; } - - tester->next_match_pos = match - tester->log_buf + strlen(expect_msg); } } -- cgit v1.2.3 From 3e23c99764d465ae411f0729fd6d2e0e3edd0ade Mon Sep 17 00:00:00 2001 From: Cupertino Miranda Date: Mon, 17 Jun 2024 15:14:58 +0100 Subject: selftests/bpf: Match tests against regular expression This patch changes a few tests to make use of regular expressions. Fixed tests otherwise fail when compiled with GCC. Signed-off-by: Cupertino Miranda Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240617141458.471620-3-cupertino.miranda@oracle.com --- tools/testing/selftests/bpf/progs/dynptr_fail.c | 6 +++--- tools/testing/selftests/bpf/progs/rbtree_fail.c | 2 +- tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 66a60bfb5867..64cc9d936a13 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -964,7 +964,7 @@ int dynptr_invalidate_slice_reinit(void *ctx) * mem_or_null pointers. 
*/ SEC("?raw_tp") -__failure __msg("R1 type=scalar expected=percpu_ptr_") +__failure __regex("R[0-9]+ type=scalar expected=percpu_ptr_") int dynptr_invalidate_slice_or_null(void *ctx) { struct bpf_dynptr ptr; @@ -982,7 +982,7 @@ int dynptr_invalidate_slice_or_null(void *ctx) /* Destruction of dynptr should also any slices obtained from it */ SEC("?raw_tp") -__failure __msg("R7 invalid mem access 'scalar'") +__failure __regex("R[0-9]+ invalid mem access 'scalar'") int dynptr_invalidate_slice_failure(void *ctx) { struct bpf_dynptr ptr1; @@ -1069,7 +1069,7 @@ int dynptr_read_into_slot(void *ctx) /* bpf_dynptr_slice()s are read-only and cannot be written to */ SEC("?tc") -__failure __msg("R0 cannot write into rdonly_mem") +__failure __regex("R[0-9]+ cannot write into rdonly_mem") int skb_invalid_slice_write(struct __sk_buff *skb) { struct bpf_dynptr ptr; diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index 3fecf1c6dfe5..b722a1e1ddef 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=10") +__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") long rbtree_api_remove_no_drop(void *ctx) { struct bpf_rb_node *res; diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c index 1553b9c16aa7..f8d4b7cfcd68 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) } SEC("?tc") -__failure __msg("Unreleased reference id=4 alloc_insn=21") +__failure __regex("Unreleased reference id=4 alloc_insn=[0-9]+") long rbtree_refcounted_node_ref_escapes(void *ctx) { struct node_acquire *n, *m; @@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=9") +__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) { struct node_acquire *n, *m; -- cgit v1.2.3 From 2bb138cb20a6a347cfed84381430cd25e05f118e Mon Sep 17 00:00:00 2001 From: Puranjay Mohan Date: Wed, 19 Jun 2024 13:13:34 +0000 Subject: bpf, arm64: Inline bpf_get_current_task/_btf() helpers On ARM64, the pointer to task_struct is always available in the sp_el0 register and therefore the calls to bpf_get_current_task() and bpf_get_current_task_btf() can be inlined into a single MRS instruction. Here is the difference before and after this change: Before: ; struct task_struct *task = bpf_get_current_task_btf(); 54: mov x10, #0xffffffffffff7978 // #-34440 58: movk x10, #0x802b, lsl #16 5c: movk x10, #0x8000, lsl #32 60: blr x10 --------------> 0xffff8000802b7978 <+0>: mrs x0, sp_el0 64: add x7, x0, #0x0 <-------------- 0xffff8000802b797c <+4>: ret After: ; struct task_struct *task = bpf_get_current_task_btf(); 54: mrs x7, sp_el0 This shows around 1% performance improvement in artificial microbenchmark. 
Signed-off-by: Puranjay Mohan Signed-off-by: Andrii Nakryiko Acked-by: Xu Kuohai Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240619131334.4297-1-puranjay@kernel.org --- arch/arm64/net/bpf_jit_comp.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 6edaeafd1499..751331f5ba90 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1244,6 +1244,13 @@ emit_cond_jmp: break; } + /* Implement helper call to bpf_get_current_task/_btf() inline */ + if (insn->src_reg == 0 && (insn->imm == BPF_FUNC_get_current_task || + insn->imm == BPF_FUNC_get_current_task_btf)) { + emit(A64_MRS_SP_EL0(r0), ctx); + break; + } + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &func_addr, &func_addr_fixed); if (ret < 0) @@ -2580,6 +2587,8 @@ bool bpf_jit_inlines_helper_call(s32 imm) { switch (imm) { case BPF_FUNC_get_smp_processor_id: + case BPF_FUNC_get_current_task: + case BPF_FUNC_get_current_task_btf: return true; default: return false; -- cgit v1.2.3 From cd387ce54834bc7808082c471fd745ce85a0e21f Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Fri, 21 Jun 2024 19:03:24 +0100 Subject: selftests/bpf: Test struct_ops bpf map auto-attach Adding selftest to verify that struct_ops maps are auto attached by bpf skeleton's `*__attach` function. Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240621180324.238379-1-yatsenko@meta.com --- .../testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 35 ++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 164f237b24dd..bceff5900016 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -185,6 +185,39 @@ done: close(fd); } +static void test_dctcp_autoattach_map(void) +{ + struct cb_opts cb_opts = { + .cc = "bpf_dctcp", + }; + struct network_helper_opts opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cb_opts, + }; + struct bpf_dctcp *dctcp_skel; + struct bpf_link *link; + + dctcp_skel = bpf_dctcp__open_and_load(); + if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load")) + return; + + bpf_map__set_autoattach(dctcp_skel->maps.dctcp, true); + bpf_map__set_autoattach(dctcp_skel->maps.dctcp_nouse, false); + + if (!ASSERT_OK(bpf_dctcp__attach(dctcp_skel), "bpf_dctcp__attach")) + goto destroy; + + /* struct_ops is auto-attached */ + link = dctcp_skel->links.dctcp; + if (!ASSERT_OK_PTR(link, "link")) + goto destroy; + + do_test(&opts); + +destroy: + bpf_dctcp__destroy(dctcp_skel); +} + static char *err_str; static bool found; @@ -598,4 +631,6 @@ void test_bpf_tcp_ca(void) test_tcp_ca_kfunc(); if (test__start_subtest("cc_cubic")) test_cc_cubic(); + if (test__start_subtest("dctcp_autoattach_map")) + test_dctcp_autoattach_map(); } -- cgit v1.2.3 From d1cf840854bb603c0718a011bc993f69f2df014e Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 20 Jun 2024 10:17:28 +0100 Subject: libbpf: BTF relocation followup fixing naming, loop logic Use less verbose names in BTF relocation code and fix off-by-one error and typo in btf_relocate.c. Simplify loop over matching distilled types, moving from assigning a _next value in loop body to moving match check conditions into the guard. 
Suggested-by: Andrii Nakryiko Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240620091733.1967885-2-alan.maguire@oracle.com --- tools/lib/bpf/btf_relocate.c | 72 +++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 41 deletions(-) diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c index eabb8755f662..23a41fb03e0d 100644 --- a/tools/lib/bpf/btf_relocate.c +++ b/tools/lib/bpf/btf_relocate.c @@ -160,7 +160,7 @@ static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i) */ static int btf_relocate_map_distilled_base(struct btf_relocate *r) { - struct btf_name_info *dist_base_info_sorted, *dist_base_info_sorted_end; + struct btf_name_info *info, *info_end; struct btf_type *base_t, *dist_t; __u8 *base_name_cnt = NULL; int err = 0; @@ -169,26 +169,24 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r) /* generate a sort index array of name/type ids sorted by name for * distilled base BTF to speed name-based lookups. */ - dist_base_info_sorted = calloc(r->nr_dist_base_types, sizeof(*dist_base_info_sorted)); - if (!dist_base_info_sorted) { + info = calloc(r->nr_dist_base_types, sizeof(*info)); + if (!info) { err = -ENOMEM; goto done; } - dist_base_info_sorted_end = dist_base_info_sorted + r->nr_dist_base_types; + info_end = info + r->nr_dist_base_types; for (id = 0; id < r->nr_dist_base_types; id++) { dist_t = btf_type_by_id(r->dist_base_btf, id); - dist_base_info_sorted[id].name = btf__name_by_offset(r->dist_base_btf, - dist_t->name_off); - dist_base_info_sorted[id].id = id; - dist_base_info_sorted[id].size = dist_t->size; - dist_base_info_sorted[id].needs_size = true; + info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off); + info[id].id = id; + info[id].size = dist_t->size; + info[id].needs_size = true; } - qsort(dist_base_info_sorted, r->nr_dist_base_types, sizeof(*dist_base_info_sorted), - cmp_btf_name_size); + qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size); /* Mark distilled base struct/union members of split BTF structs/unions * in id_map with BTF_IS_EMBEDDED; this signals that these types - * need to match both name and size, otherwise embeddding the base + * need to match both name and size, otherwise embedding the base * struct/union in the split type is invalid. */ for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) { @@ -216,8 +214,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r) /* Now search base BTF for matching distilled base BTF types. 
*/ for (id = 1; id < r->nr_base_types; id++) { - struct btf_name_info *dist_name_info, *dist_name_info_next = NULL; - struct btf_name_info base_name_info = {}; + struct btf_name_info *dist_info, base_info = {}; int dist_kind, base_kind; base_t = btf_type_by_id(r->base_btf, id); @@ -225,16 +222,16 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r) if (!base_t->name_off) continue; base_kind = btf_kind(base_t); - base_name_info.id = id; - base_name_info.name = btf__name_by_offset(r->base_btf, base_t->name_off); + base_info.id = id; + base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off); switch (base_kind) { case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: /* These types should match both name and size */ - base_name_info.needs_size = true; - base_name_info.size = base_t->size; + base_info.needs_size = true; + base_info.size = base_t->size; break; case BTF_KIND_FWD: /* No size considerations for fwds. */ @@ -248,31 +245,24 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r) * unless corresponding _base_ types to match them are * missing. */ - base_name_info.needs_size = base_name_cnt[base_t->name_off] > 1; - base_name_info.size = base_t->size; + base_info.needs_size = base_name_cnt[base_t->name_off] > 1; + base_info.size = base_t->size; break; default: continue; } /* iterate over all matching distilled base types */ - for (dist_name_info = search_btf_name_size(&base_name_info, dist_base_info_sorted, - r->nr_dist_base_types); - dist_name_info != NULL; dist_name_info = dist_name_info_next) { - /* Are there more distilled matches to process after - * this one? - */ - dist_name_info_next = dist_name_info + 1; - if (dist_name_info_next >= dist_base_info_sorted_end || - cmp_btf_name_size(&base_name_info, dist_name_info_next)) - dist_name_info_next = NULL; - - if (!dist_name_info->id || dist_name_info->id > r->nr_dist_base_types) { + for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types); + dist_info != NULL && dist_info < info_end && + cmp_btf_name_size(&base_info, dist_info) == 0; + dist_info++) { + if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) { pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n", - id, dist_name_info->id); + id, dist_info->id); err = -EINVAL; goto done; } - dist_t = btf_type_by_id(r->dist_base_btf, dist_name_info->id); + dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id); dist_kind = btf_kind(dist_t); /* Validate that the found distilled type is compatible. @@ -319,15 +309,15 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r) /* size verification is required for embedded * struct/unions. */ - if (r->id_map[dist_name_info->id] == BTF_IS_EMBEDDED && + if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED && base_t->size != dist_t->size) continue; break; default: continue; } - if (r->id_map[dist_name_info->id] && - r->id_map[dist_name_info->id] != BTF_IS_EMBEDDED) { + if (r->id_map[dist_info->id] && + r->id_map[dist_info->id] != BTF_IS_EMBEDDED) { /* we already have a match; this tells us that * multiple base types of the same name * have the same size, since for cases where @@ -337,13 +327,13 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r) * to in base BTF, so error out. 
*/ pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n", - base_name_info.name, dist_name_info->id, - base_t->size, id, r->id_map[dist_name_info->id]); + base_info.name, dist_info->id, + base_t->size, id, r->id_map[dist_info->id]); err = -EINVAL; goto done; } /* map id and name */ - r->id_map[dist_name_info->id] = id; + r->id_map[dist_info->id] = id; r->str_map[dist_t->name_off] = base_t->name_off; } } @@ -362,7 +352,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r) } done: free(base_name_cnt); - free(dist_base_info_sorted); + free(info); return err; } -- cgit v1.2.3 From d4e48e3dd45017abdd69a19285d197de897ef44f Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 20 Jun 2024 10:17:29 +0100 Subject: module, bpf: Store BTF base pointer in struct module ...as this will allow split BTF modules with a base BTF representation (rather than the full vmlinux BTF at time of BTF encoding) to resolve their references to kernel types in a way that is more resilient to small changes in kernel types. This will allow modules that are not built every time the kernel is to provide more resilient BTF, rather than have it invalidated every time BTF ids for core kernel types change. Fields are ordered to avoid holes in struct module. Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Reviewed-by: Luis Chamberlain Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240620091733.1967885-3-alan.maguire@oracle.com --- include/linux/module.h | 2 ++ kernel/module/main.c | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/include/linux/module.h b/include/linux/module.h index ffa1c603163c..b79d926cae8a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -509,7 +509,9 @@ struct module { #endif #ifdef CONFIG_DEBUG_INFO_BTF_MODULES unsigned int btf_data_size; + unsigned int btf_base_data_size; void *btf_data; + void *btf_base_data; #endif #ifdef CONFIG_JUMP_LABEL struct jump_entry *jump_entries; diff --git a/kernel/module/main.c b/kernel/module/main.c index d18a94b973e1..d9592195c5bb 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2166,6 +2166,8 @@ static int find_module_sections(struct module *mod, struct load_info *info) #endif #ifdef CONFIG_DEBUG_INFO_BTF_MODULES mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); + mod->btf_base_data = any_section_objs(info, ".BTF.base", 1, + &mod->btf_base_data_size); #endif #ifdef CONFIG_JUMP_LABEL mod->jump_entries = section_objs(info, "__jump_table", @@ -2590,8 +2592,9 @@ static noinline int do_init_module(struct module *mod) } #ifdef CONFIG_DEBUG_INFO_BTF_MODULES - /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */ + /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */ mod->btf_data = NULL; + mod->btf_base_data = NULL; #endif /* * We want to free module_init, but be aware that kallsyms may be -- cgit v1.2.3 From e7ac331b30555cf1a0826784a346f36dbf800451 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 20 Jun 2024 10:17:30 +0100 Subject: libbpf: Split field iter code into its own file kernel This will allow it to be shared with the kernel. No functional change. 
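To make clear what is being moved, here is an illustrative sketch of how the field iterator is typically consumed (an internal helper API; the remap function itself is hypothetical): visit every type-ID reference inside one BTF type and rewrite it through a mapping table.

static int remap_type_ids(struct btf_type *t, const __u32 *id_map)
{
	struct btf_field_iter it;
	__u32 *id;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;

	/* each call returns a pointer to one type-ID field (t->type, member types, ...) */
	while ((id = btf_field_iter_next(&it)))
		*id = id_map[*id];

	return 0;
}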
Suggested-by: Andrii Nakryiko Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240620091733.1967885-4-alan.maguire@oracle.com --- tools/lib/bpf/Build | 2 +- tools/lib/bpf/btf.c | 162 --------------------------------------------- tools/lib/bpf/btf_iter.c | 169 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 170 insertions(+), 163 deletions(-) create mode 100644 tools/lib/bpf/btf_iter.c diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index 336da6844d42..e2cd558ca0b4 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1,4 +1,4 @@ libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \ netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \ - usdt.o zip.o elf.o features.o btf_relocate.o + usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index ef1b2f573c1b..0c0f60cad769 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -5093,168 +5093,6 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt return btf__parse_split(path, vmlinux_btf); } -int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind) -{ - it->p = NULL; - it->m_idx = -1; - it->off_idx = 0; - it->vlen = 0; - - switch (iter_kind) { - case BTF_FIELD_ITER_IDS: - switch (btf_kind(t)) { - case BTF_KIND_UNKN: - case BTF_KIND_INT: - case BTF_KIND_FLOAT: - case BTF_KIND_ENUM: - case BTF_KIND_ENUM64: - it->desc = (struct btf_field_desc) {}; - break; - case BTF_KIND_FWD: - case BTF_KIND_CONST: - case BTF_KIND_VOLATILE: - case BTF_KIND_RESTRICT: - case BTF_KIND_PTR: - case BTF_KIND_TYPEDEF: - case BTF_KIND_FUNC: - case BTF_KIND_VAR: - case BTF_KIND_DECL_TAG: - case BTF_KIND_TYPE_TAG: - it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} }; - break; - case BTF_KIND_ARRAY: - it->desc = (struct btf_field_desc) { - 2, {sizeof(struct btf_type) + offsetof(struct btf_array, type), - sizeof(struct btf_type) + offsetof(struct btf_array, index_type)} - }; - break; - case BTF_KIND_STRUCT: - case BTF_KIND_UNION: - it->desc = (struct btf_field_desc) { - 0, {}, - sizeof(struct btf_member), - 1, {offsetof(struct btf_member, type)} - }; - break; - case BTF_KIND_FUNC_PROTO: - it->desc = (struct btf_field_desc) { - 1, {offsetof(struct btf_type, type)}, - sizeof(struct btf_param), - 1, {offsetof(struct btf_param, type)} - }; - break; - case BTF_KIND_DATASEC: - it->desc = (struct btf_field_desc) { - 0, {}, - sizeof(struct btf_var_secinfo), - 1, {offsetof(struct btf_var_secinfo, type)} - }; - break; - default: - return -EINVAL; - } - break; - case BTF_FIELD_ITER_STRS: - switch (btf_kind(t)) { - case BTF_KIND_UNKN: - it->desc = (struct btf_field_desc) {}; - break; - case BTF_KIND_INT: - case BTF_KIND_FLOAT: - case BTF_KIND_FWD: - case BTF_KIND_ARRAY: - case BTF_KIND_CONST: - case BTF_KIND_VOLATILE: - case BTF_KIND_RESTRICT: - case BTF_KIND_PTR: - case BTF_KIND_TYPEDEF: - case BTF_KIND_FUNC: - case BTF_KIND_VAR: - case BTF_KIND_DECL_TAG: - case BTF_KIND_TYPE_TAG: - case BTF_KIND_DATASEC: - it->desc = (struct btf_field_desc) { - 1, {offsetof(struct btf_type, name_off)} - }; - break; - case BTF_KIND_ENUM: - it->desc = (struct btf_field_desc) { - 1, {offsetof(struct btf_type, name_off)}, - sizeof(struct btf_enum), - 1, {offsetof(struct btf_enum, name_off)} - }; - break; - case BTF_KIND_ENUM64: - it->desc = (struct btf_field_desc) { - 1, 
{offsetof(struct btf_type, name_off)}, - sizeof(struct btf_enum64), - 1, {offsetof(struct btf_enum64, name_off)} - }; - break; - case BTF_KIND_STRUCT: - case BTF_KIND_UNION: - it->desc = (struct btf_field_desc) { - 1, {offsetof(struct btf_type, name_off)}, - sizeof(struct btf_member), - 1, {offsetof(struct btf_member, name_off)} - }; - break; - case BTF_KIND_FUNC_PROTO: - it->desc = (struct btf_field_desc) { - 1, {offsetof(struct btf_type, name_off)}, - sizeof(struct btf_param), - 1, {offsetof(struct btf_param, name_off)} - }; - break; - default: - return -EINVAL; - } - break; - default: - return -EINVAL; - } - - if (it->desc.m_sz) - it->vlen = btf_vlen(t); - - it->p = t; - return 0; -} - -__u32 *btf_field_iter_next(struct btf_field_iter *it) -{ - if (!it->p) - return NULL; - - if (it->m_idx < 0) { - if (it->off_idx < it->desc.t_off_cnt) - return it->p + it->desc.t_offs[it->off_idx++]; - /* move to per-member iteration */ - it->m_idx = 0; - it->p += sizeof(struct btf_type); - it->off_idx = 0; - } - - /* if type doesn't have members, stop */ - if (it->desc.m_sz == 0) { - it->p = NULL; - return NULL; - } - - if (it->off_idx >= it->desc.m_off_cnt) { - /* exhausted this member's fields, go to the next member */ - it->m_idx++; - it->p += it->desc.m_sz; - it->off_idx = 0; - } - - if (it->m_idx < it->vlen) - return it->p + it->desc.m_offs[it->off_idx++]; - - it->p = NULL; - return NULL; -} - int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx) { const struct btf_ext_info *seg; diff --git a/tools/lib/bpf/btf_iter.c b/tools/lib/bpf/btf_iter.c new file mode 100644 index 000000000000..c308aa60285d --- /dev/null +++ b/tools/lib/bpf/btf_iter.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2021 Facebook */ +/* Copyright (c) 2024, Oracle and/or its affiliates. 
*/ + +#include "btf.h" +#include "libbpf_internal.h" + +int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, + enum btf_field_iter_kind iter_kind) +{ + it->p = NULL; + it->m_idx = -1; + it->off_idx = 0; + it->vlen = 0; + + switch (iter_kind) { + case BTF_FIELD_ITER_IDS: + switch (btf_kind(t)) { + case BTF_KIND_UNKN: + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + it->desc = (struct btf_field_desc) {}; + break; + case BTF_KIND_FWD: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: + it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} }; + break; + case BTF_KIND_ARRAY: + it->desc = (struct btf_field_desc) { + 2, {sizeof(struct btf_type) + offsetof(struct btf_array, type), + sizeof(struct btf_type) + offsetof(struct btf_array, index_type)} + }; + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + it->desc = (struct btf_field_desc) { + 0, {}, + sizeof(struct btf_member), + 1, {offsetof(struct btf_member, type)} + }; + break; + case BTF_KIND_FUNC_PROTO: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, type)}, + sizeof(struct btf_param), + 1, {offsetof(struct btf_param, type)} + }; + break; + case BTF_KIND_DATASEC: + it->desc = (struct btf_field_desc) { + 0, {}, + sizeof(struct btf_var_secinfo), + 1, {offsetof(struct btf_var_secinfo, type)} + }; + break; + default: + return -EINVAL; + } + break; + case BTF_FIELD_ITER_STRS: + switch (btf_kind(t)) { + case BTF_KIND_UNKN: + it->desc = (struct btf_field_desc) {}; + break; + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_FWD: + case BTF_KIND_ARRAY: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: + case BTF_KIND_DATASEC: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)} + }; + break; + case BTF_KIND_ENUM: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)}, + sizeof(struct btf_enum), + 1, {offsetof(struct btf_enum, name_off)} + }; + break; + case BTF_KIND_ENUM64: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)}, + sizeof(struct btf_enum64), + 1, {offsetof(struct btf_enum64, name_off)} + }; + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)}, + sizeof(struct btf_member), + 1, {offsetof(struct btf_member, name_off)} + }; + break; + case BTF_KIND_FUNC_PROTO: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)}, + sizeof(struct btf_param), + 1, {offsetof(struct btf_param, name_off)} + }; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + if (it->desc.m_sz) + it->vlen = btf_vlen(t); + + it->p = t; + return 0; +} + +__u32 *btf_field_iter_next(struct btf_field_iter *it) +{ + if (!it->p) + return NULL; + + if (it->m_idx < 0) { + if (it->off_idx < it->desc.t_off_cnt) + return it->p + it->desc.t_offs[it->off_idx++]; + /* move to per-member iteration */ + it->m_idx = 0; + it->p += sizeof(struct btf_type); + it->off_idx = 0; + } + + /* if type doesn't have members, stop */ + if (it->desc.m_sz == 0) { + it->p = NULL; + return NULL; + } + + if (it->off_idx >= 
it->desc.m_off_cnt) { + /* exhausted this member's fields, go to the next member */ + it->m_idx++; + it->p += it->desc.m_sz; + it->off_idx = 0; + } + + if (it->m_idx < it->vlen) + return it->p + it->desc.m_offs[it->off_idx++]; + + it->p = NULL; + return NULL; +} -- cgit v1.2.3 From 8646db238997df36c6ad71a9d7e0b52ceee221b2 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 20 Jun 2024 10:17:31 +0100 Subject: libbpf,bpf: Share BTF relocate-related code with kernel Share relocation implementation with the kernel. As part of this, we also need the type/string iteration functions so also share btf_iter.c file. Relocation code in kernel and userspace is identical save for the impementation of the reparenting of split BTF to the relocated base BTF and retrieval of the BTF header from "struct btf"; these small functions need separate user-space and kernel implementations for the separate "struct btf"s they operate upon. One other wrinkle on the kernel side is we have to map .BTF.ids in modules as they were generated with the type ids used at BTF encoding time. btf_relocate() optionally returns an array mapping from old BTF ids to relocated ids, so we use that to fix up these references where needed for kfuncs. Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240620091733.1967885-5-alan.maguire@oracle.com --- include/linux/btf.h | 64 ++++++++++++++++ kernel/bpf/Makefile | 8 +- kernel/bpf/btf.c | 176 ++++++++++++++++++++++++++++++------------- tools/lib/bpf/btf_iter.c | 8 ++ tools/lib/bpf/btf_relocate.c | 23 ++++++ 5 files changed, 226 insertions(+), 53 deletions(-) diff --git a/include/linux/btf.h b/include/linux/btf.h index 56d91daacdba..d199fa17abb4 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -140,6 +140,7 @@ extern const struct file_operations btf_fops; const char *btf_get_name(const struct btf *btf); void btf_get(struct btf *btf); void btf_put(struct btf *btf); +const struct btf_header *btf_header(const struct btf *btf); int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz); struct btf *btf_get_by_fd(int fd); int btf_get_info_by_fd(const struct btf *btf, @@ -212,8 +213,10 @@ int btf_get_fd_by_id(u32 id); u32 btf_obj_id(const struct btf *btf); bool btf_is_kernel(const struct btf *btf); bool btf_is_module(const struct btf *btf); +bool btf_is_vmlinux(const struct btf *btf); struct module *btf_try_get_module(const struct btf *btf); u32 btf_nr_types(const struct btf *btf); +struct btf *btf_base_btf(const struct btf *btf); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); @@ -339,6 +342,11 @@ static inline u8 btf_int_offset(const struct btf_type *t) return BTF_INT_OFFSET(*(u32 *)(t + 1)); } +static inline __u8 btf_int_bits(const struct btf_type *t) +{ + return BTF_INT_BITS(*(__u32 *)(t + 1)); +} + static inline bool btf_type_is_scalar(const struct btf_type *t) { return btf_type_is_int(t) || btf_type_is_enum(t); @@ -478,6 +486,11 @@ static inline struct btf_param *btf_params(const struct btf_type *t) return (struct btf_param *)(t + 1); } +static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t) +{ + return (struct btf_decl_tag *)(t + 1); +} + static inline int btf_id_cmp_func(const void *a, const void *b) { const int *pa = a, *pb = b; @@ -515,9 +528,38 @@ static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf * } #endif +enum btf_field_iter_kind { + 
BTF_FIELD_ITER_IDS, + BTF_FIELD_ITER_STRS, +}; + +struct btf_field_desc { + /* once-per-type offsets */ + int t_off_cnt, t_offs[2]; + /* member struct size, or zero, if no members */ + int m_sz; + /* repeated per-member offsets */ + int m_off_cnt, m_offs[1]; +}; + +struct btf_field_iter { + struct btf_field_desc desc; + void *p; + int m_idx; + int off_idx; + int vlen; +}; + #ifdef CONFIG_BPF_SYSCALL const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf); +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids); +int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, + enum btf_field_iter_kind iter_kind); +__u32 *btf_field_iter_next(struct btf_field_iter *it); + const char *btf_name_by_offset(const struct btf *btf, u32 offset); +const char *btf_str_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id, @@ -544,6 +586,28 @@ static inline const struct btf_type *btf_type_by_id(const struct btf *btf, { return NULL; } + +static inline void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) +{ +} + +static inline int btf_relocate(void *log, struct btf *btf, const struct btf *base_btf, + __u32 **map_ids) +{ + return -EOPNOTSUPP; +} + +static inline int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, + enum btf_field_iter_kind iter_kind) +{ + return -EOPNOTSUPP; +} + +static inline __u32 *btf_field_iter_next(struct btf_field_iter *it) +{ + return NULL; +} + static inline const char *btf_name_by_offset(const struct btf *btf, u32 offset) { diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 7eb9ad3a3ae6..0291eef9ce92 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -50,5 +50,11 @@ endif obj-$(CONFIG_BPF_PRELOAD) += preload/ obj-$(CONFIG_BPF_SYSCALL) += relo_core.o -$(obj)/relo_core.o: $(srctree)/tools/lib/bpf/relo_core.c FORCE +obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o +obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o + +# Some source files are common to libbpf. +vpath %.c $(srctree)/kernel/bpf:$(srctree)/tools/lib/bpf + +$(obj)/%.o: %.c FORCE $(call if_changed_rule,cc_o_c) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ce4707968217..8e12cb80ba73 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -274,6 +274,7 @@ struct btf { u32 start_str_off; /* first string offset (0 for base BTF) */ char name[MODULE_NAME_LEN]; bool kernel_btf; + __u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */ }; enum verifier_phase { @@ -530,6 +531,11 @@ static bool btf_type_is_decl_tag_target(const struct btf_type *t) btf_type_is_var(t) || btf_type_is_typedef(t); } +bool btf_is_vmlinux(const struct btf *btf) +{ + return btf->kernel_btf && !btf->base_btf; +} + u32 btf_nr_types(const struct btf *btf) { u32 total = 0; @@ -772,7 +778,7 @@ static bool __btf_name_char_ok(char c, bool first) return true; } -static const char *btf_str_by_offset(const struct btf *btf, u32 offset) +const char *btf_str_by_offset(const struct btf *btf, u32 offset) { while (offset < btf->start_str_off) btf = btf->base_btf; @@ -1670,14 +1676,8 @@ static void btf_free_kfunc_set_tab(struct btf *btf) if (!tab) return; - /* For module BTF, we directly assign the sets being registered, so - * there is nothing to free except kfunc_set_tab. 
- */ - if (btf_is_module(btf)) - goto free_tab; for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) kfree(tab->sets[hook]); -free_tab: kfree(tab); btf->kfunc_set_tab = NULL; } @@ -1735,7 +1735,12 @@ static void btf_free(struct btf *btf) kvfree(btf->types); kvfree(btf->resolved_sizes); kvfree(btf->resolved_ids); - kvfree(btf->data); + /* vmlinux does not allocate btf->data, it simply points it at + * __start_BTF. + */ + if (!btf_is_vmlinux(btf)) + kvfree(btf->data); + kvfree(btf->base_id_map); kfree(btf); } @@ -1764,6 +1769,23 @@ void btf_put(struct btf *btf) } } +struct btf *btf_base_btf(const struct btf *btf) +{ + return btf->base_btf; +} + +const struct btf_header *btf_header(const struct btf *btf) +{ + return &btf->hdr; +} + +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) +{ + btf->base_btf = (struct btf *)base_btf; + btf->start_id = btf_nr_types(base_btf); + btf->start_str_off = base_btf->hdr.str_len; +} + static int env_resolve_init(struct btf_verifier_env *env) { struct btf *btf = env->btf; @@ -6083,23 +6105,15 @@ int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_ty BTF_ID_LIST(bpf_ctx_convert_btf_id) BTF_ID(struct, bpf_ctx_convert) -struct btf *btf_parse_vmlinux(void) +static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name, + void *data, unsigned int data_size) { - struct btf_verifier_env *env = NULL; - struct bpf_verifier_log *log; struct btf *btf = NULL; int err; if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) return ERR_PTR(-ENOENT); - env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); - if (!env) - return ERR_PTR(-ENOMEM); - - log = &env->log; - log->level = BPF_LOG_KERNEL; - btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); if (!btf) { err = -ENOMEM; @@ -6107,10 +6121,10 @@ struct btf *btf_parse_vmlinux(void) } env->btf = btf; - btf->data = __start_BTF; - btf->data_size = __stop_BTF - __start_BTF; + btf->data = data; + btf->data_size = data_size; btf->kernel_btf = true; - snprintf(btf->name, sizeof(btf->name), "vmlinux"); + snprintf(btf->name, sizeof(btf->name), "%s", name); err = btf_parse_hdr(env); if (err) @@ -6130,20 +6144,11 @@ struct btf *btf_parse_vmlinux(void) if (err) goto errout; - /* btf_parse_vmlinux() runs under bpf_verifier_lock */ - bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); - refcount_set(&btf->refcnt, 1); - err = btf_alloc_id(btf); - if (err) - goto errout; - - btf_verifier_env_free(env); return btf; errout: - btf_verifier_env_free(env); if (btf) { kvfree(btf->types); kfree(btf); @@ -6151,19 +6156,61 @@ errout: return ERR_PTR(err); } +struct btf *btf_parse_vmlinux(void) +{ + struct btf_verifier_env *env = NULL; + struct bpf_verifier_log *log; + struct btf *btf; + int err; + + env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); + if (!env) + return ERR_PTR(-ENOMEM); + + log = &env->log; + log->level = BPF_LOG_KERNEL; + btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF); + if (IS_ERR(btf)) + goto err_out; + + /* btf_parse_vmlinux() runs under bpf_verifier_lock */ + bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); + err = btf_alloc_id(btf); + if (err) { + btf_free(btf); + btf = ERR_PTR(err); + } +err_out: + btf_verifier_env_free(env); + return btf; +} + #ifdef CONFIG_DEBUG_INFO_BTF_MODULES -static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size) +/* If .BTF_ids section was created with distilled base BTF, both base and + * split BTF ids will need to be mapped to actual 
base/split ids for + * BTF now that it has been relocated. + */ +static __u32 btf_relocate_id(const struct btf *btf, __u32 id) +{ + if (!btf->base_btf || !btf->base_id_map) + return id; + return btf->base_id_map[id]; +} + +static struct btf *btf_parse_module(const char *module_name, const void *data, + unsigned int data_size, void *base_data, + unsigned int base_data_size) { + struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL; struct btf_verifier_env *env = NULL; struct bpf_verifier_log *log; - struct btf *btf = NULL, *base_btf; - int err; + int err = 0; - base_btf = bpf_get_btf_vmlinux(); - if (IS_ERR(base_btf)) - return base_btf; - if (!base_btf) + vmlinux_btf = bpf_get_btf_vmlinux(); + if (IS_ERR(vmlinux_btf)) + return vmlinux_btf; + if (!vmlinux_btf) return ERR_PTR(-EINVAL); env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); @@ -6173,6 +6220,16 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, u log = &env->log; log->level = BPF_LOG_KERNEL; + if (base_data) { + base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size); + if (IS_ERR(base_btf)) { + err = PTR_ERR(base_btf); + goto errout; + } + } else { + base_btf = vmlinux_btf; + } + btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); if (!btf) { err = -ENOMEM; @@ -6212,12 +6269,22 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, u if (err) goto errout; + if (base_btf != vmlinux_btf) { + err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map); + if (err) + goto errout; + btf_free(base_btf); + base_btf = vmlinux_btf; + } + btf_verifier_env_free(env); refcount_set(&btf->refcnt, 1); return btf; errout: btf_verifier_env_free(env); + if (base_btf != vmlinux_btf) + btf_free(base_btf); if (btf) { kvfree(btf->data); kvfree(btf->types); @@ -7770,7 +7837,8 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, err = -ENOMEM; goto out; } - btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size); + btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size, + mod->btf_base_data, mod->btf_base_data_size); if (IS_ERR(btf)) { kfree(btf_mod); if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) { @@ -8094,7 +8162,7 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, bool add_filter = !!kset->filter; struct btf_kfunc_set_tab *tab; struct btf_id_set8 *set; - u32 set_cnt; + u32 set_cnt, i; int ret; if (hook >= BTF_KFUNC_HOOK_MAX) { @@ -8140,21 +8208,15 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, goto end; } - /* We don't need to allocate, concatenate, and sort module sets, because - * only one is allowed per hook. Hence, we can directly assign the - * pointer and return. - */ - if (!vmlinux_set) { - tab->sets[hook] = add_set; - goto do_add_filter; - } - /* In case of vmlinux sets, there may be more than one set being * registered per hook. To create a unified set, we allocate a new set * and concatenate all individual sets being registered. While each set * is individually sorted, they may become unsorted when concatenated, * hence re-sorting the final set again is required to make binary * searching the set using btf_id_set8_contains function work. + * + * For module sets, we need to allocate as we may need to relocate + * BTF ids. */ set_cnt = set ? 
set->cnt : 0; @@ -8184,11 +8246,14 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, /* Concatenate the two sets */ memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0])); + /* Now that the set is copied, update with relocated BTF ids */ + for (i = set->cnt; i < set->cnt + add_set->cnt; i++) + set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id); + set->cnt += add_set->cnt; sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL); -do_add_filter: if (add_filter) { hook_filter = &tab->hook_filters[hook]; hook_filter->filters[hook_filter->nr_filters++] = kset->filter; @@ -8308,7 +8373,7 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook, return PTR_ERR(btf); for (i = 0; i < kset->set->cnt; i++) { - ret = btf_check_kfunc_protos(btf, kset->set->pairs[i].id, + ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id), kset->set->pairs[i].flags); if (ret) goto err_out; @@ -8372,7 +8437,7 @@ static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc u32 nr_args, i; for (i = 0; i < cnt; i++) { - dtor_btf_id = dtors[i].kfunc_btf_id; + dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id); dtor_func = btf_type_by_id(btf, dtor_btf_id); if (!dtor_func || !btf_type_is_func(dtor_func)) @@ -8407,7 +8472,7 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c { struct btf_id_dtor_kfunc_tab *tab; struct btf *btf; - u32 tab_cnt; + u32 tab_cnt, i; int ret; btf = btf_get_module_btf(owner); @@ -8458,6 +8523,13 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c btf->dtor_kfunc_tab = tab; memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0])); + + /* remap BTF ids based on BTF relocation (if any) */ + for (i = tab_cnt; i < tab_cnt + add_cnt; i++) { + tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id); + tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id); + } + tab->cnt += add_cnt; sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL); diff --git a/tools/lib/bpf/btf_iter.c b/tools/lib/bpf/btf_iter.c index c308aa60285d..9a6c822c2294 100644 --- a/tools/lib/bpf/btf_iter.c +++ b/tools/lib/bpf/btf_iter.c @@ -2,8 +2,16 @@ /* Copyright (c) 2021 Facebook */ /* Copyright (c) 2024, Oracle and/or its affiliates. 
*/ +#ifdef __KERNEL__ +#include +#include + +#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t) + +#else #include "btf.h" #include "libbpf_internal.h" +#endif int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind) diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c index 23a41fb03e0d..2281dbbafa11 100644 --- a/tools/lib/bpf/btf_relocate.c +++ b/tools/lib/bpf/btf_relocate.c @@ -5,11 +5,34 @@ #define _GNU_SOURCE #endif +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include + +#define btf_type_by_id (struct btf_type *)btf_type_by_id +#define btf__type_cnt btf_nr_types +#define btf__base_btf btf_base_btf +#define btf__name_by_offset btf_name_by_offset +#define btf__str_by_offset btf_str_by_offset +#define btf_kflag btf_type_kflag + +#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN) +#define free(ptr) kvfree(ptr) +#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL) + +#else + #include "btf.h" #include "bpf.h" #include "libbpf.h" #include "libbpf_internal.h" +#endif /* __KERNEL__ */ + struct btf; struct btf_relocate { -- cgit v1.2.3 From 46fb0b62ea29c0dbcb3e44f1d67aafe79bc6e045 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 20 Jun 2024 10:17:32 +0100 Subject: kbuild,bpf: Add module-specific pahole flags for distilled base BTF Support creation of module BTF along with distilled base BTF; the latter is stored in a .BTF.base ELF section and supplements split BTF references to base BTF with information about base types, allowing for later relocation of split BTF with a (possibly changed) base. resolve_btfids detects the presence of a .BTF.base section and will use it instead of the base BTF it is passed in BTF id resolution. Modules will be built with a distilled .BTF.base section for external module build, i.e. make -C. -M=path2/module ...while in-tree module build as part of a normal kernel build will not generate distilled base BTF; this is because in-tree modules change with the kernel and do not require BTF relocation for the running vmlinux. Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Reviewed-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20240620091733.1967885-6-alan.maguire@oracle.com --- scripts/Makefile.btf | 5 +++++ scripts/Makefile.modfinal | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/Makefile.btf b/scripts/Makefile.btf index 2597e3d4d6e0..b75f09f3f424 100644 --- a/scripts/Makefile.btf +++ b/scripts/Makefile.btf @@ -21,8 +21,13 @@ else # Switch to using --btf_features for v1.26 and later. pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs +ifneq ($(KBUILD_EXTMOD),) +module-pahole-flags-$(call test-ge, $(pahole-ver), 126) += --btf_features=distilled_base +endif + endif pahole-flags-$(CONFIG_PAHOLE_HAS_LANG_EXCLUDE) += --lang_exclude=rust export PAHOLE_FLAGS := $(pahole-flags-y) +export MODULE_PAHOLE_FLAGS := $(module-pahole-flags-y) diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal index 3bec9043e4f3..1fa98b5e952b 100644 --- a/scripts/Makefile.modfinal +++ b/scripts/Makefile.modfinal @@ -41,7 +41,7 @@ quiet_cmd_btf_ko = BTF [M] $@ if [ ! 
-f vmlinux ]; then \ printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \ else \ - LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \ + LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) $(MODULE_PAHOLE_FLAGS) --btf_base vmlinux $@; \ $(RESOLVE_BTFIDS) -b vmlinux $@; \ fi; -- cgit v1.2.3 From 47a8cf0c5b3f6769b9d558301735c75119a0a165 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Thu, 20 Jun 2024 10:17:33 +0100 Subject: selftests/bpf: Add kfunc_call test for simple dtor in bpf_testmod add simple kfuncs to create/destroy a context type to bpf_testmod, register them and add a kfunc_call test to use them. This provides test coverage for registration of dtor kfuncs from modules. By transferring the context pointer to a map value as a __kptr we also trigger the map-based dtor cleanup logic, improving test coverage. Suggested-by: Eduard Zingerman Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240620091733.1967885-7-alan.maguire@oracle.com --- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 46 ++++++++++++++++++++++ .../selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h | 9 +++++ .../testing/selftests/bpf/prog_tests/kfunc_call.c | 1 + .../testing/selftests/bpf/progs/kfunc_call_test.c | 37 +++++++++++++++++ 4 files changed, 93 insertions(+) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 49f9a311e49b..d8bd01d8560b 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -159,6 +159,37 @@ __bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, { } +__bpf_kfunc struct bpf_testmod_ctx * +bpf_testmod_ctx_create(int *err) +{ + struct bpf_testmod_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); + if (!ctx) { + *err = -ENOMEM; + return NULL; + } + refcount_set(&ctx->usage, 1); + + return ctx; +} + +static void testmod_free_cb(struct rcu_head *head) +{ + struct bpf_testmod_ctx *ctx; + + ctx = container_of(head, struct bpf_testmod_ctx, rcu); + kfree(ctx); +} + +__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) +{ + if (!ctx) + return; + if (refcount_dec_and_test(&ctx->usage)) + call_rcu(&ctx->rcu, testmod_free_cb); +} + struct bpf_testmod_btf_type_tag_1 { int a; }; @@ -369,8 +400,14 @@ BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_kfunc_common_test) BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test) +BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE) BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) +BTF_ID_LIST(bpf_testmod_dtor_ids) +BTF_ID(struct, bpf_testmod_ctx) +BTF_ID(func, bpf_testmod_ctx_release) + static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { .owner = THIS_MODULE, .set = &bpf_testmod_common_kfunc_ids, @@ -904,6 +941,12 @@ extern int bpf_fentry_test1(int a); static int bpf_testmod_init(void) { + const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = { + { + .btf_id = bpf_testmod_dtor_ids[0], + .kfunc_btf_id = bpf_testmod_dtor_ids[1] + }, + }; int ret; ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set); @@ -912,6 +955,9 @@ static int bpf_testmod_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); ret = ret ?: 
register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); + ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, + ARRAY_SIZE(bpf_testmod_dtors), + THIS_MODULE); if (ret < 0) return ret; if (bpf_fentry_test1(0) < 0) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h index f9809517e7fa..e587a79f2239 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h @@ -80,6 +80,11 @@ struct sendmsg_args { int msglen; }; +struct bpf_testmod_ctx { + struct callback_head rcu; + refcount_t usage; +}; + struct prog_test_ref_kfunc * bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym; void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; @@ -135,4 +140,8 @@ int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym; int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym; void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nullable) __ksym; + +struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym; +void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym; + #endif /* _BPF_TESTMOD_KFUNC_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 2eb71559713c..5b743212292f 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -78,6 +78,7 @@ static struct kfunc_test_params kfunc_tests[] = { SYSCALL_TEST(kfunc_syscall_test, 0), SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0), TC_TEST(kfunc_call_test_static_unused_arg, 0), + TC_TEST(kfunc_call_ctx, 0), }; struct syscall_test_args { diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index cf68d1e48a0f..f502f755f567 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -177,4 +177,41 @@ int kfunc_call_test_static_unused_arg(struct __sk_buff *skb) return actual != expected ? -1 : 0; } +struct ctx_val { + struct bpf_testmod_ctx __kptr *ctx; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct ctx_val); +} ctx_map SEC(".maps"); + +SEC("tc") +int kfunc_call_ctx(struct __sk_buff *skb) +{ + struct bpf_testmod_ctx *ctx; + int err = 0; + + ctx = bpf_testmod_ctx_create(&err); + if (!ctx && !err) + err = -1; + if (ctx) { + int key = 0; + struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key); + + /* Transfer ctx to map to be freed via implicit dtor call + * on cleanup. + */ + if (ctx_val) + ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx); + if (ctx) { + bpf_testmod_ctx_release(ctx); + err = -1; + } + } + return err; +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 04efaebd72d1d3d9991841051fafc6b195f3676d Mon Sep 17 00:00:00 2001 From: Dave Thaler Date: Sun, 23 Jun 2024 08:04:53 -0700 Subject: bpf, docs: Address comments from IETF Area Directors This patch does the following to address IETF feedback: * Remove mention of "program type" and reference future docs (and mention platform-specific docs exist) for helper functions and BTF. Addresses Roman Danyliw's comments based on GENART review from Ines Robles [0]. * Add reference for endianness as requested by John Scudder [1]. 
* Added bit numbers to top of 32-bit wide format diagrams as requested by Paul Wouters [2]. * Added more text about why BPF doesn't stand for anything, based on text from ebpf.io [3], as requested by Eric Vyncke and Gunter Van de Velde [4]. * Replaced "htobe16" (and similar) and the direction-specific description with just "be16" (and similar) and a direction-agnostic description, to match the direction-agnostic description in the Byteswap Instructions section. Based on feedback from Eric Vyncke [5]. [0] https://mailarchive.ietf.org/arch/msg/bpf/DvDgDWOiwk05OyNlWlAmELZFPlM/ [1] https://mailarchive.ietf.org/arch/msg/bpf/eKNXpU4jCLjsbZDSw8LjI29M3tM/ [2] https://mailarchive.ietf.org/arch/msg/bpf/hGk8HkYxeZTpdu9qW_MvbGKj7WU/ [3] https://ebpf.io/what-is-ebpf/#what-do-ebpf-and-bpf-stand-for [4] https://mailarchive.ietf.org/arch/msg/bpf/i93lzdN3ewnzzS_JMbinCIYxAIU/ [5] https://mailarchive.ietf.org/arch/msg/bpf/KBWXbMeDcSrq4vsKR_KkBbV6hI4/ Acked-by: David Vernet Signed-off-by: Dave Thaler Link: https://lore.kernel.org/r/20240623150453.10613-1-dthaler1968@gmail.com Signed-off-by: Alexei Starovoitov --- .../bpf/standardization/instruction-set.rst | 80 ++++++++++++---------- 1 file changed, 45 insertions(+), 35 deletions(-) diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst index 8d19810504b8..ab820d565052 100644 --- a/Documentation/bpf/standardization/instruction-set.rst +++ b/Documentation/bpf/standardization/instruction-set.rst @@ -5,12 +5,19 @@ BPF Instruction Set Architecture (ISA) ====================================== -eBPF (which is no longer an acronym for anything), also commonly +eBPF, also commonly referred to as BPF, is a technology with origins in the Linux kernel that can run untrusted programs in a privileged context such as an operating system kernel. This document specifies the BPF instruction set architecture (ISA). +As a historical note, BPF originally stood for Berkeley Packet Filter, +but now that it can do so much more than packet filtering, the acronym +no longer makes sense. BPF is now considered a standalone term that +does not stand for anything. The original BPF is sometimes referred to +as cBPF (classic BPF) to distinguish it from the now widely deployed +eBPF (extended BPF). + Documentation conventions ========================= @@ -18,7 +25,7 @@ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in BCP 14 ``_ -`RFC8174 `_ +``_ when, and only when, they appear in all capitals, as shown here. For brevity and consistency, this document refers to families @@ -59,24 +66,18 @@ numbers. Functions --------- -* htobe16: Takes an unsigned 16-bit number in host-endian format and - returns the equivalent number as an unsigned 16-bit number in big-endian - format. -* htobe32: Takes an unsigned 32-bit number in host-endian format and - returns the equivalent number as an unsigned 32-bit number in big-endian - format. -* htobe64: Takes an unsigned 64-bit number in host-endian format and - returns the equivalent number as an unsigned 64-bit number in big-endian - format. -* htole16: Takes an unsigned 16-bit number in host-endian format and - returns the equivalent number as an unsigned 16-bit number in little-endian - format. 
-* htole32: Takes an unsigned 32-bit number in host-endian format and - returns the equivalent number as an unsigned 32-bit number in little-endian - format. -* htole64: Takes an unsigned 64-bit number in host-endian format and - returns the equivalent number as an unsigned 64-bit number in little-endian - format. + +The following byteswap functions are direction-agnostic. That is, +the same function is used for conversion in either direction discussed +below. + +* be16: Takes an unsigned 16-bit number and converts it between + host byte order and big-endian + (`IEN137 `_) byte order. +* be32: Takes an unsigned 32-bit number and converts it between + host byte order and big-endian byte order. +* be64: Takes an unsigned 64-bit number and converts it between + host byte order and big-endian byte order. * bswap16: Takes an unsigned 16-bit number in either big- or little-endian format and returns the equivalent number with the same bit width but opposite endianness. @@ -86,7 +87,12 @@ Functions * bswap64: Takes an unsigned 64-bit number in either big- or little-endian format and returns the equivalent number with the same bit width but opposite endianness. - +* le16: Takes an unsigned 16-bit number and converts it between + host byte order and little-endian byte order. +* le32: Takes an unsigned 32-bit number and converts it between + host byte order and little-endian byte order. +* le64: Takes an unsigned 64-bit number and converts it between + host byte order and little-endian byte order. Definitions ----------- @@ -437,8 +443,8 @@ and MUST be set to 0. ===== ======== ===== ================================================= class source value description ===== ======== ===== ================================================= - ALU TO_LE 0 convert between host byte order and little endian - ALU TO_BE 1 convert between host byte order and big endian + ALU LE 0 convert between host byte order and little endian + ALU BE 1 convert between host byte order and big endian ALU64 Reserved 0 do byte swap unconditionally ===== ======== ===== ================================================= @@ -449,19 +455,19 @@ conformance group. Examples: -``{END, TO_LE, ALU}`` with 'imm' = 16/32/64 means:: +``{END, LE, ALU}`` with 'imm' = 16/32/64 means:: - dst = htole16(dst) - dst = htole32(dst) - dst = htole64(dst) + dst = le16(dst) + dst = le32(dst) + dst = le64(dst) -``{END, TO_BE, ALU}`` with 'imm' = 16/32/64 means:: +``{END, BE, ALU}`` with 'imm' = 16/32/64 means:: - dst = htobe16(dst) - dst = htobe32(dst) - dst = htobe64(dst) + dst = be16(dst) + dst = be32(dst) + dst = be64(dst) -``{END, TO_LE, ALU64}`` with 'imm' = 16/32/64 means:: +``{END, TO, ALU64}`` with 'imm' = 16/32/64 means:: dst = bswap16(dst) dst = bswap32(dst) @@ -541,13 +547,17 @@ Helper functions are a concept whereby BPF programs can call into a set of function calls exposed by the underlying platform. Historically, each helper function was identified by a static ID -encoded in the 'imm' field. The available helper functions may differ -for each program type, but static IDs are unique across all program types. +encoded in the 'imm' field. Further documentation of helper functions +is outside the scope of this document and standardization is left for +future work, but use is widely deployed and more information can be +found in platform-specific documentation (e.g., Linux kernel documentation). 
Platforms that support the BPF Type Format (BTF) support identifying a helper function by a BTF ID encoded in the 'imm' field, where the BTF ID identifies the helper name and type. Further documentation of BTF -is outside the scope of this document and is left for future work. +is outside the scope of this document and standardization is left for +future work, but use is widely deployed and more information can be +found in platform-specific documentation (e.g., Linux kernel documentation). Program-local functions ~~~~~~~~~~~~~~~~~~~~~~~ -- cgit v1.2.3 From 5a532459aa919d055d822d8db4ea2c5c8d511568 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Sun, 23 Jun 2024 14:52:24 +0100 Subject: bpf: fix build when CONFIG_DEBUG_INFO_BTF[_MODULES] is undefined Kernel test robot reports that kernel build fails with resilient split BTF changes. Examining the associated config and code we see that btf_relocate_id() is defined under CONFIG_DEBUG_INFO_BTF_MODULES. Moving it outside the #ifdef solves the issue. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202406221742.d2srFLVI-lkp@intel.com/ Signed-off-by: Alan Maguire Link: https://lore.kernel.org/r/20240623135224.27981-1-alan.maguire@oracle.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/btf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 8e12cb80ba73..4ff11779699e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6185,8 +6185,6 @@ err_out: return btf; } -#ifdef CONFIG_DEBUG_INFO_BTF_MODULES - /* If .BTF_ids section was created with distilled base BTF, both base and * split BTF ids will need to be mapped to actual base/split ids for * BTF now that it has been relocated. @@ -6198,6 +6196,8 @@ static __u32 btf_relocate_id(const struct btf *btf, __u32 id) return btf->base_id_map[id]; } +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size, void *base_data, unsigned int base_data_size) -- cgit v1.2.3 From c73a9683cb21012b6c0f14217974837151c527a8 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 24 Jun 2024 11:09:07 +0200 Subject: libbpf: Skip base btf sanity checks When upgrading to libbpf 1.3 we noticed a big performance hit while loading programs using CORE on non base-BTF symbols. This was tracked down to the new BTF sanity check logic. The issue is the base BTF definitions are checked first for the base BTF and then again for every module BTF. Loading 5 dummy programs (using libbpf-rs) that are using CORE on a non-base BTF symbol on my system: - Before this fix: 3s. - With this fix: 0.1s. Fix this by only checking the types starting at the BTF start id. This should ensure the base BTF is still checked as expected but only once (btf->start_id == 1 when creating the base BTF), and then only additional types are checked for each module BTF. 
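For illustration, the validation loop after this change looks roughly like the sketch below (the actual one-line fix is in the diff that follows; surrounding code abridged). Base BTF has start_id == 1, so it is still fully checked once, while each module (split) BTF now skips the base types that were already validated:

    static int btf_sanity_check(const struct btf *btf)
    {
            struct btf_type *t;
            __u32 i, n = btf__type_cnt(btf);
            int err;

            /* start at start_id: 1 for base BTF (unchanged behaviour),
             * first split-BTF type id for module BTF (skip base types)
             */
            for (i = btf->start_id; i < n; i++) {
                    t = btf_type_by_id(btf, i);
                    err = btf_validate_type(btf, t, i);
                    if (err)
                            return err;
            }
            return 0;
    }
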
Fixes: 3903802bb99a ("libbpf: Add basic BTF sanity validation") Signed-off-by: Antoine Tenart Signed-off-by: Andrii Nakryiko Reviewed-by: Alan Maguire Link: https://lore.kernel.org/bpf/20240624090908.171231-1-atenart@kernel.org --- tools/lib/bpf/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 0c0f60cad769..cd5dd6619214 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -601,7 +601,7 @@ static int btf_sanity_check(const struct btf *btf) __u32 i, n = btf__type_cnt(btf); int err; - for (i = 1; i < n; i++) { + for (i = btf->start_id; i < n; i++) { t = btf_type_by_id(btf, i); err = btf_validate_type(btf, t, i); if (err) -- cgit v1.2.3 From d65f3767de20782e75d8a665fdc54f822f344802 Mon Sep 17 00:00:00 2001 From: Leon Hwang Date: Tue, 25 Jun 2024 22:53:51 +0800 Subject: bpf: Fix tailcall cases in test_bpf Since f663a03c8e35 ("bpf, x64: Remove tail call detection"), tail_call_reachable won't be detected in x86 JIT. And, tail_call_reachable is provided by verifier. Therefore, in test_bpf, the tail_call_reachable must be provided in test cases before running. Fix and test: [ 174.828662] test_bpf: #0 Tail call leaf jited:1 170 PASS [ 174.829574] test_bpf: #1 Tail call 2 jited:1 244 PASS [ 174.830363] test_bpf: #2 Tail call 3 jited:1 296 PASS [ 174.830924] test_bpf: #3 Tail call 4 jited:1 719 PASS [ 174.831863] test_bpf: #4 Tail call load/store leaf jited:1 197 PASS [ 174.832240] test_bpf: #5 Tail call load/store jited:1 326 PASS [ 174.832240] test_bpf: #6 Tail call error path, max count reached jited:1 2214 PASS [ 174.835713] test_bpf: #7 Tail call count preserved across function calls jited:1 609751 PASS [ 175.446098] test_bpf: #8 Tail call error path, NULL target jited:1 472 PASS [ 175.447597] test_bpf: #9 Tail call error path, index out of range jited:1 206 PASS [ 175.448833] test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed] Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202406251415.c51865bc-oliver.sang@intel.com Fixes: f663a03c8e35 ("bpf, x64: Remove tail call detection") Signed-off-by: Leon Hwang Link: https://lore.kernel.org/r/20240625145351.40072-1-hffilwlqm@gmail.com Signed-off-by: Alexei Starovoitov --- lib/test_bpf.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index ce5716c3999a..b7acc29bcc3b 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -15198,6 +15198,7 @@ struct tail_call_test { int flags; int result; int stack_depth; + bool has_tail_call; }; /* Flags that can be passed to tail call test cases */ @@ -15273,6 +15274,7 @@ static struct tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .result = 3, + .has_tail_call = true, }, { "Tail call 3", @@ -15283,6 +15285,7 @@ static struct tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .result = 6, + .has_tail_call = true, }, { "Tail call 4", @@ -15293,6 +15296,7 @@ static struct tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .result = 10, + .has_tail_call = true, }, { "Tail call load/store leaf", @@ -15323,6 +15327,7 @@ static struct tail_call_test tail_call_tests[] = { }, .result = 0, .stack_depth = 16, + .has_tail_call = true, }, { "Tail call error path, max count reached", @@ -15335,6 +15340,7 @@ static struct tail_call_test tail_call_tests[] = { }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, + .has_tail_call = true, }, { "Tail call count preserved across function calls", @@ -15357,6 
+15363,7 @@ static struct tail_call_test tail_call_tests[] = { .stack_depth = 8, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, + .has_tail_call = true, }, { "Tail call error path, NULL target", @@ -15369,6 +15376,7 @@ static struct tail_call_test tail_call_tests[] = { }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, .result = MAX_TESTRUNS, + .has_tail_call = true, }, { "Tail call error path, index out of range", @@ -15381,6 +15389,7 @@ static struct tail_call_test tail_call_tests[] = { }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, .result = MAX_TESTRUNS, + .has_tail_call = true, }, }; @@ -15430,6 +15439,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs) fp->len = len; fp->type = BPF_PROG_TYPE_SOCKET_FILTER; fp->aux->stack_depth = test->stack_depth; + fp->aux->tail_call_reachable = test->has_tail_call; memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn)); /* Relocate runtime tail call offsets and addresses */ -- cgit v1.2.3 From ec2b9a5e11e51fea1bb04c1e7e471952e887e874 Mon Sep 17 00:00:00 2001 From: Matt Bobrowski Date: Tue, 25 Jun 2024 06:28:56 +0000 Subject: bpf: add missing check_func_arg_reg_off() to prevent out-of-bounds memory accesses Currently, it's possible to pass in a modified CONST_PTR_TO_DYNPTR to a global function as an argument. The adverse effects of this is that BPF helpers can continue to make use of this modified CONST_PTR_TO_DYNPTR from within the context of the global function, which can unintentionally result in out-of-bounds memory accesses and therefore compromise overall system stability i.e. [ 244.157771] BUG: KASAN: slab-out-of-bounds in bpf_dynptr_data+0x137/0x140 [ 244.161345] Read of size 8 at addr ffff88810914be68 by task test_progs/302 [ 244.167151] CPU: 0 PID: 302 Comm: test_progs Tainted: G O E 6.10.0-rc3-00131-g66b586715063 #533 [ 244.174318] Call Trace: [ 244.175787] [ 244.177356] dump_stack_lvl+0x66/0xa0 [ 244.179531] print_report+0xce/0x670 [ 244.182314] ? __virt_addr_valid+0x200/0x3e0 [ 244.184908] kasan_report+0xd7/0x110 [ 244.187408] ? bpf_dynptr_data+0x137/0x140 [ 244.189714] ? bpf_dynptr_data+0x137/0x140 [ 244.192020] bpf_dynptr_data+0x137/0x140 [ 244.194264] bpf_prog_b02a02fdd2bdc5fa_global_call_bpf_dynptr_data+0x22/0x26 [ 244.198044] bpf_prog_b0fe7b9d7dc3abde_callback_adjust_bpf_dynptr_reg_off+0x1f/0x23 [ 244.202136] bpf_user_ringbuf_drain+0x2c7/0x570 [ 244.204744] ? 0xffffffffc0009e58 [ 244.206593] ? 
__pfx_bpf_user_ringbuf_drain+0x10/0x10 [ 244.209795] bpf_prog_33ab33f6a804ba2d_user_ringbuf_callback_const_ptr_to_dynptr_reg_off+0x47/0x4b [ 244.215922] bpf_trampoline_6442502480+0x43/0xe3 [ 244.218691] __x64_sys_prlimit64+0x9/0xf0 [ 244.220912] do_syscall_64+0xc1/0x1d0 [ 244.223043] entry_SYSCALL_64_after_hwframe+0x77/0x7f [ 244.226458] RIP: 0033:0x7ffa3eb8f059 [ 244.228582] Code: 08 89 e8 5b 5d c3 66 2e 0f 1f 84 00 00 00 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 8f 1d 0d 00 f7 d8 64 89 01 48 [ 244.241307] RSP: 002b:00007ffa3e9c6eb8 EFLAGS: 00000206 ORIG_RAX: 000000000000012e [ 244.246474] RAX: ffffffffffffffda RBX: 00007ffa3e9c7cdc RCX: 00007ffa3eb8f059 [ 244.250478] RDX: 00007ffa3eb162b4 RSI: 0000000000000000 RDI: 00007ffa3e9c7fb0 [ 244.255396] RBP: 00007ffa3e9c6ed0 R08: 00007ffa3e9c76c0 R09: 0000000000000000 [ 244.260195] R10: 0000000000000000 R11: 0000000000000206 R12: ffffffffffffff80 [ 244.264201] R13: 000000000000001c R14: 00007ffc5d6b4260 R15: 00007ffa3e1c7000 [ 244.268303] Add a check_func_arg_reg_off() to the path in which the BPF verifier verifies the arguments of global function arguments, specifically those which take an argument of type ARG_PTR_TO_DYNPTR | MEM_RDONLY. Also, process_dynptr_func() doesn't appear to perform any explicit and strict type matching on the supplied register type, so let's also enforce that a register either type PTR_TO_STACK or CONST_PTR_TO_DYNPTR is by the caller. Reported-by: Kumar Kartikeya Dwivedi Acked-by: Kumar Kartikeya Dwivedi Acked-by: Eduard Zingerman Signed-off-by: Matt Bobrowski Link: https://lore.kernel.org/r/20240625062857.92760-1-mattbobrowski@google.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3f6be4923655..d3927d819465 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7726,6 +7726,13 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; int err; + if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { + verbose(env, + "arg#%d expected pointer to stack or const struct bpf_dynptr\n", + regno); + return -EINVAL; + } + /* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*): */ @@ -9475,6 +9482,10 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, return -EINVAL; } } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { + ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_DYNPTR); + if (ret) + return ret; + ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0); if (ret) return ret; @@ -11976,12 +11987,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR; int clone_ref_obj_id = 0; - if (reg->type != PTR_TO_STACK && - reg->type != CONST_PTR_TO_DYNPTR) { - verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i); - return -EINVAL; - } - if (reg->type == CONST_PTR_TO_DYNPTR) dynptr_arg_type |= MEM_RDONLY; -- cgit v1.2.3 From aa293983d2020390e286544b120f3cd0a3d40749 Mon Sep 17 00:00:00 2001 From: Matt Bobrowski Date: Tue, 25 Jun 2024 06:28:57 +0000 Subject: bpf: add new negative selftests to cover missing check_func_arg_reg_off() and reg->type check Add new negative selftests 
which are intended to cover the out-of-bounds memory access that could be performed on a CONST_PTR_TO_DYNPTR within functions taking a ARG_PTR_TO_DYNPTR | MEM_RDONLY as an argument, and acceptance of invalid register types i.e. PTR_TO_BTF_ID within functions taking a ARG_PTR_TO_DYNPTR | MEM_RDONLY. Reported-by: Kumar Kartikeya Dwivedi Acked-by: Kumar Kartikeya Dwivedi Acked-by: Eduard Zingerman Signed-off-by: Matt Bobrowski Link: https://lore.kernel.org/r/20240625062857.92760-2-mattbobrowski@google.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/dynptr_fail.c | 24 ++++++++++++++++++++++ .../selftests/bpf/progs/test_kfunc_dynptr_param.c | 2 +- .../bpf/progs/test_kfunc_param_nullable.c | 2 +- .../selftests/bpf/progs/user_ringbuf_fail.c | 22 ++++++++++++++++++++ 4 files changed, 48 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 64cc9d936a13..e35bc1eac52a 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -1686,3 +1686,27 @@ int test_dynptr_skb_small_buff(struct __sk_buff *skb) return !!data; } + +__noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr) +{ + long ret = 0; + /* Avoid leaving this global function empty to avoid having the compiler + * optimize away the call to this global function. + */ + __sink(ret); + return ret; +} + +SEC("?raw_tp") +__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr") +int test_dynptr_reg_type(void *ctx) +{ + struct task_struct *current = NULL; + /* R1 should be holding a PTR_TO_BTF_ID, so this shouldn't be a + * reg->type that can be passed to a function accepting a + * ARG_PTR_TO_DYNPTR | MEM_RDONLY. process_dynptr_func() should catch + * this. 
+ */ + global_call_bpf_dynptr((const struct bpf_dynptr *)current); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index 2dde8e3fe4c9..e68667aec6a6 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -45,7 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) } SEC("?lsm.s/bpf") -__failure __msg("arg#0 expected pointer to stack or dynptr_ptr") +__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr") int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) { unsigned long val = 0; diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c index 7c75e9b8f455..7ac7e1de34d8 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c @@ -29,7 +29,7 @@ int kfunc_dynptr_nullable_test2(struct __sk_buff *skb) } SEC("tc") -__failure __msg("expected pointer to stack or dynptr_ptr") +__failure __msg("expected pointer to stack or const struct bpf_dynptr") int kfunc_dynptr_nullable_test3(struct __sk_buff *skb) { struct bpf_dynptr data; diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c index 11ab25c42c36..54de0389f878 100644 --- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c @@ -221,3 +221,25 @@ int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx) bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0); return 0; } + +__noinline long global_call_bpf_dynptr_data(struct bpf_dynptr *dynptr) +{ + bpf_dynptr_data(dynptr, 0xA, 0xA); + return 0; +} + +static long callback_adjust_bpf_dynptr_reg_off(struct bpf_dynptr *dynptr, + void *ctx) +{ + global_call_bpf_dynptr_data(dynptr += 1024); + return 0; +} + +SEC("?raw_tp") +__failure __msg("dereference of modified dynptr_ptr ptr R1 off=16384 disallowed") +int user_ringbuf_callback_const_ptr_to_dynptr_reg_off(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, + callback_adjust_bpf_dynptr_reg_off, NULL, 0); + return 0; +} -- cgit v1.2.3 From d07980f7373b4c57c85478f8e815cc7b9b394c05 Mon Sep 17 00:00:00 2001 From: Ma Ke Date: Sun, 23 Jun 2024 21:17:53 +0800 Subject: selftests/bpf: Don't close(-1) in serial_test_fexit_stress() Guard close() with extra link_fd[i] > 0 and fexit_fd[i] > 0 check to prevent close(-1). 
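A minimal sketch of the guarded cleanup loop (the full change is in the diff below); slots that were never assigned a valid descriptor are left untouched instead of being passed to close():

    for (i = 0; i < bpf_max_tramp_links; i++) {
            if (link_fd[i] > 0)
                    close(link_fd[i]);
            if (fexit_fd[i] > 0)
                    close(fexit_fd[i]);
    }
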
Signed-off-by: Ma Ke Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240623131753.2133829-1-make24@iscas.ac.cn --- tools/testing/selftests/bpf/prog_tests/fexit_stress.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c index 596536def43d..49b1ffc9af1f 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -50,9 +50,9 @@ void serial_test_fexit_stress(void) out: for (i = 0; i < bpf_max_tramp_links; i++) { - if (link_fd[i]) + if (link_fd[i] > 0) close(link_fd[i]); - if (fexit_fd[i]) + if (fexit_fd[i] > 0) close(fexit_fd[i]); } free(fd); -- cgit v1.2.3 From 0f31c2c61f6923747628c65a0fe36b2d4d7e21b0 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Mon, 24 Jun 2024 20:29:03 +0100 Subject: libbpf: Fix clang compilation error in btf_relocate.c When building with clang for ARCH=i386, the following errors are observed: CC kernel/bpf/btf_relocate.o ./tools/lib/bpf/btf_relocate.c:206:23: error: implicit truncation from 'int' to a one-bit wide bit-field changes value from 1 to -1 [-Werror,-Wsingle-bit-bitfield-constant-conversion] 206 | info[id].needs_size = true; | ^ ~ ./tools/lib/bpf/btf_relocate.c:256:25: error: implicit truncation from 'int' to a one-bit wide bit-field changes value from 1 to -1 [-Werror,-Wsingle-bit-bitfield-constant-conversion] 256 | base_info.needs_size = true; | ^ ~ 2 errors generated. The problem is we use 1-bit, 31-bit bitfields in a signed int. Changing to bool needs_size: 1; unsigned int size:31; ...resolves the error and pahole reports that 4 bytes are used for the underlying representation: $ pahole btf_name_info tools/lib/bpf/btf_relocate.o struct btf_name_info { const char * name; /* 0 8 */ unsigned int needs_size:1; /* 8: 0 4 */ unsigned int size:31; /* 8: 1 4 */ __u32 id; /* 12 4 */ /* size: 16, cachelines: 1, members: 4 */ /* last cacheline: 16 bytes */ }; Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240624192903.854261-1-alan.maguire@oracle.com --- tools/lib/bpf/btf_relocate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c index 2281dbbafa11..17f8b32f94a0 100644 --- a/tools/lib/bpf/btf_relocate.c +++ b/tools/lib/bpf/btf_relocate.c @@ -58,8 +58,8 @@ struct btf_relocate { struct btf_name_info { const char *name; /* set when search requires a size match */ - int needs_size:1, - size:31; + bool needs_size: 1; + unsigned int size: 31; __u32 id; }; -- cgit v1.2.3 From a12978712d9001b060bcc10eaae42ad5102abe2b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 26 Jun 2024 15:47:19 +0200 Subject: selftests/bpf: Move ARRAY_SIZE to bpf_misc.h ARRAY_SIZE is used on multiple places, move its definition in bpf_misc.h header. 
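The shared definition (visible in the bpf_misc.h hunk below) is guarded so that a file which already defines its own ARRAY_SIZE keeps compiling; usage in the progs is unchanged:

    #ifndef ARRAY_SIZE
    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
    #endif

    int vals[] = {1, 2, 3, 4};
    int n = ARRAY_SIZE(vals);       /* 4 */
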
Signed-off-by: Jiri Olsa Signed-off-by: Andrii Nakryiko Reviewed-by: Alan Maguire Link: https://lore.kernel.org/bpf/20240626134719.3893748-1-jolsa@kernel.org --- tools/testing/selftests/bpf/progs/bpf_misc.h | 4 ++++ tools/testing/selftests/bpf/progs/iters.c | 2 -- tools/testing/selftests/bpf/progs/kprobe_multi_session.c | 3 +-- tools/testing/selftests/bpf/progs/linked_list.c | 5 +---- tools/testing/selftests/bpf/progs/netif_receive_skb.c | 5 +---- tools/testing/selftests/bpf/progs/profiler.inc.h | 5 +---- tools/testing/selftests/bpf/progs/setget_sockopt.c | 5 +---- tools/testing/selftests/bpf/progs/test_bpf_ma.c | 4 ---- tools/testing/selftests/bpf/progs/test_sysctl_loop1.c | 5 +---- tools/testing/selftests/bpf/progs/test_sysctl_loop2.c | 5 +---- tools/testing/selftests/bpf/progs/test_sysctl_prog.c | 5 +---- tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c | 1 + tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h | 2 -- tools/testing/selftests/bpf/progs/verifier_subprog_precision.c | 2 -- 14 files changed, 13 insertions(+), 40 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index c0280bd2f340..81097a3f15eb 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -140,4 +140,8 @@ /* make it look to compiler like value is read and written */ #define __sink(expr) asm volatile("" : "+g"(expr)) +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + #endif diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index fe65e0952a1e..16bdc3e25591 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -7,8 +7,6 @@ #include "bpf_misc.h" #include "bpf_compiler.h" -#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0])) - static volatile int zero = 0; int my_pid; diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c index bbba9eb46551..bd8b7fb7061e 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c @@ -4,8 +4,7 @@ #include #include #include "bpf_kfuncs.h" - -#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0])) +#include "bpf_misc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c index f69bf3e30321..421f40835acd 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.c +++ b/tools/testing/selftests/bpf/progs/linked_list.c @@ -4,10 +4,7 @@ #include #include #include "bpf_experimental.h" - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_misc.h" #include "linked_list.h" diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c index c0062645fc68..9e067dcbf607 100644 --- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c +++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c @@ -5,6 +5,7 @@ #include #include #include +#include "bpf_misc.h" #include @@ -23,10 +24,6 @@ bool skip = false; #define BADPTR 0 #endif -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, 1); diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h 
b/tools/testing/selftests/bpf/progs/profiler.inc.h index 6957d9f2805e..8bd1ebd7d6af 100644 --- a/tools/testing/selftests/bpf/progs/profiler.inc.h +++ b/tools/testing/selftests/bpf/progs/profiler.inc.h @@ -9,6 +9,7 @@ #include "err.h" #include "bpf_experimental.h" #include "bpf_compiler.h" +#include "bpf_misc.h" #ifndef NULL #define NULL 0 @@ -133,10 +134,6 @@ struct { __uint(max_entries, 16); } disallowed_exec_inodes SEC(".maps"); -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(arr) (int)(sizeof(arr) / sizeof(arr[0])) -#endif - static INLINE bool IS_ERR(const void* ptr) { return IS_ERR_VALUE((unsigned long)ptr); diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c index 7a438600ae98..60518aed1ffc 100644 --- a/tools/testing/selftests/bpf/progs/setget_sockopt.c +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -6,10 +6,7 @@ #include #include #include - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_misc.h" extern unsigned long CONFIG_HZ __kconfig; diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c index 3494ca30fa7f..4a4e0b8d9b72 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c @@ -7,10 +7,6 @@ #include "bpf_experimental.h" #include "bpf_misc.h" -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - struct generic_map_value { void *data; }; diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c index 7f74077d6622..548660e299a5 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -10,10 +10,7 @@ #include #include "bpf_compiler.h" - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_misc.h" /* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */ #define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */ diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c index 68a75436e8af..81249d119a8b 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c @@ -10,10 +10,7 @@ #include #include "bpf_compiler.h" - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_misc.h" /* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */ #define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */ diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c index efc3c61f7852..bbdd08764789 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c @@ -10,6 +10,7 @@ #include #include "bpf_compiler.h" +#include "bpf_misc.h" /* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */ #define MAX_ULONG_STR_LEN 0xF @@ -17,10 +18,6 @@ /* Max supported length of sysctl value string (pow2). 
*/ #define MAX_VALUE_STR_LEN 0x40 -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - const char tcp_mem_name[] = "net/ipv4/tcp_mem"; static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) { diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c index c8e4553648bf..44ee0d037f95 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c @@ -9,6 +9,7 @@ #include "bpf_kfuncs.h" #include "test_siphash.h" #include "test_tcp_custom_syncookie.h" +#include "bpf_misc.h" #define MAX_PACKET_OFF 0xffff diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h index 29a6a53cf229..f8b1b7e68d2e 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h @@ -7,8 +7,6 @@ #define __packed __attribute__((__packed__)) #define __force -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - #define swap(a, b) \ do { \ typeof(a) __tmp = (a); \ diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c index 4a58e0398e72..6a6fad625f7e 100644 --- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -8,8 +8,6 @@ #include "bpf_misc.h" #include <../../../tools/include/linux/filter.h> -#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) - int vals[] SEC(".data.vals") = {1, 2, 3, 4}; __naked __noinline __used -- cgit v1.2.3 From 89cc8f1c5f22568142b7ad118c738204708e4207 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 30 Jun 2024 00:26:48 +0200 Subject: netfilter: nf_tables: Add flowtable map for xdp offload This adds a small internal mapping table so that a new bpf (xdp) kfunc can perform lookups in a flowtable. As-is, xdp program has access to the device pointer, but no way to do a lookup in a flowtable -- there is no way to obtain the needed struct without questionable stunts. This allows to obtain an nf_flowtable pointer given a net_device structure. In order to keep backward compatibility, the infrastructure allows the user to add a given device to multiple flowtables, but it will always return the first added mapping performing the lookup since it assumes the right configuration is 1:1 mapping between flowtables and net_devices. 
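A rough usage sketch of the lookup side (the helper is added below; the XDP-side caller only arrives in a follow-up patch, so this is purely illustrative):

	struct nf_flowtable *ft;

	rcu_read_lock();
	ft = nf_flowtable_by_dev(dev);	/* first flowtable registered for this net_device, or NULL */
	if (ft) {
		/* ... look up / refresh a flow_offload entry in ft ... */
	}
	rcu_read_unlock();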
Co-developed-by: Lorenzo Bianconi Signed-off-by: Florian Westphal Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Pablo Neira Ayuso Link: https://lore.kernel.org/bpf/9f20e2c36f494b3bf177328718367f636bb0b2ab.1719698275.git.lorenzo@kernel.org --- include/net/netfilter/nf_flow_table.h | 5 ++ net/netfilter/Makefile | 2 +- net/netfilter/nf_flow_table_offload.c | 2 +- net/netfilter/nf_flow_table_xdp.c | 147 ++++++++++++++++++++++++++++++++++ 4 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 net/netfilter/nf_flow_table_xdp.c diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 9abb7ee40d72..d845745207d2 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -305,6 +305,11 @@ struct flow_ports { __be16 source, dest; }; +struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev); +int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable, + struct net_device *dev, + enum flow_block_command cmd); + unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 614815a3ed73..18046872a38a 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -142,7 +142,7 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o # flow table infrastructure obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \ - nf_flow_table_offload.o + nf_flow_table_offload.o nf_flow_table_xdp.o nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index a010b25076ca..ff1a4e36c2b5 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -1192,7 +1192,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, int err; if (!nf_flowtable_hw_offload(flowtable)) - return 0; + return nf_flow_offload_xdp_setup(flowtable, dev, cmd); if (dev->netdev_ops->ndo_setup_tc) err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, diff --git a/net/netfilter/nf_flow_table_xdp.c b/net/netfilter/nf_flow_table_xdp.c new file mode 100644 index 000000000000..e1252d042699 --- /dev/null +++ b/net/netfilter/nf_flow_table_xdp.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include + +struct flow_offload_xdp_ft { + struct list_head head; + struct nf_flowtable *ft; + struct rcu_head rcuhead; +}; + +struct flow_offload_xdp { + struct hlist_node hnode; + unsigned long net_device_addr; + struct list_head head; +}; + +#define NF_XDP_HT_BITS 4 +static DEFINE_HASHTABLE(nf_xdp_hashtable, NF_XDP_HT_BITS); +static DEFINE_MUTEX(nf_xdp_hashtable_lock); + +/* caller must hold rcu read lock */ +struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev) +{ + unsigned long key = (unsigned long)dev; + struct flow_offload_xdp *iter; + + hash_for_each_possible_rcu(nf_xdp_hashtable, iter, hnode, key) { + if (key == iter->net_device_addr) { + struct flow_offload_xdp_ft *ft_elem; + + /* The user is supposed to insert a given net_device + * just into a single nf_flowtable so we always return + * the first element here. 
+ */ + ft_elem = list_first_or_null_rcu(&iter->head, + struct flow_offload_xdp_ft, + head); + return ft_elem ? ft_elem->ft : NULL; + } + } + + return NULL; +} + +static int nf_flowtable_by_dev_insert(struct nf_flowtable *ft, + const struct net_device *dev) +{ + struct flow_offload_xdp *iter, *elem = NULL; + unsigned long key = (unsigned long)dev; + struct flow_offload_xdp_ft *ft_elem; + + ft_elem = kzalloc(sizeof(*ft_elem), GFP_KERNEL_ACCOUNT); + if (!ft_elem) + return -ENOMEM; + + ft_elem->ft = ft; + + mutex_lock(&nf_xdp_hashtable_lock); + + hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) { + if (key == iter->net_device_addr) { + elem = iter; + break; + } + } + + if (!elem) { + elem = kzalloc(sizeof(*elem), GFP_KERNEL_ACCOUNT); + if (!elem) + goto err_unlock; + + elem->net_device_addr = key; + INIT_LIST_HEAD(&elem->head); + hash_add_rcu(nf_xdp_hashtable, &elem->hnode, key); + } + list_add_tail_rcu(&ft_elem->head, &elem->head); + + mutex_unlock(&nf_xdp_hashtable_lock); + + return 0; + +err_unlock: + mutex_unlock(&nf_xdp_hashtable_lock); + kfree(ft_elem); + + return -ENOMEM; +} + +static void nf_flowtable_by_dev_remove(struct nf_flowtable *ft, + const struct net_device *dev) +{ + struct flow_offload_xdp *iter, *elem = NULL; + unsigned long key = (unsigned long)dev; + + mutex_lock(&nf_xdp_hashtable_lock); + + hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) { + if (key == iter->net_device_addr) { + elem = iter; + break; + } + } + + if (elem) { + struct flow_offload_xdp_ft *ft_elem, *ft_next; + + list_for_each_entry_safe(ft_elem, ft_next, &elem->head, head) { + if (ft_elem->ft == ft) { + list_del_rcu(&ft_elem->head); + kfree_rcu(ft_elem, rcuhead); + } + } + + if (list_empty(&elem->head)) + hash_del_rcu(&elem->hnode); + else + elem = NULL; + } + + mutex_unlock(&nf_xdp_hashtable_lock); + + if (elem) { + synchronize_rcu(); + kfree(elem); + } +} + +int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable, + struct net_device *dev, + enum flow_block_command cmd) +{ + switch (cmd) { + case FLOW_BLOCK_BIND: + return nf_flowtable_by_dev_insert(flowtable, dev); + case FLOW_BLOCK_UNBIND: + nf_flowtable_by_dev_remove(flowtable, dev); + return 0; + } + + WARN_ON_ONCE(1); + return 0; +} -- cgit v1.2.3 From 391bb6594fd3a567efb1cd3efc8136c78c4c9e31 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sun, 30 Jun 2024 00:26:49 +0200 Subject: netfilter: Add bpf_xdp_flow_lookup kfunc Introduce bpf_xdp_flow_lookup kfunc in order to perform the lookup of a given flowtable entry based on a fib tuple of incoming traffic. bpf_xdp_flow_lookup can be used as building block to offload in xdp the processing of sw flowtable when hw flowtable is not available. 
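A minimal XDP-side sketch of the intended use; the kfunc prototype is assumed to be declared by the program (or generated from BTF), and the fib tuple fields are assumed to be filled from the parsed packet:

	struct bpf_flowtable_opts opts = {};
	struct flow_offload_tuple_rhash *tuplehash;
	struct bpf_fib_lookup tuple = {
		.ifindex = ctx->ingress_ifindex,
		/* family, l4_protocol, addresses and ports taken from the packet */
	};

	tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts));
	if (!tuplehash)		/* no flowtable entry; opts.error carries the reason */
		return XDP_PASS;
	/* flow is handled by the sw flowtable, forward it from XDP */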
Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Kumar Kartikeya Dwivedi Acked-by: Pablo Neira Ayuso Link: https://lore.kernel.org/bpf/55d38a4e5856f6d1509d823ff4e98aaa6d356097.1719698275.git.lorenzo@kernel.org --- include/net/netfilter/nf_flow_table.h | 10 +++ net/netfilter/Makefile | 5 ++ net/netfilter/nf_flow_table_bpf.c | 121 ++++++++++++++++++++++++++++++++++ net/netfilter/nf_flow_table_inet.c | 2 +- 4 files changed, 137 insertions(+), 1 deletion(-) create mode 100644 net/netfilter/nf_flow_table_bpf.c diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index d845745207d2..b63d53bb9dd6 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -315,6 +315,16 @@ unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); +#if (IS_BUILTIN(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \ + (IS_MODULE(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) +extern int nf_flow_register_bpf(void); +#else +static inline int nf_flow_register_bpf(void) +{ + return 0; +} +#endif + #define MODULE_ALIAS_NF_FLOWTABLE(family) \ MODULE_ALIAS("nf-flowtable-" __stringify(family)) diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 18046872a38a..f0aa4d7ef499 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -144,6 +144,11 @@ obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \ nf_flow_table_offload.o nf_flow_table_xdp.o nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o +ifeq ($(CONFIG_NF_FLOW_TABLE),m) +nf_flow_table-$(CONFIG_DEBUG_INFO_BTF_MODULES) += nf_flow_table_bpf.o +else ifeq ($(CONFIG_NF_FLOW_TABLE),y) +nf_flow_table-$(CONFIG_DEBUG_INFO_BTF) += nf_flow_table_bpf.o +endif obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o diff --git a/net/netfilter/nf_flow_table_bpf.c b/net/netfilter/nf_flow_table_bpf.c new file mode 100644 index 000000000000..4a5f5195f2d2 --- /dev/null +++ b/net/netfilter/nf_flow_table_bpf.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Unstable Flow Table Helpers for XDP hook + * + * These are called from the XDP programs. + * Note that it is allowed to break compatibility for these functions since + * the interface they are exposed through to BPF programs is explicitly + * unstable. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +/* bpf_flowtable_opts - options for bpf flowtable helpers + * @error: out parameter, set for any encountered error + */ +struct bpf_flowtable_opts { + s32 error; +}; + +enum { + NF_BPF_FLOWTABLE_OPTS_SZ = 4, +}; + +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in nf_flow_table BTF"); + +__bpf_kfunc_start_defs(); + +static struct flow_offload_tuple_rhash * +bpf_xdp_flow_tuple_lookup(struct net_device *dev, + struct flow_offload_tuple *tuple, __be16 proto) +{ + struct flow_offload_tuple_rhash *tuplehash; + struct nf_flowtable *nf_flow_table; + struct flow_offload *nf_flow; + + nf_flow_table = nf_flowtable_by_dev(dev); + if (!nf_flow_table) + return ERR_PTR(-ENOENT); + + tuplehash = flow_offload_lookup(nf_flow_table, tuple); + if (!tuplehash) + return ERR_PTR(-ENOENT); + + nf_flow = container_of(tuplehash, struct flow_offload, + tuplehash[tuplehash->tuple.dir]); + flow_offload_refresh(nf_flow_table, nf_flow, false); + + return tuplehash; +} + +__bpf_kfunc struct flow_offload_tuple_rhash * +bpf_xdp_flow_lookup(struct xdp_md *ctx, struct bpf_fib_lookup *fib_tuple, + struct bpf_flowtable_opts *opts, u32 opts_len) +{ + struct xdp_buff *xdp = (struct xdp_buff *)ctx; + struct flow_offload_tuple tuple = { + .iifidx = fib_tuple->ifindex, + .l3proto = fib_tuple->family, + .l4proto = fib_tuple->l4_protocol, + .src_port = fib_tuple->sport, + .dst_port = fib_tuple->dport, + }; + struct flow_offload_tuple_rhash *tuplehash; + __be16 proto; + + if (opts_len != NF_BPF_FLOWTABLE_OPTS_SZ) { + opts->error = -EINVAL; + return NULL; + } + + switch (fib_tuple->family) { + case AF_INET: + tuple.src_v4.s_addr = fib_tuple->ipv4_src; + tuple.dst_v4.s_addr = fib_tuple->ipv4_dst; + proto = htons(ETH_P_IP); + break; + case AF_INET6: + tuple.src_v6 = *(struct in6_addr *)&fib_tuple->ipv6_src; + tuple.dst_v6 = *(struct in6_addr *)&fib_tuple->ipv6_dst; + proto = htons(ETH_P_IPV6); + break; + default: + opts->error = -EAFNOSUPPORT; + return NULL; + } + + tuplehash = bpf_xdp_flow_tuple_lookup(xdp->rxq->dev, &tuple, proto); + if (IS_ERR(tuplehash)) { + opts->error = PTR_ERR(tuplehash); + return NULL; + } + + return tuplehash; +} + +__diag_pop() + +__bpf_kfunc_end_defs(); + +BTF_KFUNCS_START(nf_ft_kfunc_set) +BTF_ID_FLAGS(func, bpf_xdp_flow_lookup, KF_TRUSTED_ARGS | KF_RET_NULL) +BTF_KFUNCS_END(nf_ft_kfunc_set) + +static const struct btf_kfunc_id_set nf_flow_kfunc_set = { + .owner = THIS_MODULE, + .set = &nf_ft_kfunc_set, +}; + +int nf_flow_register_bpf(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, + &nf_flow_kfunc_set); +} +EXPORT_SYMBOL_GPL(nf_flow_register_bpf); diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c index 6eef15648b7b..88787b45e30d 100644 --- a/net/netfilter/nf_flow_table_inet.c +++ b/net/netfilter/nf_flow_table_inet.c @@ -98,7 +98,7 @@ static int __init nf_flow_inet_module_init(void) nft_register_flowtable_type(&flowtable_ipv6); nft_register_flowtable_type(&flowtable_inet); - return 0; + return nf_flow_register_bpf(); } static void __exit nf_flow_inet_module_exit(void) -- cgit v1.2.3 From c77e572d3a8c0e21c5dca4cc2883c7cd8cbe981f Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sun, 30 Jun 2024 00:26:50 +0200 Subject: selftests/bpf: Add selftest for bpf_xdp_flow_lookup kfunc Introduce e2e selftest for bpf_xdp_flow_lookup kfunc through xdp_flowtable utility. 
Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/b74393fb4539aecbbd5ac7883605f86a95fb0b6b.1719698275.git.lorenzo@kernel.org --- tools/testing/selftests/bpf/config | 13 ++ .../selftests/bpf/prog_tests/xdp_flowtable.c | 168 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/xdp_flowtable.c | 144 ++++++++++++++++++ 3 files changed, 325 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c create mode 100644 tools/testing/selftests/bpf/progs/xdp_flowtable.c diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 2fb16da78dce..5291e97df749 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -83,6 +83,19 @@ CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_CONNTRACK_ZONES=y CONFIG_NF_DEFRAG_IPV4=y CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_TABLES=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NF_FLOW_TABLE=y +CONFIG_NF_FLOW_TABLE_INET=y +CONFIG_NETFILTER_NETLINK=y +CONFIG_NFT_FLOW_OFFLOAD=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y CONFIG_NF_NAT=y CONFIG_RC_CORE=y CONFIG_SECURITY=y diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c new file mode 100644 index 000000000000..e1bf141d3401 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#include "xdp_flowtable.skel.h" + +#define TX_NETNS_NAME "ns0" +#define RX_NETNS_NAME "ns1" + +#define TX_NAME "v0" +#define FORWARD_NAME "v1" +#define RX_NAME "d0" + +#define TX_MAC "00:00:00:00:00:01" +#define FORWARD_MAC "00:00:00:00:00:02" +#define RX_MAC "00:00:00:00:00:03" +#define DST_MAC "00:00:00:00:00:04" + +#define TX_ADDR "10.0.0.1" +#define FORWARD_ADDR "10.0.0.2" +#define RX_ADDR "20.0.0.1" +#define DST_ADDR "20.0.0.2" + +#define PREFIX_LEN "8" +#define N_PACKETS 10 +#define UDP_PORT 12345 +#define UDP_PORT_STR "12345" + +static int send_udp_traffic(void) +{ + struct sockaddr_storage addr; + int i, sock; + + if (make_sockaddr(AF_INET, DST_ADDR, UDP_PORT, &addr, NULL)) + return -EINVAL; + + sock = socket(AF_INET, SOCK_DGRAM, 0); + if (sock < 0) + return sock; + + for (i = 0; i < N_PACKETS; i++) { + unsigned char buf[] = { 0xaa, 0xbb, 0xcc }; + int n; + + n = sendto(sock, buf, sizeof(buf), MSG_NOSIGNAL | MSG_CONFIRM, + (struct sockaddr *)&addr, sizeof(addr)); + if (n != sizeof(buf)) { + close(sock); + return -EINVAL; + } + + usleep(50000); /* 50ms */ + } + close(sock); + + return 0; +} + +void test_xdp_flowtable(void) +{ + struct xdp_flowtable *skel = NULL; + struct nstoken *tok = NULL; + int iifindex, stats_fd; + __u32 value, key = 0; + struct bpf_link *link; + + if (SYS_NOFAIL("nft -v")) { + fprintf(stdout, "Missing required nft tool\n"); + test__skip(); + return; + } + + SYS(out, "ip netns add " TX_NETNS_NAME); + SYS(out, "ip netns add " RX_NETNS_NAME); + + tok = open_netns(RX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + SYS(out, "sysctl -qw net.ipv4.conf.all.forwarding=1"); + + SYS(out, "ip link add " TX_NAME " type veth peer " FORWARD_NAME); + SYS(out, "ip link set " TX_NAME " netns " TX_NETNS_NAME); + SYS(out, "ip link set dev " FORWARD_NAME " address " FORWARD_MAC); + SYS(out, + "ip addr add " FORWARD_ADDR "/" PREFIX_LEN " dev " FORWARD_NAME); 
+ SYS(out, "ip link set dev " FORWARD_NAME " up"); + + SYS(out, "ip link add " RX_NAME " type dummy"); + SYS(out, "ip link set dev " RX_NAME " address " RX_MAC); + SYS(out, "ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME); + SYS(out, "ip link set dev " RX_NAME " up"); + + /* configure the flowtable */ + SYS(out, "nft add table ip filter"); + SYS(out, + "nft add flowtable ip filter f { hook ingress priority 0\\; " + "devices = { " FORWARD_NAME ", " RX_NAME " }\\; }"); + SYS(out, + "nft add chain ip filter forward " + "{ type filter hook forward priority 0\\; }"); + SYS(out, + "nft add rule ip filter forward ip protocol udp th dport " + UDP_PORT_STR " flow add @f"); + + /* Avoid ARP calls */ + SYS(out, + "ip -4 neigh add " DST_ADDR " lladdr " DST_MAC " dev " RX_NAME); + + close_netns(tok); + tok = open_netns(TX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + SYS(out, "ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME); + SYS(out, "ip link set dev " TX_NAME " address " TX_MAC); + SYS(out, "ip link set dev " TX_NAME " up"); + SYS(out, "ip route add default via " FORWARD_ADDR); + + close_netns(tok); + tok = open_netns(RX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + iifindex = if_nametoindex(FORWARD_NAME); + if (!ASSERT_NEQ(iifindex, 0, "iifindex")) + goto out; + + skel = xdp_flowtable__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + goto out; + + link = bpf_program__attach_xdp(skel->progs.xdp_flowtable_do_lookup, + iifindex); + if (!ASSERT_OK_PTR(link, "prog_attach")) + goto out; + + close_netns(tok); + tok = open_netns(TX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + if (!ASSERT_OK(send_udp_traffic(), "send udp")) + goto out; + + close_netns(tok); + tok = open_netns(RX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + stats_fd = bpf_map__fd(skel->maps.stats); + if (!ASSERT_OK(bpf_map_lookup_elem(stats_fd, &key, &value), + "bpf_map_update_elem stats")) + goto out; + + ASSERT_GE(value, N_PACKETS - 2, "bpf_xdp_flow_lookup failed"); +out: + xdp_flowtable__destroy(skel); + if (tok) + close_netns(tok); + SYS_NOFAIL("ip netns del " TX_NETNS_NAME); + SYS_NOFAIL("ip netns del " RX_NETNS_NAME); +} diff --git a/tools/testing/selftests/bpf/progs/xdp_flowtable.c b/tools/testing/selftests/bpf/progs/xdp_flowtable.c new file mode 100644 index 000000000000..15209650f73b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xdp_flowtable.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +#define BPF_NO_KFUNC_PROTOTYPES +#include +#include +#include + +#define ETH_P_IP 0x0800 +#define ETH_P_IPV6 0x86dd +#define IP_MF 0x2000 /* "More Fragments" */ +#define IP_OFFSET 0x1fff /* "Fragment Offset" */ +#define AF_INET 2 +#define AF_INET6 10 + +struct bpf_flowtable_opts___local { + s32 error; +}; + +struct flow_offload_tuple_rhash * +bpf_xdp_flow_lookup(struct xdp_md *, struct bpf_fib_lookup *, + struct bpf_flowtable_opts___local *, u32) __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 1); +} stats SEC(".maps"); + +static bool xdp_flowtable_offload_check_iphdr(struct iphdr *iph) +{ + /* ip fragmented traffic */ + if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) + return false; + + /* ip options */ + if (iph->ihl * 4 != sizeof(*iph)) + return false; + + if (iph->ttl <= 1) + return false; + + return true; +} + +static bool xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end, + u8 proto) +{ + if (proto == IPPROTO_TCP) { + struct tcphdr *tcph 
= ports; + + if (tcph + 1 > data_end) + return false; + + if (tcph->fin || tcph->rst) + return false; + } + + return true; +} + +SEC("xdp.frags") +int xdp_flowtable_do_lookup(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + struct bpf_flowtable_opts___local opts = {}; + struct flow_offload_tuple_rhash *tuplehash; + struct bpf_fib_lookup tuple = { + .ifindex = ctx->ingress_ifindex, + }; + void *data = (void *)(long)ctx->data; + struct ethhdr *eth = data; + struct flow_ports *ports; + __u32 *val, key = 0; + + if (eth + 1 > data_end) + return XDP_DROP; + + switch (eth->h_proto) { + case bpf_htons(ETH_P_IP): { + struct iphdr *iph = data + sizeof(*eth); + + ports = (struct flow_ports *)(iph + 1); + if (ports + 1 > data_end) + return XDP_PASS; + + /* sanity check on ip header */ + if (!xdp_flowtable_offload_check_iphdr(iph)) + return XDP_PASS; + + if (!xdp_flowtable_offload_check_tcp_state(ports, data_end, + iph->protocol)) + return XDP_PASS; + + tuple.family = AF_INET; + tuple.tos = iph->tos; + tuple.l4_protocol = iph->protocol; + tuple.tot_len = bpf_ntohs(iph->tot_len); + tuple.ipv4_src = iph->saddr; + tuple.ipv4_dst = iph->daddr; + tuple.sport = ports->source; + tuple.dport = ports->dest; + break; + } + case bpf_htons(ETH_P_IPV6): { + struct in6_addr *src = (struct in6_addr *)tuple.ipv6_src; + struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst; + struct ipv6hdr *ip6h = data + sizeof(*eth); + + ports = (struct flow_ports *)(ip6h + 1); + if (ports + 1 > data_end) + return XDP_PASS; + + if (ip6h->hop_limit <= 1) + return XDP_PASS; + + if (!xdp_flowtable_offload_check_tcp_state(ports, data_end, + ip6h->nexthdr)) + return XDP_PASS; + + tuple.family = AF_INET6; + tuple.l4_protocol = ip6h->nexthdr; + tuple.tot_len = bpf_ntohs(ip6h->payload_len); + *src = ip6h->saddr; + *dst = ip6h->daddr; + tuple.sport = ports->source; + tuple.dport = ports->dest; + break; + } + default: + return XDP_PASS; + } + + tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts)); + if (!tuplehash) + return XDP_PASS; + + val = bpf_map_lookup_elem(&stats, &key); + if (val) + __sync_add_and_fetch(val, 1); + + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 5b747c23f17d791e08fdf4baa7e14b704625518c Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Sat, 29 Jun 2024 11:00:58 +0100 Subject: libbpf: Fix error handling in btf__distill_base() Coverity points out that after calling btf__new_empty_split() the wrong value is checked for error. Fixes: 58e185a0dc35 ("libbpf: Add btf__distill_base() creating split BTF with distilled base BTF") Reported-by: Andrii Nakryiko Signed-off-by: Alan Maguire Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240629100058.2866763-1-alan.maguire@oracle.com --- tools/lib/bpf/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index cd5dd6619214..32c00db3b91b 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -5431,7 +5431,7 @@ int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, * BTF available. 
*/ new_split = btf__new_empty_split(new_base); - if (!new_split_btf) { + if (!new_split) { err = -errno; goto done; } -- cgit v1.2.3 From d1a426171d76b2cdf3dea5d52f6266090e4aa254 Mon Sep 17 00:00:00 2001 From: Pu Lehui Date: Sat, 22 Jun 2024 03:04:35 +0000 Subject: bpf: Use precise image size for struct_ops trampoline MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For trampoline using bpf_prog_pack, we need to generate a rw_image buffer with size of (image_end - image). For regular trampoline, we use the precise image size generated by arch_bpf_trampoline_size to allocate rw_image. But for struct_ops trampoline, we allocate rw_image directly using close to PAGE_SIZE size. We do not need to allocate for that much, as the patch size is usually much smaller than PAGE_SIZE. Let's use precise image size for it too. Signed-off-by: Pu Lehui Signed-off-by: Daniel Borkmann Tested-by: Björn Töpel #riscv Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20240622030437.3973492-2-pulehui@huaweicloud.com --- kernel/bpf/bpf_struct_ops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index a2cf31b14be4..0d515ec57aa5 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -573,7 +573,7 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, } size = arch_prepare_bpf_trampoline(NULL, image + image_off, - image + PAGE_SIZE, + image + image_off + size, model, flags, tlinks, stub_func); if (size <= 0) { if (image != *_image) -- cgit v1.2.3 From 9f1e16fb1fc9826001c69e0551d51fbbcd2d74e9 Mon Sep 17 00:00:00 2001 From: Pu Lehui Date: Sat, 22 Jun 2024 03:04:36 +0000 Subject: riscv, bpf: Fix out-of-bounds issue when preparing trampoline image We get the size of the trampoline image during the dry run phase and allocate memory based on that size. The allocated image will then be populated with instructions during the real patch phase. But after commit 26ef208c209a ("bpf: Use arch_bpf_trampoline_size"), the `im` argument is inconsistent in the dry run and real patch phase. This may cause emit_imm in RV64 to generate a different number of instructions when generating the 'im' address, potentially causing out-of-bounds issues. Let's emit the maximum number of instructions for the "im" address during dry run to fix this problem. Fixes: 26ef208c209a ("bpf: Use arch_bpf_trampoline_size") Signed-off-by: Pu Lehui Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240622030437.3973492-3-pulehui@huaweicloud.com --- arch/riscv/net/bpf_jit_comp64.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index d5cebb0b0afe..e6d690657f3e 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -16,6 +16,8 @@ #include "bpf_jit.h" #define RV_FENTRY_NINSNS 2 +/* imm that allows emit_imm to emit max count insns */ +#define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF #define RV_REG_TCC RV_REG_A6 #define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */ @@ -916,7 +918,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, orig_call += RV_FENTRY_NINSNS * 4; if (flags & BPF_TRAMP_F_CALL_ORIG) { - emit_imm(RV_REG_A0, (const s64)im, ctx); + emit_imm(RV_REG_A0, ctx->insns ? 
(const s64)im : RV_MAX_COUNT_IMM, ctx); ret = emit_call((const u64)__bpf_tramp_enter, true, ctx); if (ret) return ret; @@ -977,7 +979,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, if (flags & BPF_TRAMP_F_CALL_ORIG) { im->ip_epilogue = ctx->insns + ctx->ninsns; - emit_imm(RV_REG_A0, (const s64)im, ctx); + emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx); ret = emit_call((const u64)__bpf_tramp_exit, true, ctx); if (ret) goto out; @@ -1046,6 +1048,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, { int ret; struct rv_jit_context ctx; + u32 size = image_end - image; ctx.ninsns = 0; /* @@ -1059,11 +1062,16 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, ctx.ro_insns = image; ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx); if (ret < 0) - return ret; + goto out; - bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns); + if (WARN_ON(size < ninsns_rvoff(ctx.ninsns))) { + ret = -E2BIG; + goto out; + } - return ninsns_rvoff(ret); + bpf_flush_icache(image, image_end); +out: + return ret < 0 ? ret : size; } int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, -- cgit v1.2.3 From 2382a405c581ae8f39f898055654e2000e7dd0d3 Mon Sep 17 00:00:00 2001 From: Pu Lehui Date: Sat, 22 Jun 2024 03:04:37 +0000 Subject: riscv, bpf: Use bpf_prog_pack for RV64 bpf trampoline MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We used bpf_prog_pack to aggregate bpf programs into huge page to relieve the iTLB pressure on the system. We can apply it to bpf trampoline, as Song had been implemented it in core and x86 [0]. This patch is going to use bpf_prog_pack to RV64 bpf trampoline. Since Song and Puranjay have done a lot of work for bpf_prog_pack on RV64, implementing this function will be easy. Signed-off-by: Pu Lehui Signed-off-by: Daniel Borkmann Tested-by: Björn Töpel #riscv Link: https://lore.kernel.org/all/20231206224054.492250-1-song@kernel.org [0] Link: https://lore.kernel.org/bpf/20240622030437.3973492-4-pulehui@huaweicloud.com --- arch/riscv/net/bpf_jit_comp64.c | 43 +++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index e6d690657f3e..351e1484205e 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -957,7 +957,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, goto out; emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx); emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx); - im->ip_after_call = ctx->insns + ctx->ninsns; + im->ip_after_call = ctx->ro_insns + ctx->ninsns; /* 2 nops reserved for auipc+jalr pair */ emit(rv_nop(), ctx); emit(rv_nop(), ctx); @@ -978,7 +978,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, } if (flags & BPF_TRAMP_F_CALL_ORIG) { - im->ip_epilogue = ctx->insns + ctx->ninsns; + im->ip_epilogue = ctx->ro_insns + ctx->ninsns; emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx); ret = emit_call((const u64)__bpf_tramp_exit, true, ctx); if (ret) @@ -1041,25 +1041,33 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, return ret < 0 ? 
ret : ninsns_rvoff(ctx.ninsns); } -int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, - void *image_end, const struct btf_func_model *m, +void *arch_alloc_bpf_trampoline(unsigned int size) +{ + return bpf_prog_pack_alloc(size, bpf_fill_ill_insns); +} + +void arch_free_bpf_trampoline(void *image, unsigned int size) +{ + bpf_prog_pack_free(image, size); +} + +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, + void *ro_image_end, const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) { int ret; + void *image, *res; struct rv_jit_context ctx; - u32 size = image_end - image; + u32 size = ro_image_end - ro_image; + + image = kvmalloc(size, GFP_KERNEL); + if (!image) + return -ENOMEM; ctx.ninsns = 0; - /* - * The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the - * JITed instructions and later copies it to a RX region (ctx.ro_insns). - * It also uses ctx.ro_insns to calculate offsets for jumps etc. As the - * trampoline image uses the same memory area for writing and execution, - * both ctx.insns and ctx.ro_insns can be set to image. - */ ctx.insns = image; - ctx.ro_insns = image; + ctx.ro_insns = ro_image; ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx); if (ret < 0) goto out; @@ -1069,8 +1077,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, goto out; } - bpf_flush_icache(image, image_end); + res = bpf_arch_text_copy(ro_image, image, size); + if (IS_ERR(res)) { + ret = PTR_ERR(res); + goto out; + } + + bpf_flush_icache(ro_image, ro_image_end); out: + kvfree(image); return ret < 0 ? ret : size; } -- cgit v1.2.3 From 03922e97bc305c6b2e8bc4b7cc765959ca63b05d Mon Sep 17 00:00:00 2001 From: Zhu Jun Date: Wed, 26 Jun 2024 20:19:05 -0700 Subject: selftests/bpf: Delete extra blank lines in test_sockmap Delete extra blank lines inside of test_selftest(). Signed-off-by: Zhu Jun Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240627031905.7133-1-zhujun2@cmss.chinamobile.com --- tools/testing/selftests/bpf/test_sockmap.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 9cba4ec844a5..3e02d7267de8 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -1936,7 +1936,6 @@ static void test_selftests_ktls(int cg_fd, struct sockmap_options *opt) static int test_selftest(int cg_fd, struct sockmap_options *opt) { - test_selftests_sockmap(cg_fd, opt); test_selftests_sockhash(cg_fd, opt); test_selftests_ktls(cg_fd, opt); -- cgit v1.2.3 From d80d61ab0609f7f7168d59ec82ee5f055a4b6be7 Mon Sep 17 00:00:00 2001 From: Tushar Vyavahare Date: Tue, 2 Jul 2024 05:59:15 +0000 Subject: selftests/xsk: Ensure traffic validation proceeds after ring size adjustment in xskxceiver Previously, HW_SW_MIN_RING_SIZE and HW_SW_MAX_RING_SIZE test cases were not validating Tx/Rx traffic at all due to early return after changing HW ring size in testapp_validate_traffic(). Fix the flow by checking return value of set_ring_size() and act upon it rather than terminating the test case there. 
Signed-off-by: Tushar Vyavahare Signed-off-by: Daniel Borkmann Reviewed-by: Maciej Fijalkowski Link: https://lore.kernel.org/bpf/20240702055916.48071-2-tushar.vyavahare@intel.com --- tools/testing/selftests/bpf/xskxceiver.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 2eac0895b0a1..088df53869e8 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -1899,11 +1899,15 @@ static int testapp_validate_traffic(struct test_spec *test) } if (test->set_ring) { - if (ifobj_tx->hw_ring_size_supp) - return set_ring_size(ifobj_tx); - - ksft_test_result_skip("Changing HW ring size not supported.\n"); - return TEST_SKIP; + if (ifobj_tx->hw_ring_size_supp) { + if (set_ring_size(ifobj_tx)) { + ksft_test_result_skip("Failed to change HW ring size.\n"); + return TEST_FAILURE; + } + } else { + ksft_test_result_skip("Changing HW ring size not supported.\n"); + return TEST_SKIP; + } } xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx); -- cgit v1.2.3 From e4a195e2b95e4602c667ed19a20f71218df138c2 Mon Sep 17 00:00:00 2001 From: Tushar Vyavahare Date: Tue, 2 Jul 2024 05:59:16 +0000 Subject: selftests/xsk: Enhance batch size support with dynamic configurations Introduce dynamic adjustment capabilities for fill_size and comp_size parameters to support larger batch sizes beyond the previous 2K limit. Update HW_SW_MAX_RING_SIZE test cases to evaluate AF_XDP's robustness by pushing hardware and software ring sizes to their limits. This test ensures AF_XDP's reliability amidst potential producer/consumer throttling due to maximum ring utilization. Signed-off-by: Tushar Vyavahare Signed-off-by: Daniel Borkmann Reviewed-by: Maciej Fijalkowski Link: https://lore.kernel.org/bpf/20240702055916.48071-3-tushar.vyavahare@intel.com --- tools/testing/selftests/bpf/xskxceiver.c | 26 ++++++++++++++++++++------ tools/testing/selftests/bpf/xskxceiver.h | 2 ++ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 088df53869e8..8144fd145237 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -196,6 +196,12 @@ static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem }; int ret; + if (umem->fill_size) + cfg.fill_size = umem->fill_size; + + if (umem->comp_size) + cfg.comp_size = umem->comp_size; + if (umem->unaligned_mode) cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; @@ -265,6 +271,10 @@ static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_i cfg.bind_flags |= XDP_SHARED_UMEM; if (ifobject->mtu > MAX_ETH_PKT_SIZE) cfg.bind_flags |= XDP_USE_SG; + if (umem->comp_size) + cfg.tx_size = umem->comp_size; + if (umem->fill_size) + cfg.rx_size = umem->fill_size; txr = ifobject->tx_on ? &xsk->tx : NULL; rxr = ifobject->rx_on ? 
&xsk->rx : NULL; @@ -1616,7 +1626,7 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) buffers_to_fill = umem->num_frames; else - buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; + buffers_to_fill = umem->fill_size; ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); if (ret != buffers_to_fill) @@ -2445,7 +2455,7 @@ static int testapp_hw_sw_min_ring_size(struct test_spec *test) static int testapp_hw_sw_max_ring_size(struct test_spec *test) { - u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; + u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4; int ret; test->set_ring = true; @@ -2453,7 +2463,8 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test) test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending; test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending; test->ifobj_rx->umem->num_frames = max_descs; - test->ifobj_rx->xsk->rxqsize = max_descs; + test->ifobj_rx->umem->fill_size = max_descs; + test->ifobj_rx->umem->comp_size = max_descs; test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; @@ -2461,9 +2472,12 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test) if (ret) return ret; - /* Set batch_size to 4095 */ - test->ifobj_tx->xsk->batch_size = max_descs - 1; - test->ifobj_rx->xsk->batch_size = max_descs - 1; + /* Set batch_size to 8152 for testing, as the ice HW ignores the 3 lowest bits when + * updating the Rx HW tail register. + */ + test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8; + test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8; + pkt_stream_replace(test, max_descs, MIN_PKT_SIZE); return testapp_validate_traffic(test); } diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 906de5fab7a3..885c948c5d83 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -80,6 +80,8 @@ struct xsk_umem_info { void *buffer; u32 frame_size; u32 base_addr; + u32 fill_size; + u32 comp_size; bool unaligned_mode; }; -- cgit v1.2.3 From 6801b0aef79db475591c3146a701ea373e4663b7 Mon Sep 17 00:00:00 2001 From: Pu Lehui Date: Tue, 2 Jul 2024 12:19:42 +0000 Subject: riscv, bpf: Add 12-argument support for RV64 bpf trampoline MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds 12 function arguments support for riscv64 bpf trampoline. The current bpf trampoline supports <= sizeof(u64) bytes scalar arguments [0] and <= 16 bytes struct arguments [1]. Therefore, we focus on the situation where scalars are at most XLEN bits and aggregates whose total size does not exceed 2×XLEN bits in the riscv calling convention [2]. 
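In practice every argument therefore occupies one or two 8-byte slots; the first eight slots travel in a0-a7 and any remaining slots are read from the caller's stack frame. A rough sketch of the slot accounting (mirroring the logic in the patch below, not copied verbatim):

	int i, nr_arg_slots = 0, nr_reg_args, nr_stack_args;

	for (i = 0; i < m->nr_args; i++)
		nr_arg_slots += round_up(m->arg_size[i], 8) / 8;	/* scalar: 1 slot, 2*XLEN aggregate: 2 slots */

	nr_reg_args = min(nr_arg_slots, 8);		/* passed in a0-a7 */
	nr_stack_args = nr_arg_slots - nr_reg_args;	/* spilled by the caller, reloaded relative to FP */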
Signed-off-by: Pu Lehui Signed-off-by: Daniel Borkmann Reviewed-by: Björn Töpel Acked-by: Björn Töpel Acked-by: Puranjay Mohan Link: https://elixir.bootlin.com/linux/v6.8/source/kernel/bpf/btf.c#L6184 [0] Link: https://elixir.bootlin.com/linux/v6.8/source/kernel/bpf/btf.c#L6769 [1] Link: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/draft-20230929-e5c800e661a53efe3c2678d71a306323b60eb13b/riscv-abi.pdf [2] Link: https://lore.kernel.org/bpf/20240702121944.1091530-2-pulehui@huaweicloud.com --- arch/riscv/net/bpf_jit_comp64.c | 66 +++++++++++++++++++++++++++++------------ 1 file changed, 47 insertions(+), 19 deletions(-) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 351e1484205e..685c7389ae7e 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -15,6 +15,7 @@ #include #include "bpf_jit.h" +#define RV_MAX_REG_ARGS 8 #define RV_FENTRY_NINSNS 2 /* imm that allows emit_imm to emit max count insns */ #define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF @@ -692,26 +693,45 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, return ret; } -static void store_args(int nregs, int args_off, struct rv_jit_context *ctx) +static void store_args(int nr_arg_slots, int args_off, struct rv_jit_context *ctx) { int i; - for (i = 0; i < nregs; i++) { - emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx); + for (i = 0; i < nr_arg_slots; i++) { + if (i < RV_MAX_REG_ARGS) { + emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx); + } else { + /* skip slots for T0 and FP of traced function */ + emit_ld(RV_REG_T1, 16 + (i - RV_MAX_REG_ARGS) * 8, RV_REG_FP, ctx); + emit_sd(RV_REG_FP, -args_off, RV_REG_T1, ctx); + } args_off -= 8; } } -static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx) +static void restore_args(int nr_reg_args, int args_off, struct rv_jit_context *ctx) { int i; - for (i = 0; i < nregs; i++) { + for (i = 0; i < nr_reg_args; i++) { emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx); args_off -= 8; } } +static void restore_stack_args(int nr_stack_args, int args_off, int stk_arg_off, + struct rv_jit_context *ctx) +{ + int i; + + for (i = 0; i < nr_stack_args; i++) { + emit_ld(RV_REG_T1, -(args_off - RV_MAX_REG_ARGS * 8), RV_REG_FP, ctx); + emit_sd(RV_REG_FP, -stk_arg_off, RV_REG_T1, ctx); + args_off -= 8; + stk_arg_off -= 8; + } +} + static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off, int run_ctx_off, bool save_ret, struct rv_jit_context *ctx) { @@ -784,8 +804,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, { int i, ret, offset; int *branches_off = NULL; - int stack_size = 0, nregs = m->nr_args; - int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off; + int stack_size = 0, nr_arg_slots = 0; + int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off, stk_arg_off; struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; @@ -831,20 +851,21 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, * FP - sreg_off [ callee saved reg ] * * [ pads ] pads for 16 bytes alignment + * + * [ stack_argN ] + * [ ... 
] + * FP - stk_arg_off [ stack_arg1 ] BPF_TRAMP_F_CALL_ORIG */ if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) return -ENOTSUPP; - /* extra regiters for struct arguments */ - for (i = 0; i < m->nr_args; i++) - if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) - nregs += round_up(m->arg_size[i], 8) / 8 - 1; - - /* 8 arguments passed by registers */ - if (nregs > 8) + if (m->nr_args > MAX_BPF_FUNC_ARGS) return -ENOTSUPP; + for (i = 0; i < m->nr_args; i++) + nr_arg_slots += round_up(m->arg_size[i], 8) / 8; + /* room of trampoline frame to store return address and frame pointer */ stack_size += 16; @@ -854,7 +875,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, retval_off = stack_size; } - stack_size += nregs * 8; + stack_size += nr_arg_slots * 8; args_off = stack_size; stack_size += 8; @@ -871,8 +892,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, stack_size += 8; sreg_off = stack_size; + if (nr_arg_slots - RV_MAX_REG_ARGS > 0) + stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8; + stack_size = round_up(stack_size, STACK_ALIGN); + /* room for args on stack must be at the top of stack */ + stk_arg_off = stack_size; + if (!is_struct_ops) { /* For the trampoline called from function entry, * the frame of traced function and the frame of @@ -908,10 +935,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx); } - emit_li(RV_REG_T1, nregs, ctx); + emit_li(RV_REG_T1, nr_arg_slots, ctx); emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx); - store_args(nregs, args_off, ctx); + store_args(nr_arg_slots, args_off, ctx); /* skip to actual body of traced function */ if (flags & BPF_TRAMP_F_SKIP_FRAME) @@ -951,7 +978,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, } if (flags & BPF_TRAMP_F_CALL_ORIG) { - restore_args(nregs, args_off, ctx); + restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx); + restore_stack_args(nr_arg_slots - RV_MAX_REG_ARGS, args_off, stk_arg_off, ctx); ret = emit_call((const u64)orig_call, true, ctx); if (ret) goto out; @@ -986,7 +1014,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, } if (flags & BPF_TRAMP_F_RESTORE_REGS) - restore_args(nregs, args_off, ctx); + restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx); if (save_ret) { emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx); -- cgit v1.2.3 From 5d52ad36683af64f04da295d67fb943f94658929 Mon Sep 17 00:00:00 2001 From: Pu Lehui Date: Tue, 2 Jul 2024 12:19:43 +0000 Subject: selftests/bpf: Factor out many args tests from tracing_struct Factor out many args tests from tracing_struct and rename some function names to make more sense. Meanwhile, remove unnecessary skeleton detach operation as it will be covered by skeleton destroy operation. 
Signed-off-by: Pu Lehui Signed-off-by: Daniel Borkmann Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20240702121944.1091530-3-pulehui@huaweicloud.com --- .../selftests/bpf/prog_tests/tracing_struct.c | 30 +++++++++-- tools/testing/selftests/bpf/progs/tracing_struct.c | 54 ------------------- .../selftests/bpf/progs/tracing_struct_many_args.c | 60 ++++++++++++++++++++++ 3 files changed, 86 insertions(+), 58 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/tracing_struct_many_args.c diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index fe0fb0c9849a..cb2a95da2617 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -3,8 +3,9 @@ #include #include "tracing_struct.skel.h" +#include "tracing_struct_many_args.skel.h" -static void test_fentry(void) +static void test_struct_args(void) { struct tracing_struct *skel; int err; @@ -55,6 +56,25 @@ static void test_fentry(void) ASSERT_EQ(skel->bss->t6, 1, "t6 ret"); +destroy_skel: + tracing_struct__destroy(skel); +} + +static void test_struct_many_args(void) +{ + struct tracing_struct_many_args *skel; + int err; + + skel = tracing_struct_many_args__open_and_load(); + if (!ASSERT_OK_PTR(skel, "tracing_struct_many_args__open_and_load")) + return; + + err = tracing_struct_many_args__attach(skel); + if (!ASSERT_OK(err, "tracing_struct_many_args__attach")) + goto destroy_skel; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + ASSERT_EQ(skel->bss->t7_a, 16, "t7:a"); ASSERT_EQ(skel->bss->t7_b, 17, "t7:b"); ASSERT_EQ(skel->bss->t7_c, 18, "t7:c"); @@ -74,12 +94,14 @@ static void test_fentry(void) ASSERT_EQ(skel->bss->t8_g, 23, "t8:g"); ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret"); - tracing_struct__detach(skel); destroy_skel: - tracing_struct__destroy(skel); + tracing_struct_many_args__destroy(skel); } void test_tracing_struct(void) { - test_fentry(); + if (test__start_subtest("struct_args")) + test_struct_args(); + if (test__start_subtest("struct_many_args")) + test_struct_many_args(); } diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c index 515daef3c84b..c435a3a8328a 100644 --- a/tools/testing/selftests/bpf/progs/tracing_struct.c +++ b/tools/testing/selftests/bpf/progs/tracing_struct.c @@ -18,11 +18,6 @@ struct bpf_testmod_struct_arg_3 { int b[]; }; -struct bpf_testmod_struct_arg_4 { - u64 a; - int b; -}; - long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs; __u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3; long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret; @@ -30,9 +25,6 @@ long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret; long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret; long t5_ret; int t6; -long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret; -long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret; - SEC("fentry/bpf_testmod_test_struct_arg_1") int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c) @@ -138,50 +130,4 @@ int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a) return 0; } -SEC("fentry/bpf_testmod_test_struct_arg_7") -int BPF_PROG2(test_struct_arg_12, __u64, a, void *, b, short, c, int, d, - void *, e, struct bpf_testmod_struct_arg_4, f) -{ - t7_a = a; - t7_b = (long)b; - t7_c = c; - t7_d = d; - t7_e = (long)e; - t7_f_a = f.a; - t7_f_b = f.b; - return 0; -} - -SEC("fexit/bpf_testmod_test_struct_arg_7") -int BPF_PROG2(test_struct_arg_13, __u64, a, 
void *, b, short, c, int, d, - void *, e, struct bpf_testmod_struct_arg_4, f, int, ret) -{ - t7_ret = ret; - return 0; -} - -SEC("fentry/bpf_testmod_test_struct_arg_8") -int BPF_PROG2(test_struct_arg_14, __u64, a, void *, b, short, c, int, d, - void *, e, struct bpf_testmod_struct_arg_4, f, int, g) -{ - t8_a = a; - t8_b = (long)b; - t8_c = c; - t8_d = d; - t8_e = (long)e; - t8_f_a = f.a; - t8_f_b = f.b; - t8_g = g; - return 0; -} - -SEC("fexit/bpf_testmod_test_struct_arg_8") -int BPF_PROG2(test_struct_arg_15, __u64, a, void *, b, short, c, int, d, - void *, e, struct bpf_testmod_struct_arg_4, f, int, g, - int, ret) -{ - t8_ret = ret; - return 0; -} - char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c new file mode 100644 index 000000000000..3de4bb918178 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +struct bpf_testmod_struct_arg_4 { + u64 a; + int b; +}; + +long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret; +long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret; + +SEC("fentry/bpf_testmod_test_struct_arg_7") +int BPF_PROG2(test_struct_many_args_1, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f) +{ + t7_a = a; + t7_b = (long)b; + t7_c = c; + t7_d = d; + t7_e = (long)e; + t7_f_a = f.a; + t7_f_b = f.b; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_7") +int BPF_PROG2(test_struct_many_args_2, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, ret) +{ + t7_ret = ret; + return 0; +} + +SEC("fentry/bpf_testmod_test_struct_arg_8") +int BPF_PROG2(test_struct_many_args_3, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, g) +{ + t8_a = a; + t8_b = (long)b; + t8_c = c; + t8_d = d; + t8_e = (long)e; + t8_f_a = f.a; + t8_f_b = f.b; + t8_g = g; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_8") +int BPF_PROG2(test_struct_many_args_4, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, g, + int, ret) +{ + t8_ret = ret; + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 9474f72cd6573ee788013147e3590be4a28e085a Mon Sep 17 00:00:00 2001 From: Pu Lehui Date: Tue, 2 Jul 2024 12:19:44 +0000 Subject: selftests/bpf: Add testcase where 7th argment is struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add testcase where 7th argument is struct for architectures with 8 argument registers, and increase the complexity of the struct. 
Signed-off-by: Pu Lehui Signed-off-by: Daniel Borkmann Reviewed-by: Björn Töpel Acked-by: Björn Töpel Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20240702121944.1091530-4-pulehui@huaweicloud.com --- tools/testing/selftests/bpf/DENYLIST.aarch64 | 1 + .../selftests/bpf/bpf_testmod/bpf_testmod.c | 19 ++++++++++++ .../selftests/bpf/prog_tests/tracing_struct.c | 14 +++++++++ .../selftests/bpf/progs/tracing_struct_many_args.c | 35 ++++++++++++++++++++++ 4 files changed, 69 insertions(+) diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 index 0445ac38bc07..3c7c3e79aa93 100644 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64 @@ -6,6 +6,7 @@ kprobe_multi_test # needs CONFIG_FPROBE module_attach # prog 'kprobe_multi': failed to auto-attach: -95 fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524 fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524 +tracing_struct/struct_many_args # struct_many_args:FAIL:tracing_struct_many_args__attach unexpected error: -524 fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95 fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95 fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95 diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index d8bd01d8560b..f8962a1dd397 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -53,6 +53,13 @@ struct bpf_testmod_struct_arg_4 { int b; }; +struct bpf_testmod_struct_arg_5 { + char a; + short b; + int c; + long d; +}; + __bpf_hook_start(); noinline int @@ -110,6 +117,15 @@ bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e, return bpf_testmod_test_struct_arg_result; } +noinline int +bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f, + short g, struct bpf_testmod_struct_arg_5 h, long i) +{ + bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e + + f + g + h.a + h.b + h.c + h.d + i; + return bpf_testmod_test_struct_arg_result; +} + noinline int bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) { bpf_testmod_test_struct_arg_result = a->a; @@ -305,6 +321,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3}; struct bpf_testmod_struct_arg_3 *struct_arg3; struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22}; + struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26}; int i = 1; while (bpf_testmod_return_ptr(i)) @@ -319,6 +336,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, (void *)20, struct_arg4); (void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19, (void *)20, struct_arg4, 23); + (void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20, + 21, 22, struct_arg5, 27); (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index cb2a95da2617..19e68d4b3532 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -94,6 +94,20 @@ static void 
test_struct_many_args(void) ASSERT_EQ(skel->bss->t8_g, 23, "t8:g"); ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret"); + ASSERT_EQ(skel->bss->t9_a, 16, "t9:a"); + ASSERT_EQ(skel->bss->t9_b, 17, "t9:b"); + ASSERT_EQ(skel->bss->t9_c, 18, "t9:c"); + ASSERT_EQ(skel->bss->t9_d, 19, "t9:d"); + ASSERT_EQ(skel->bss->t9_e, 20, "t9:e"); + ASSERT_EQ(skel->bss->t9_f, 21, "t9:f"); + ASSERT_EQ(skel->bss->t9_g, 22, "t9:f"); + ASSERT_EQ(skel->bss->t9_h_a, 23, "t9:h.a"); + ASSERT_EQ(skel->bss->t9_h_b, 24, "t9:h.b"); + ASSERT_EQ(skel->bss->t9_h_c, 25, "t9:h.c"); + ASSERT_EQ(skel->bss->t9_h_d, 26, "t9:h.d"); + ASSERT_EQ(skel->bss->t9_i, 27, "t9:i"); + ASSERT_EQ(skel->bss->t9_ret, 258, "t9 ret"); + destroy_skel: tracing_struct_many_args__destroy(skel); } diff --git a/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c index 3de4bb918178..4742012ace06 100644 --- a/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c +++ b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c @@ -8,8 +8,16 @@ struct bpf_testmod_struct_arg_4 { int b; }; +struct bpf_testmod_struct_arg_5 { + char a; + short b; + int c; + long d; +}; + long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret; long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret; +long t9_a, t9_b, t9_c, t9_d, t9_e, t9_f, t9_g, t9_h_a, t9_h_b, t9_h_c, t9_h_d, t9_i, t9_ret; SEC("fentry/bpf_testmod_test_struct_arg_7") int BPF_PROG2(test_struct_many_args_1, __u64, a, void *, b, short, c, int, d, @@ -57,4 +65,31 @@ int BPF_PROG2(test_struct_many_args_4, __u64, a, void *, b, short, c, int, d, return 0; } +SEC("fentry/bpf_testmod_test_struct_arg_9") +int BPF_PROG2(test_struct_many_args_5, __u64, a, void *, b, short, c, int, d, void *, e, + char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i) +{ + t9_a = a; + t9_b = (long)b; + t9_c = c; + t9_d = d; + t9_e = (long)e; + t9_f = f; + t9_g = g; + t9_h_a = h.a; + t9_h_b = h.b; + t9_h_c = h.c; + t9_h_d = h.d; + t9_i = i; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_9") +int BPF_PROG2(test_struct_many_args_6, __u64, a, void *, b, short, c, int, d, void *, e, + char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i, int, ret) +{ + t9_ret = ret; + return 0; +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From da5f8fd1f0d393d5eaaba9ad8c22d1c26bb2bf9b Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Tue, 2 Jul 2024 21:11:50 +0800 Subject: bpftool: Mount bpffs when pinmaps path not under the bpffs As Quentin said [0], BPF map pinning will fail if the pinmaps path is not under the bpffs, like: libbpf: specified path /home/ubuntu/test/sock_ops_map is not on BPF FS Error: failed to pin all maps [0] https://github.com/libbpf/bpftool/issues/146 Fixes: 3767a94b3253 ("bpftool: add pinmaps argument to the load/loadall") Signed-off-by: Tao Chen Signed-off-by: Daniel Borkmann Tested-by: Quentin Monnet Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20240702131150.15622-1-chen.dylane@gmail.com --- tools/bpf/bpftool/prog.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 1a501cf09e78..40ea743d139f 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1813,6 +1813,10 @@ offload_dev: } if (pinmaps) { + err = create_and_mount_bpffs_dir(pinmaps); + if (err) + goto err_unpin; + err = bpf_object__pin_maps(obj, pinmaps); if (err) { p_err("failed to pin all maps"); -- cgit v1.2.3 From df34ec9db6f521118895f22795da49f2ec01f8cf Mon 
Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:19 +0200 Subject: bpf: Fix atomic probe zero-extension Zero-extending results of atomic probe operations fails with: verifier bug. zext_dst is set, but no reg is defined The problem is that insn_def_regno() handles BPF_ATOMICs, but not BPF_PROBE_ATOMICs. Fix by adding the missing condition. Fixes: d503a04f8bc0 ("bpf: Add support for certain atomics in bpf_arena to x86 JIT") Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Acked-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-2-iii@linux.ibm.com --- kernel/bpf/verifier.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d3927d819465..e25ad5fb9115 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3217,7 +3217,8 @@ static int insn_def_regno(const struct bpf_insn *insn) case BPF_ST: return -1; case BPF_STX: - if (BPF_MODE(insn->code) == BPF_ATOMIC && + if ((BPF_MODE(insn->code) == BPF_ATOMIC || + BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) && (insn->imm & BPF_FETCH)) { if (insn->imm == BPF_CMPXCHG) return BPF_REG_0; -- cgit v1.2.3 From d0736d8c491ddc7d31c7f839d281c907366e2562 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:20 +0200 Subject: s390/bpf: Factor out emitting probe nops The upcoming arena support for the loop-based BPF_XCHG implementation requires emitting nop and extable entries separately. Move nop handling into a separate function, and keep track of the nop offset. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-3-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 62 ++++++++++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 22 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 4be8f5cadd02..260e7009784b 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -693,24 +693,52 @@ bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) return true; } +/* + * A single BPF probe instruction + */ +struct bpf_jit_probe { + int prg; /* JITed instruction offset */ + int nop_prg; /* JITed nop offset */ +}; + +static void bpf_jit_probe_init(struct bpf_jit_probe *probe) +{ + probe->prg = -1; + probe->nop_prg = -1; +} + +/* + * Handlers of certain exceptions leave psw.addr pointing to the instruction + * directly after the failing one. Therefore, create two exception table + * entries and also add a nop in case two probing instructions come directly + * after each other. + */ +static void bpf_jit_probe_emit_nop(struct bpf_jit *jit, + struct bpf_jit_probe *probe) +{ + probe->nop_prg = jit->prg; + /* bcr 0,%0 */ + _EMIT2(0x0700); +} + static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, - int probe_prg, int nop_prg) + struct bpf_jit_probe *probe) { struct exception_table_entry *ex; - int reg, prg; + int i, prg, reg; s64 delta; u8 *insn; - int i; + bpf_jit_probe_emit_nop(jit, probe); if (!fp->aux->extable) /* Do nothing during early JIT passes. */ return 0; - insn = jit->prg_buf + probe_prg; + insn = jit->prg_buf + probe->prg; reg = get_probe_mem_regno(insn); if (WARN_ON_ONCE(reg < 0)) /* JIT bug - unexpected probe instruction. */ return -1; - if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg)) + if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg)) /* JIT bug - gap between probe and nop instructions. 
*/ return -1; for (i = 0; i < 2; i++) { @@ -719,7 +747,7 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, return -1; ex = &fp->aux->extable[jit->excnt]; /* Add extable entries for probe and nop instructions. */ - prg = i == 0 ? probe_prg : nop_prg; + prg = i == 0 ? probe->prg : probe->nop_prg; delta = jit->prg_buf + prg - (u8 *)&ex->insn; if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) /* JIT bug - code and extable must be close. */ @@ -729,7 +757,7 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, * Always land on the nop. Note that extable infrastructure * ignores fixup field, it is handled by ex_handler_bpf(). */ - delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup; + delta = jit->prg_buf + probe->nop_prg - (u8 *)&ex->fixup; if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) /* JIT bug - landing pad and extable must be close. */ return -1; @@ -782,19 +810,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, s32 branch_oc_off = insn->off; u32 dst_reg = insn->dst_reg; u32 src_reg = insn->src_reg; + struct bpf_jit_probe probe; int last, insn_count = 1; u32 *addrs = jit->addrs; s32 imm = insn->imm; s16 off = insn->off; - int probe_prg = -1; unsigned int mask; - int nop_prg; int err; + bpf_jit_probe_init(&probe); if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) == BPF_PROBE_MEM || BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) - probe_prg = jit->prg; + probe.prg = jit->prg; switch (insn->code) { /* @@ -1897,18 +1925,8 @@ branch_oc: return -1; } - if (probe_prg != -1) { - /* - * Handlers of certain exceptions leave psw.addr pointing to - * the instruction directly after the failing one. Therefore, - * create two exception table entries and also add a nop in - * case two probing instructions come directly after each - * other. - */ - nop_prg = jit->prg; - /* bcr 0,%0 */ - _EMIT2(0x0700); - err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg); + if (probe.prg != -1) { + err = bpf_jit_probe_mem(jit, fp, &probe); if (err < 0) return err; } -- cgit v1.2.3 From 9a048587269174f218e8d8d737ebfa628589358f Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:21 +0200 Subject: s390/bpf: Get rid of get_probe_mem_regno() Commit 7fc8c362e782 ("s390/bpf: encode register within extable entry") introduced explicit passing of the number of the register to be cleared to ex_handler_bpf(), which replaced deducing it from the respective native load instruction using get_probe_mem_regno(). Replace the second and last usage in the same manner, and remove this function. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-4-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 33 +++++++-------------------------- 1 file changed, 7 insertions(+), 26 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 260e7009784b..d9d79aa2be1b 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -667,25 +667,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) jit->prg += sizeof(struct bpf_plt); } -static int get_probe_mem_regno(const u8 *insn) -{ - /* - * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have - * destination register at the same position. 
- */ - if (insn[0] != 0xe3) /* common prefix */ - return -1; - if (insn[5] != 0x90 && /* llgc */ - insn[5] != 0x91 && /* llgh */ - insn[5] != 0x16 && /* llgf */ - insn[5] != 0x04 && /* lg */ - insn[5] != 0x77 && /* lgb */ - insn[5] != 0x15 && /* lgh */ - insn[5] != 0x14) /* lgf */ - return -1; - return insn[1] >> 4; -} - bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) { regs->psw.addr = extable_fixup(x); @@ -699,12 +680,14 @@ bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) struct bpf_jit_probe { int prg; /* JITed instruction offset */ int nop_prg; /* JITed nop offset */ + int reg; /* Register to clear on exception */ }; static void bpf_jit_probe_init(struct bpf_jit_probe *probe) { probe->prg = -1; probe->nop_prg = -1; + probe->reg = -1; } /* @@ -725,7 +708,7 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, struct bpf_jit_probe *probe) { struct exception_table_entry *ex; - int i, prg, reg; + int i, prg; s64 delta; u8 *insn; @@ -734,10 +717,6 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, /* Do nothing during early JIT passes. */ return 0; insn = jit->prg_buf + probe->prg; - reg = get_probe_mem_regno(insn); - if (WARN_ON_ONCE(reg < 0)) - /* JIT bug - unexpected probe instruction. */ - return -1; if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg)) /* JIT bug - gap between probe and nop instructions. */ return -1; @@ -763,7 +742,7 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, return -1; ex->fixup = delta; ex->type = EX_TYPE_BPF; - ex->data = reg; + ex->data = probe->reg; jit->excnt++; } return 0; @@ -821,8 +800,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, bpf_jit_probe_init(&probe); if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) == BPF_PROBE_MEM || - BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) + BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) { probe.prg = jit->prg; + probe.reg = reg2hex[dst_reg]; + } switch (insn->code) { /* -- cgit v1.2.3 From 89b933a2013794d8272d432591a2a7a9c41f6351 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:22 +0200 Subject: s390/bpf: Introduce pre- and post- probe functions Currently probe insns are handled by two "if" statements at the beginning and at the end of bpf_jit_insn(). The first one needs to be in sync with the huge insn->code statement that follows it, which was not a problem so far, since the check is small. The introduction of arena will make it significantly larger, and it will no longer be obvious whether it is in sync with the opcode switch. Move these statements to the new bpf_jit_probe_load_pre() and bpf_jit_probe_post() functions, and call them only from cases that need them. 
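The per-opcode pattern that results from this refactoring, excerpted from the BPF_LDX | BPF_B case in the diff below, looks like this:

  bpf_jit_probe_load_pre(jit, insn, &probe);
  /* llgc %dst,0(off,%src) */
  EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
  err = bpf_jit_probe_post(jit, fp, &probe);
  if (err < 0)
          return err;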
Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-5-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 58 +++++++++++++++++++++++++++++++++----------- 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index d9d79aa2be1b..582fa3830772 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -704,14 +704,28 @@ static void bpf_jit_probe_emit_nop(struct bpf_jit *jit, _EMIT2(0x0700); } -static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, - struct bpf_jit_probe *probe) +static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn, + struct bpf_jit_probe *probe) +{ + if (BPF_MODE(insn->code) != BPF_PROBE_MEM && + BPF_MODE(insn->code) != BPF_PROBE_MEMSX) + return; + + probe->prg = jit->prg; + probe->reg = reg2hex[insn->dst_reg]; +} + +static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp, + struct bpf_jit_probe *probe) { struct exception_table_entry *ex; int i, prg; s64 delta; u8 *insn; + if (probe->prg == -1) + /* The probe is not armed. */ + return 0; bpf_jit_probe_emit_nop(jit, probe); if (!fp->aux->extable) /* Do nothing during early JIT passes. */ @@ -798,12 +812,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int err; bpf_jit_probe_init(&probe); - if (BPF_CLASS(insn->code) == BPF_LDX && - (BPF_MODE(insn->code) == BPF_PROBE_MEM || - BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) { - probe.prg = jit->prg; - probe.reg = reg2hex[dst_reg]; - } switch (insn->code) { /* @@ -1497,51 +1505,79 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, */ case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_B: + bpf_jit_probe_load_pre(jit, insn, &probe); /* llgc %dst,0(off,%src) */ EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; if (insn_is_zext(&insn[1])) insn_count = 2; break; case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: + bpf_jit_probe_load_pre(jit, insn, &probe); /* lgb %dst,0(off,%src) */ EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_H: + bpf_jit_probe_load_pre(jit, insn, &probe); /* llgh %dst,0(off,%src) */ EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; if (insn_is_zext(&insn[1])) insn_count = 2; break; case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: + bpf_jit_probe_load_pre(jit, insn, &probe); /* lgh %dst,0(off,%src) */ EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_W: + bpf_jit_probe_load_pre(jit, insn, &probe); /* llgf %dst,off(%src) */ jit->seen |= SEEN_MEM; EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return 
err; if (insn_is_zext(&insn[1])) insn_count = 2; break; case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: + bpf_jit_probe_load_pre(jit, insn, &probe); /* lgf %dst,off(%src) */ jit->seen |= SEEN_MEM; EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; break; case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + bpf_jit_probe_load_pre(jit, insn, &probe); /* lg %dst,0(off,%src) */ jit->seen |= SEEN_MEM; EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; break; /* * BPF_JMP / CALL @@ -1906,12 +1942,6 @@ branch_oc: return -1; } - if (probe.prg != -1) { - err = bpf_jit_probe_mem(jit, fp, &probe); - if (err < 0) - return err; - } - return insn_count; } -- cgit v1.2.3 From a1c04bcc41f9638460a9c68f894fb770596380de Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:23 +0200 Subject: s390/bpf: Land on the next JITed instruction after exception Currently we land on the nop, which is unnecessary: we can just as well begin executing the next instruction. Furthermore, the upcoming arena support for the loop-based BPF_XCHG implementation will require landing on an instruction that comes after the loop. So land on the next JITed instruction, which covers both cases. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-6-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 582fa3830772..ecd53f8f0602 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -747,10 +747,11 @@ static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp, return -1; ex->insn = delta; /* - * Always land on the nop. Note that extable infrastructure - * ignores fixup field, it is handled by ex_handler_bpf(). + * Land on the current instruction. Note that the extable + * infrastructure ignores the fixup field; it is handled by + * ex_handler_bpf(). */ - delta = jit->prg_buf + probe->nop_prg - (u8 *)&ex->fixup; + delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup; if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) /* JIT bug - landing pad and extable must be close. */ return -1; -- cgit v1.2.3 From 4d3a453b434fd2f389960890ae6d767f8d50c403 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:24 +0200 Subject: s390/bpf: Support BPF_PROBE_MEM32 BPF_PROBE_MEM32 is a new mode for LDX, ST and STX instructions. The JIT is supposed to add the start address of the kernel arena mapping to the %dst register, and use a probing variant of the respective memory access. Reuse the existing probing infrastructure for that. Put the arena address into the literal pool, load it into %r1 and use that as an index register. Do not clear any registers in ex_handler_bpf() for failing ST and STX instructions. 
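For context, BPF_PROBE_MEM32 loads and stores are what the verifier generates for accesses through arena pointers. Below is a minimal BPF-side sketch in the style of the arena selftests that appear later in this series; the map definition, the __arena annotation and the bpf_arena_alloc_pages() declaration are assumed to come from the selftest headers and are not part of this patch:

  struct {
          __uint(type, BPF_MAP_TYPE_ARENA);
          __uint(map_flags, BPF_F_MMAPABLE);
          __uint(max_entries, 10);
  } arena SEC(".maps");

  SEC("syscall")
  int touch_arena(const void *ctx)
  {
          __u64 __arena *p;

          p = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
          if (!p)
                  return 1;
          *p = 42;                  /* expected to become a probed store */
          return *p == 42 ? 0 : 2;  /* expected to become a probed load */
  }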
Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-7-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 137 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 110 insertions(+), 27 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index ecd53f8f0602..4b62b5162dfb 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -53,6 +53,7 @@ struct bpf_jit { int excnt; /* Number of exception table entries */ int prologue_plt_ret; /* Return address for prologue hotpatch PLT */ int prologue_plt; /* Start of prologue hotpatch PLT */ + int kern_arena; /* Pool offset of kernel arena address */ }; #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ @@ -670,7 +671,8 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) { regs->psw.addr = extable_fixup(x); - regs->gprs[x->data] = 0; + if (x->data != -1) + regs->gprs[x->data] = 0; return true; } @@ -681,6 +683,7 @@ struct bpf_jit_probe { int prg; /* JITed instruction offset */ int nop_prg; /* JITed nop offset */ int reg; /* Register to clear on exception */ + int arena_reg; /* Register to use for arena addressing */ }; static void bpf_jit_probe_init(struct bpf_jit_probe *probe) @@ -688,6 +691,7 @@ static void bpf_jit_probe_init(struct bpf_jit_probe *probe) probe->prg = -1; probe->nop_prg = -1; probe->reg = -1; + probe->arena_reg = REG_0; } /* @@ -708,13 +712,31 @@ static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn, struct bpf_jit_probe *probe) { if (BPF_MODE(insn->code) != BPF_PROBE_MEM && - BPF_MODE(insn->code) != BPF_PROBE_MEMSX) + BPF_MODE(insn->code) != BPF_PROBE_MEMSX && + BPF_MODE(insn->code) != BPF_PROBE_MEM32) return; + if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { + /* lgrl %r1,kern_arena */ + EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); + probe->arena_reg = REG_W1; + } probe->prg = jit->prg; probe->reg = reg2hex[insn->dst_reg]; } +static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn, + struct bpf_jit_probe *probe) +{ + if (BPF_MODE(insn->code) != BPF_PROBE_MEM32) + return; + + /* lgrl %r1,kern_arena */ + EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); + probe->arena_reg = REG_W1; + probe->prg = jit->prg; +} + static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp, struct bpf_jit_probe *probe) { @@ -1384,51 +1406,99 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, * BPF_ST(X) */ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */ - /* stcy %src,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off); + case BPF_STX | BPF_PROBE_MEM32 | BPF_B: + bpf_jit_probe_store_pre(jit, insn, &probe); + /* stcy %src,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ - /* sthy %src,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off); + case BPF_STX | BPF_PROBE_MEM32 | BPF_H: + bpf_jit_probe_store_pre(jit, insn, &probe); + /* sthy %src,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; 
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ - /* sty %src,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off); + case BPF_STX | BPF_PROBE_MEM32 | BPF_W: + bpf_jit_probe_store_pre(jit, insn, &probe); + /* sty %src,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ - /* stg %src,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off); + case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: + bpf_jit_probe_store_pre(jit, insn, &probe); + /* stg %src,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ + case BPF_ST | BPF_PROBE_MEM32 | BPF_B: /* lhi %w0,imm */ EMIT4_IMM(0xa7080000, REG_W0, (u8) imm); - /* stcy %w0,off(dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off); + bpf_jit_probe_store_pre(jit, insn, &probe); + /* stcy %w0,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ + case BPF_ST | BPF_PROBE_MEM32 | BPF_H: /* lhi %w0,imm */ EMIT4_IMM(0xa7080000, REG_W0, (u16) imm); - /* sthy %w0,off(dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off); + bpf_jit_probe_store_pre(jit, insn, &probe); + /* sthy %w0,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ + case BPF_ST | BPF_PROBE_MEM32 | BPF_W: /* llilf %w0,imm */ EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm); - /* sty %w0,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off); + bpf_jit_probe_store_pre(jit, insn, &probe); + /* sty %w0,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ + case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: /* lgfi %w0,imm */ EMIT6_IMM(0xc0010000, REG_W0, imm); - /* stg %w0,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off); + bpf_jit_probe_store_pre(jit, insn, &probe); + /* stg %w0,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; /* @@ -1506,9 +1576,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, */ case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_B: + case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: bpf_jit_probe_load_pre(jit, insn, &probe); - /* llgc %dst,0(off,%src) */ - EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off); + /* llgc %dst,off(%src,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, + probe.arena_reg, off); err = bpf_jit_probe_post(jit, fp, &probe); if (err < 0) 
return err; @@ -1519,7 +1591,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: bpf_jit_probe_load_pre(jit, insn, &probe); - /* lgb %dst,0(off,%src) */ + /* lgb %dst,off(%src) */ EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off); err = bpf_jit_probe_post(jit, fp, &probe); if (err < 0) @@ -1528,9 +1600,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, break; case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: bpf_jit_probe_load_pre(jit, insn, &probe); - /* llgh %dst,0(off,%src) */ - EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off); + /* llgh %dst,off(%src,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, + probe.arena_reg, off); err = bpf_jit_probe_post(jit, fp, &probe); if (err < 0) return err; @@ -1541,7 +1615,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: bpf_jit_probe_load_pre(jit, insn, &probe); - /* lgh %dst,0(off,%src) */ + /* lgh %dst,off(%src) */ EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off); err = bpf_jit_probe_post(jit, fp, &probe); if (err < 0) @@ -1550,10 +1624,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, break; case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: bpf_jit_probe_load_pre(jit, insn, &probe); /* llgf %dst,off(%src) */ jit->seen |= SEEN_MEM; - EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off); + EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, + probe.arena_reg, off); err = bpf_jit_probe_post(jit, fp, &probe); if (err < 0) return err; @@ -1572,10 +1648,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, break; case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: bpf_jit_probe_load_pre(jit, insn, &probe); - /* lg %dst,0(off,%src) */ + /* lg %dst,off(%src,%arena) */ jit->seen |= SEEN_MEM; - EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off); + EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, + probe.arena_reg, off); err = bpf_jit_probe_post(jit, fp, &probe); if (err < 0) return err; @@ -1988,12 +2066,17 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, bool extra_pass, u32 stack_depth) { int i, insn_count, lit32_size, lit64_size; + u64 kern_arena; jit->lit32 = jit->lit32_start; jit->lit64 = jit->lit64_start; jit->prg = 0; jit->excnt = 0; + kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena); + if (kern_arena) + jit->kern_arena = _EMIT_CONST_U64(kern_arena); + bpf_jit_prologue(jit, fp, stack_depth); if (bpf_set_addr(jit, 0) < 0) return -1; -- cgit v1.2.3 From 555469cc9be4a7f52c0ad07a4a237d63e8c5c5f4 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:25 +0200 Subject: s390/bpf: Support address space cast instruction The new address cast instruction translates arena offsets to userspace addresses. NULL pointers must not be translated. The common code sets up the mappings in such a way that it's enough to replace the higher 32 bits to achieve the desired result. 
s390x has just an instruction for this: INSERT IMMEDIATE. Implement the sequence using 3 instruction: LOAD AND TEST, BRANCH RELATIVE ON CONDITION and INSERT IMMEDIATE. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-8-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 4b62b5162dfb..39c1d9aa7f1e 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -54,6 +54,7 @@ struct bpf_jit { int prologue_plt_ret; /* Return address for prologue hotpatch PLT */ int prologue_plt; /* Start of prologue hotpatch PLT */ int kern_arena; /* Pool offset of kernel arena address */ + u64 user_arena; /* User arena address */ }; #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ @@ -863,6 +864,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, } break; case BPF_ALU64 | BPF_MOV | BPF_X: + if (insn_is_cast_user(insn)) { + int patch_brc; + + /* ltgr %dst,%src */ + EMIT4(0xb9020000, dst_reg, src_reg); + /* brc 8,0f */ + patch_brc = jit->prg; + EMIT4_PCREL_RIC(0xa7040000, 8, 0); + /* iihf %dst,user_arena>>32 */ + EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32); + /* 0: */ + if (jit->prg_buf) + *(u16 *)(jit->prg_buf + patch_brc + 2) = + (jit->prg - patch_brc) >> 1; + break; + } switch (insn->off) { case 0: /* DST = SRC */ /* lgr %dst,%src */ @@ -2076,6 +2093,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena); if (kern_arena) jit->kern_arena = _EMIT_CONST_U64(kern_arena); + jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena); bpf_jit_prologue(jit, fp, stack_depth); if (bpf_set_addr(jit, 0) < 0) -- cgit v1.2.3 From 1e36027e39b8b3fa567ce3d743dbda5954dc0a56 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:26 +0200 Subject: s390/bpf: Enable arena Now that BPF_PROBE_MEM32 and address space cast instructions are implemented, tell the verifier that the JIT supports arena. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-9-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 39c1d9aa7f1e..1dd359c25ada 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -2820,3 +2820,8 @@ bool bpf_jit_supports_subprog_tailcalls(void) { return true; } + +bool bpf_jit_supports_arena(void) +{ + return true; +} -- cgit v1.2.3 From 2f9469484a3b52c66b799de73bd1ca75617bc8d5 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:27 +0200 Subject: s390/bpf: Support arena atomics s390x supports most BPF atomics using single instructions, which makes implementing arena support a matter of adding arena address to the base register (unfortunately atomics do not support index registers), and wrapping the respective native instruction in probing sequences. An exception is BPF_XCHG, which is implemented using two different memory accesses and a loop. Make sure there is enough extable entries for both instructions. Compute the base address once for both memory accesses. Since on exception we need to land after the loop, emit the nops manually. 
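On the BPF side these paths are exercised with ordinary compiler atomic builtins on arena pointers, as in the arena_atomics.c selftest further below. A trimmed sketch, with the arena map and headers assumed from the selftests as they exist at this point in the series:

  __u64 __arena counter = 1;

  SEC("raw_tp/sys_enter")
  int bump(const void *ctx)
  {
          /* Expected to be turned into BPF_STX | BPF_PROBE_ATOMIC by the
           * verifier because the pointer is an arena pointer. */
          __sync_fetch_and_add(&counter, 1);

          /* BPF_XCHG: on s390x this is the load plus compare-and-swap
           * loop described above, hence the extra extable entry. */
          __sync_lock_test_and_set(&counter, 0);
          return 0;
  }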
Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-10-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 104 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 94 insertions(+), 10 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 1dd359c25ada..ddfc0e99872e 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -704,6 +704,10 @@ static void bpf_jit_probe_init(struct bpf_jit_probe *probe) static void bpf_jit_probe_emit_nop(struct bpf_jit *jit, struct bpf_jit_probe *probe) { + if (probe->prg == -1 || probe->nop_prg != -1) + /* The probe is not armed or nop is already emitted. */ + return; + probe->nop_prg = jit->prg; /* bcr 0,%0 */ _EMIT2(0x0700); @@ -738,6 +742,21 @@ static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn, probe->prg = jit->prg; } +static void bpf_jit_probe_atomic_pre(struct bpf_jit *jit, + struct bpf_insn *insn, + struct bpf_jit_probe *probe) +{ + if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC) + return; + + /* lgrl %r1,kern_arena */ + EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); + /* agr %r1,%dst */ + EMIT4(0xb9080000, REG_W1, insn->dst_reg); + probe->arena_reg = REG_W1; + probe->prg = jit->prg; +} + static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp, struct bpf_jit_probe *probe) { @@ -1523,15 +1542,30 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, */ case BPF_STX | BPF_ATOMIC | BPF_DW: case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: + case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: { bool is32 = BPF_SIZE(insn->code) == BPF_W; + /* + * Unlike loads and stores, atomics have only a base register, + * but no index register. For the non-arena case, simply use + * %dst as a base. For the arena case, use the work register + * %r1: first, load the arena base into it, and then add %dst + * to it. + */ + probe.arena_reg = dst_reg; + switch (insn->imm) { -/* {op32|op64} {%w0|%src},%src,off(%dst) */ #define EMIT_ATOMIC(op32, op64) do { \ + bpf_jit_probe_atomic_pre(jit, insn, &probe); \ + /* {op32|op64} {%w0|%src},%src,off(%arena) */ \ EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \ (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \ - src_reg, dst_reg, off); \ + src_reg, probe.arena_reg, off); \ + err = bpf_jit_probe_post(jit, fp, &probe); \ + if (err < 0) \ + return err; \ if (insn->imm & BPF_FETCH) { \ /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \ _EMIT2(0x07e0); \ @@ -1560,25 +1594,50 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT_ATOMIC(0x00f7, 0x00e7); break; #undef EMIT_ATOMIC - case BPF_XCHG: - /* {ly|lg} %w0,off(%dst) */ + case BPF_XCHG: { + struct bpf_jit_probe load_probe = probe; + int loop_start; + + bpf_jit_probe_atomic_pre(jit, insn, &load_probe); + /* {ly|lg} %w0,off(%arena) */ EMIT6_DISP_LH(0xe3000000, is32 ? 0x0058 : 0x0004, REG_W0, REG_0, - dst_reg, off); - /* 0: {csy|csg} %w0,%src,off(%dst) */ + load_probe.arena_reg, off); + bpf_jit_probe_emit_nop(jit, &load_probe); + /* Reuse {ly|lg}'s arena_reg for {csy|csg}. */ + if (load_probe.prg != -1) { + probe.prg = jit->prg; + probe.arena_reg = load_probe.arena_reg; + } + loop_start = jit->prg; + /* 0: {csy|csg} %w0,%src,off(%arena) */ EMIT6_DISP_LH(0xeb000000, is32 ? 
0x0014 : 0x0030, - REG_W0, src_reg, dst_reg, off); + REG_W0, src_reg, probe.arena_reg, off); + bpf_jit_probe_emit_nop(jit, &probe); /* brc 4,0b */ - EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6); + EMIT4_PCREL_RIC(0xa7040000, 4, loop_start); /* {llgfr|lgr} %src,%w0 */ EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0); + /* Both probes should land here on exception. */ + err = bpf_jit_probe_post(jit, fp, &load_probe); + if (err < 0) + return err; + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; if (is32 && insn_is_zext(&insn[1])) insn_count = 2; break; + } case BPF_CMPXCHG: - /* 0: {csy|csg} %b0,%src,off(%dst) */ + bpf_jit_probe_atomic_pre(jit, insn, &probe); + /* 0: {csy|csg} %b0,%src,off(%arena) */ EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, - BPF_REG_0, src_reg, dst_reg, off); + BPF_REG_0, src_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; break; default: pr_err("Unknown atomic operation %02x\n", insn->imm); @@ -2142,9 +2201,25 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit, struct bpf_prog *fp) { struct bpf_binary_header *header; + struct bpf_insn *insn; u32 extable_size; u32 code_size; + int i; + for (i = 0; i < fp->len; i++) { + insn = &fp->insnsi[i]; + + if (BPF_CLASS(insn->code) == BPF_STX && + BPF_MODE(insn->code) == BPF_PROBE_ATOMIC && + (BPF_SIZE(insn->code) == BPF_DW || + BPF_SIZE(insn->code) == BPF_W) && + insn->imm == BPF_XCHG) + /* + * bpf_jit_insn() emits a load and a compare-and-swap, + * both of which need to be probed. + */ + fp->aux->num_exentries += 1; + } /* We need two entries per insn. */ fp->aux->num_exentries *= 2; @@ -2825,3 +2900,12 @@ bool bpf_jit_supports_arena(void) { return true; } + +bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) +{ + /* + * Currently the verifier uses this function only to check which + * atomic stores to arena are supported, and they all are. + */ + return true; +} -- cgit v1.2.3 From b6349fd3448cf349af327f90585a712d60265429 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:28 +0200 Subject: selftests/bpf: Introduce __arena_global While clang uses __attribute__((address_space(1))) both for defining arena pointers and arena globals, GCC requires different syntax for both. While __arena covers the first use case, introduce __arena_global to cover the second one. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-11-iii@linux.ibm.com --- tools/testing/selftests/bpf/bpf_arena_common.h | 2 + tools/testing/selftests/bpf/progs/arena_atomics.c | 67 ++++++++++------------- 2 files changed, 32 insertions(+), 37 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h index 567491f3e1b5..68a51dcc0669 100644 --- a/tools/testing/selftests/bpf/bpf_arena_common.h +++ b/tools/testing/selftests/bpf/bpf_arena_common.h @@ -34,10 +34,12 @@ #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM) #define __arena __attribute__((address_space(1))) +#define __arena_global __attribute__((address_space(1))) #define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */ #define cast_user(ptr) /* nop for bpf prog. 
emitted by LLVM */ #else #define __arena +#define __arena_global SEC(".addr_space.1") #define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1) #define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0) #endif diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c index 55f10563208d..77a4dfa9cdf9 100644 --- a/tools/testing/selftests/bpf/progs/arena_atomics.c +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c @@ -25,20 +25,13 @@ bool skip_tests = true; __u32 pid = 0; -#undef __arena -#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) -#define __arena __attribute__((address_space(1))) -#else -#define __arena SEC(".addr_space.1") -#endif - -__u64 __arena add64_value = 1; -__u64 __arena add64_result = 0; -__u32 __arena add32_value = 1; -__u32 __arena add32_result = 0; -__u64 __arena add_stack_value_copy = 0; -__u64 __arena add_stack_result = 0; -__u64 __arena add_noreturn_value = 1; +__u64 __arena_global add64_value = 1; +__u64 __arena_global add64_result = 0; +__u32 __arena_global add32_value = 1; +__u32 __arena_global add32_result = 0; +__u64 __arena_global add_stack_value_copy = 0; +__u64 __arena_global add_stack_result = 0; +__u64 __arena_global add_noreturn_value = 1; SEC("raw_tp/sys_enter") int add(const void *ctx) @@ -58,13 +51,13 @@ int add(const void *ctx) return 0; } -__s64 __arena sub64_value = 1; -__s64 __arena sub64_result = 0; -__s32 __arena sub32_value = 1; -__s32 __arena sub32_result = 0; -__s64 __arena sub_stack_value_copy = 0; -__s64 __arena sub_stack_result = 0; -__s64 __arena sub_noreturn_value = 1; +__s64 __arena_global sub64_value = 1; +__s64 __arena_global sub64_result = 0; +__s32 __arena_global sub32_value = 1; +__s32 __arena_global sub32_result = 0; +__s64 __arena_global sub_stack_value_copy = 0; +__s64 __arena_global sub_stack_result = 0; +__s64 __arena_global sub_noreturn_value = 1; SEC("raw_tp/sys_enter") int sub(const void *ctx) @@ -84,8 +77,8 @@ int sub(const void *ctx) return 0; } -__u64 __arena and64_value = (0x110ull << 32); -__u32 __arena and32_value = 0x110; +__u64 __arena_global and64_value = (0x110ull << 32); +__u32 __arena_global and32_value = 0x110; SEC("raw_tp/sys_enter") int and(const void *ctx) @@ -101,8 +94,8 @@ int and(const void *ctx) return 0; } -__u32 __arena or32_value = 0x110; -__u64 __arena or64_value = (0x110ull << 32); +__u32 __arena_global or32_value = 0x110; +__u64 __arena_global or64_value = (0x110ull << 32); SEC("raw_tp/sys_enter") int or(const void *ctx) @@ -117,8 +110,8 @@ int or(const void *ctx) return 0; } -__u64 __arena xor64_value = (0x110ull << 32); -__u32 __arena xor32_value = 0x110; +__u64 __arena_global xor64_value = (0x110ull << 32); +__u32 __arena_global xor32_value = 0x110; SEC("raw_tp/sys_enter") int xor(const void *ctx) @@ -133,12 +126,12 @@ int xor(const void *ctx) return 0; } -__u32 __arena cmpxchg32_value = 1; -__u32 __arena cmpxchg32_result_fail = 0; -__u32 __arena cmpxchg32_result_succeed = 0; -__u64 __arena cmpxchg64_value = 1; -__u64 __arena cmpxchg64_result_fail = 0; -__u64 __arena cmpxchg64_result_succeed = 0; +__u32 __arena_global cmpxchg32_value = 1; +__u32 __arena_global cmpxchg32_result_fail = 0; +__u32 __arena_global cmpxchg32_result_succeed = 0; +__u64 __arena_global cmpxchg64_value = 1; +__u64 __arena_global cmpxchg64_result_fail = 0; +__u64 __arena_global cmpxchg64_result_succeed = 0; SEC("raw_tp/sys_enter") int cmpxchg(const void *ctx) @@ -156,10 +149,10 @@ int cmpxchg(const void *ctx) return 0; } -__u64 __arena xchg64_value = 1; -__u64 __arena xchg64_result 
= 0; -__u32 __arena xchg32_value = 1; -__u32 __arena xchg32_result = 0; +__u64 __arena_global xchg64_value = 1; +__u64 __arena_global xchg64_result = 0; +__u32 __arena_global xchg32_value = 1; +__u32 __arena_global xchg32_result = 0; SEC("raw_tp/sys_enter") int xchg(const void *ctx) -- cgit v1.2.3 From 490c99d4ed99bb01dac8bf2896e27941403549c4 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:29 +0200 Subject: selftests/bpf: Add UAF tests for arena atomics Check that __sync_*() functions don't cause kernel panics when handling freed arena pages. x86_64 does not support some arena atomics yet, and aarch64 may or may not support them, based on the availability of LSE atomics at run time. Do not enable this test for these architectures for simplicity. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-12-iii@linux.ibm.com --- .../selftests/bpf/prog_tests/arena_atomics.c | 18 +++++ tools/testing/selftests/bpf/progs/arena_atomics.c | 76 ++++++++++++++++++++++ 2 files changed, 94 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c index 0807a48a58ee..26e7c06c6cb4 100644 --- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c @@ -146,6 +146,22 @@ static void test_xchg(struct arena_atomics *skel) ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result"); } +static void test_uaf(struct arena_atomics *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + /* No need to attach it, just run it directly */ + prog_fd = bpf_program__fd(skel->progs.uaf); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails"); +} + void test_arena_atomics(void) { struct arena_atomics *skel; @@ -180,6 +196,8 @@ void test_arena_atomics(void) test_cmpxchg(skel); if (test__start_subtest("xchg")) test_xchg(skel); + if (test__start_subtest("uaf")) + test_uaf(skel); cleanup: arena_atomics__destroy(skel); diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c index 77a4dfa9cdf9..bb0acd79d28a 100644 --- a/tools/testing/selftests/bpf/progs/arena_atomics.c +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c @@ -169,3 +169,79 @@ int xchg(const void *ctx) return 0; } + +__u64 __arena_global uaf_sink; +volatile __u64 __arena_global uaf_recovery_fails; + +SEC("syscall") +int uaf(const void *ctx) +{ + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; +#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \ + !defined(__TARGET_ARCH_x86) + __u32 __arena *page32; + __u64 __arena *page64; + void __arena *page; + + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + bpf_arena_free_pages(&arena, page, 1); + uaf_recovery_fails = 24; + + page32 = (__u32 __arena *)page; + uaf_sink += __sync_fetch_and_add(page32, 1); + uaf_recovery_fails -= 1; + __sync_add_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_sub(page32, 1); + uaf_recovery_fails -= 1; + __sync_sub_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_and(page32, 1); + uaf_recovery_fails -= 1; + __sync_and_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += 
__sync_fetch_and_or(page32, 1); + uaf_recovery_fails -= 1; + __sync_or_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_xor(page32, 1); + uaf_recovery_fails -= 1; + __sync_xor_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_val_compare_and_swap(page32, 0, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_lock_test_and_set(page32, 1); + uaf_recovery_fails -= 1; + + page64 = (__u64 __arena *)page; + uaf_sink += __sync_fetch_and_add(page64, 1); + uaf_recovery_fails -= 1; + __sync_add_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_sub(page64, 1); + uaf_recovery_fails -= 1; + __sync_sub_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_and(page64, 1); + uaf_recovery_fails -= 1; + __sync_and_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_or(page64, 1); + uaf_recovery_fails -= 1; + __sync_or_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_xor(page64, 1); + uaf_recovery_fails -= 1; + __sync_xor_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_val_compare_and_swap(page64, 0, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_lock_test_and_set(page64, 1); + uaf_recovery_fails -= 1; +#endif + + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 69716e44a74af464060faa68fa2b54f3af03c16a Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 2 Jul 2024 01:40:30 +0200 Subject: selftests/bpf: Remove arena tests from DENYLIST.s390x Now that the s390x JIT supports arena, remove the respective tests from the denylist. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240701234304.14336-13-iii@linux.ibm.com --- tools/testing/selftests/bpf/DENYLIST.s390x | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index c34adf39eeb2..cb810a98e78f 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -4,6 +4,3 @@ exceptions # JIT does not support calling kfunc bpf_throw (excepti get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace) stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?) verifier_iterating_callbacks -verifier_arena # JIT does not support arena -arena_htab # JIT does not support arena -arena_atomics -- cgit v1.2.3 From fd8db07705c55a995c42b1e71afc42faad675b0b Mon Sep 17 00:00:00 2001 From: Florian Lehner Date: Sat, 15 Jun 2024 12:11:58 +0200 Subject: bpf, devmap: Add .map_alloc_check Use the .map_allock_check callback to perform allocation checks before allocating memory for the devmap. 
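A hedged userspace illustration of the effect; the map name and sizes are made up for the example, not taken from the patch. With the check moved into .map_alloc_check, an oversized BPF_MAP_TYPE_DEVMAP_HASH is rejected before any map memory is allocated:

  #include <stdio.h>
  #include <bpf/bpf.h>

  int main(void)
  {
          /* max_entries > 1UL << 31 would overflow roundup_pow_of_two()
           * on 32-bit arches, so expect an early -EINVAL. */
          int fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, "too_big",
                                  4, 4, (1U << 31) + 1, NULL);

          if (fd < 0)
                  printf("creation rejected: %d\n", fd);
          return 0;
  }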
Signed-off-by: Florian Lehner Signed-off-by: Daniel Borkmann Acked-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240615101158.57889-1-dev@der-flo.net --- kernel/bpf/devmap.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 7f3b34452243..da1fec906b96 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -107,7 +107,7 @@ static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab, return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; } -static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) +static int dev_map_alloc_check(union bpf_attr *attr) { u32 valsize = attr->value_size; @@ -121,23 +121,28 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) attr->map_flags & ~DEV_CREATE_FLAG_MASK) return -EINVAL; + if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { + /* Hash table size must be power of 2; roundup_pow_of_two() + * can overflow into UB on 32-bit arches + */ + if (attr->max_entries > 1UL << 31) + return -EINVAL; + } + + return 0; +} + +static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) +{ /* Lookup returns a pointer straight to dev->ifindex, so make sure the * verifier prevents writes from the BPF side */ attr->map_flags |= BPF_F_RDONLY_PROG; - - bpf_map_init_from_attr(&dtab->map, attr); if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { - /* hash table size must be power of 2; roundup_pow_of_two() can - * overflow into UB on 32-bit arches, so check that first - */ - if (dtab->map.max_entries > 1UL << 31) - return -EINVAL; - + /* Hash table size must be power of 2 */ dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); - dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, dtab->map.numa_node); if (!dtab->dev_index_head) @@ -1040,6 +1045,7 @@ static u64 dev_map_mem_usage(const struct bpf_map *map) BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab) const struct bpf_map_ops dev_map_ops = { .map_meta_equal = bpf_map_meta_equal, + .map_alloc_check = dev_map_alloc_check, .map_alloc = dev_map_alloc, .map_free = dev_map_free, .map_get_next_key = dev_map_get_next_key, @@ -1054,6 +1060,7 @@ const struct bpf_map_ops dev_map_ops = { const struct bpf_map_ops dev_map_hash_ops = { .map_meta_equal = bpf_map_meta_equal, + .map_alloc_check = dev_map_alloc_check, .map_alloc = dev_map_alloc, .map_free = dev_map_free, .map_get_next_key = dev_map_hash_get_next_key, -- cgit v1.2.3 From a5912c37faf723c0812a6a2c9dd18ffbd34a233b Mon Sep 17 00:00:00 2001 From: Puranjay Mohan Date: Mon, 8 Jul 2024 11:47:58 +0000 Subject: riscv, bpf: Optimize stack usage of trampoline When BPF_TRAMP_F_CALL_ORIG is not set, stack space for passing arguments on stack doesn't need to be reserved because the original function is not called. Only reserve space for stacked arguments when BPF_TRAMP_F_CALL_ORIG is set. 
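A short worked example of the saving; the argument count is illustrative and RV_MAX_REG_ARGS is assumed to be 8, matching RISC-V's a0-a7:

  /*
   * nr_arg_slots = 10, RV_MAX_REG_ARGS = 8 (assumed):
   *
   * fentry/fexit-only trampoline (BPF_TRAMP_F_CALL_ORIG not set):
   *   before: stack_size += (10 - 8) * 8 = 16 bytes, never used
   *   after:  nothing extra reserved
   *
   * trampoline that calls the original function (flag set):
   *   before and after both reserve (10 - 8) * 8 = 16 bytes for the
   *   arguments that must be put back on the stack for the call.
   */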
Signed-off-by: Puranjay Mohan Signed-off-by: Daniel Borkmann Acked-by: Pu Lehui Link: https://lore.kernel.org/bpf/20240708114758.64414-1-puranjay@kernel.org --- arch/riscv/net/bpf_jit_comp64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 685c7389ae7e..0795efdd3519 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -892,7 +892,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, stack_size += 8; sreg_off = stack_size; - if (nr_arg_slots - RV_MAX_REG_ARGS > 0) + if ((flags & BPF_TRAMP_F_CALL_ORIG) && (nr_arg_slots - RV_MAX_REG_ARGS > 0)) stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8; stack_size = round_up(stack_size, STACK_ALIGN); -- cgit v1.2.3 From bc239eb271e5f35fcade689c6782c962ef3704c8 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 4 Jul 2024 10:19:19 -0500 Subject: bpf: Remove unnecessary loop in task_file_seq_get_next() After commit 0ede61d8589c ("file: convert to SLAB_TYPESAFE_BY_RCU") this loop always iterates exactly one time. Delete the for statement and pull the code in a tab. Signed-off-by: Dan Carpenter Signed-off-by: Daniel Borkmann Reviewed-by: Christian Brauner Acked-by: Jiri Olsa Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/ZoWJF51D4zWb6f5t@stanley.mountain --- kernel/bpf/task_iter.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index ec4e97c61eef..02aa9db8d796 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -261,6 +261,7 @@ task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info) u32 saved_tid = info->tid; struct task_struct *curr_task; unsigned int curr_fd = info->fd; + struct file *f; /* If this function returns a non-NULL file object, * it held a reference to the task/file. @@ -286,12 +287,8 @@ again: } rcu_read_lock(); - for (;; curr_fd++) { - struct file *f; - f = task_lookup_next_fdget_rcu(curr_task, &curr_fd); - if (!f) - break; - + f = task_lookup_next_fdget_rcu(curr_task, &curr_fd); + if (f) { /* set info->fd */ info->fd = curr_fd; rcu_read_unlock(); -- cgit v1.2.3 From 7ba4f43e16de351fe9821de80e15d88c884b2967 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 3 Jul 2024 02:48:47 +0200 Subject: s390/bpf: Change seen_reg to a mask Using a mask instead of an array saves a small amount of memory and allows marking multiple registers as seen with a simple "or". Another positive side-effect is that it speeds up verification with jitterbug. 
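A side-by-side sketch of the change; the NVREGS mask used for the multi-register case is introduced by a later patch in this series:

  /* before: one array slot per register */
  jit->seen_reg[r1] = 1;

  /* after: one bit per register ... */
  jit->seen_regs |= (1 << r1);

  /* ... which also allows marking a whole range in one statement */
  jit->seen_regs |= NVREGS;       /* %r6-%r15 */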
Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240703005047.40915-2-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index ddfc0e99872e..945f2ee6511b 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -35,7 +35,7 @@ struct bpf_jit { u32 seen; /* Flags to remember seen eBPF instructions */ - u32 seen_reg[16]; /* Array to remember which registers are used */ + u16 seen_regs; /* Mask to remember which registers are used */ u32 *addrs; /* Array with relative instruction addresses */ u8 *prg_buf; /* Start of program */ int size; /* Size of program and literal pool */ @@ -120,8 +120,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) { u32 r1 = reg2hex[b1]; - if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1]) - jit->seen_reg[r1] = 1; + if (r1 >= 6 && r1 <= 15) + jit->seen_regs |= (1 << r1); } #define REG_SET_SEEN(b1) \ @@ -129,8 +129,6 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) reg_set_seen(jit, b1); \ }) -#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]] - /* * EMIT macros for code generation */ @@ -438,12 +436,12 @@ static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth) /* * Return first seen register (from start) */ -static int get_start(struct bpf_jit *jit, int start) +static int get_start(u16 seen_regs, int start) { int i; for (i = start; i <= 15; i++) { - if (jit->seen_reg[i]) + if (seen_regs & (1 << i)) return i; } return 0; @@ -452,15 +450,15 @@ static int get_start(struct bpf_jit *jit, int start) /* * Return last seen register (from start) (gap >= 2) */ -static int get_end(struct bpf_jit *jit, int start) +static int get_end(u16 seen_regs, int start) { int i; for (i = start; i < 15; i++) { - if (!jit->seen_reg[i] && !jit->seen_reg[i + 1]) + if (!(seen_regs & (3 << i))) return i - 1; } - return jit->seen_reg[15] ? 15 : 14; + return (seen_regs & (1 << 15)) ? 15 : 14; } #define REGS_SAVE 1 @@ -469,8 +467,10 @@ static int get_end(struct bpf_jit *jit, int start) * Save and restore clobbered registers (6-15) on stack. * We save/restore registers in chunks with gap >= 2 registers. 
*/ -static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) +static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth, + u16 extra_regs) { + u16 seen_regs = jit->seen_regs | extra_regs; const int last = 15, save_restore_size = 6; int re = 6, rs; @@ -484,10 +484,10 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) } do { - rs = get_start(jit, re); + rs = get_start(seen_regs, re); if (!rs) break; - re = get_end(jit, rs + 1); + re = get_end(seen_regs, rs + 1); if (op == REGS_SAVE) save_regs(jit, rs, re); else @@ -573,7 +573,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, /* Tail calls have to skip above initialization */ jit->tail_call_start = jit->prg; /* Save registers */ - save_restore_regs(jit, REGS_SAVE, stack_depth); + save_restore_regs(jit, REGS_SAVE, stack_depth, 0); /* Setup literal pool */ if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) { if (!is_first_pass(jit) && @@ -649,7 +649,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) /* Load exit code: lgr %r2,%b0 */ EMIT4(0xb9040000, REG_2, BPF_REG_0); /* Restore registers */ - save_restore_regs(jit, REGS_RESTORE, stack_depth); + save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); if (nospec_uses_trampoline()) { jit->r14_thunk_ip = jit->prg; /* Generate __s390_indirect_jump_r14 thunk */ @@ -1847,7 +1847,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* * Restore registers before calling function */ - save_restore_regs(jit, REGS_RESTORE, stack_depth); + save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); /* * goto *(prog->bpf_func + tail_call_start); -- cgit v1.2.3 From fa7bd4b000a7ae32eb6fc049125943561e5b46f3 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 3 Jul 2024 02:48:48 +0200 Subject: s390/bpf: Implement exceptions Implement the following three pieces required from the JIT: - A "top-level" BPF prog (exception_boundary) must save all non-volatile registers, and not only the ones that it clobbers. - A "handler" BPF prog (exception_cb) must switch stack to that of exception_boundary, and restore the registers that exception_boundary saved. - arch_bpf_stack_walk() must unwind the stack and provide the results in a way that satisfies both bpf_throw() and exception_cb. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240703005047.40915-3-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 55 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 945f2ee6511b..9d440a0b729e 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "bpf_jit.h" struct bpf_jit { @@ -62,6 +63,8 @@ struct bpf_jit { #define SEEN_FUNC BIT(2) /* calls C functions */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM) +#define NVREGS 0xffc0 /* %r6-%r15 */ + /* * s390 registers */ @@ -572,8 +575,21 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, } /* Tail calls have to skip above initialization */ jit->tail_call_start = jit->prg; - /* Save registers */ - save_restore_regs(jit, REGS_SAVE, stack_depth, 0); + if (fp->aux->exception_cb) { + /* + * Switch stack, the new address is in the 2nd parameter. + * + * Arrange the restoration of %r6-%r15 in the epilogue. + * Do not restore them now, the prog does not need them. 
+ */ + /* lgr %r15,%r3 */ + EMIT4(0xb9040000, REG_15, REG_3); + jit->seen_regs |= NVREGS; + } else { + /* Save registers */ + save_restore_regs(jit, REGS_SAVE, stack_depth, + fp->aux->exception_boundary ? NVREGS : 0); + } /* Setup literal pool */ if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) { if (!is_first_pass(jit) && @@ -2909,3 +2925,38 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) */ return true; } + +bool bpf_jit_supports_exceptions(void) +{ + /* + * Exceptions require unwinding support, which is always available, + * because the kernel is always built with backchain. + */ + return true; +} + +void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), + void *cookie) +{ + unsigned long addr, prev_addr = 0; + struct unwind_state state; + + unwind_for_each_frame(&state, NULL, NULL, 0) { + addr = unwind_get_return_address(&state); + if (!addr) + break; + /* + * addr is a return address and state.sp is the value of %r15 + * at this address. exception_cb needs %r15 at entry to the + * function containing addr, so take the next state.sp. + * + * There is no bp, and the exception_cb prog does not need one + * to perform a quasi-longjmp. The common code requires a + * non-zero bp, so pass sp there as well. + */ + if (prev_addr && !consume_fn(cookie, prev_addr, state.sp, + state.sp)) + break; + prev_addr = addr; + } +} -- cgit v1.2.3 From 02480fe8a6a6d44c16900b1d3a2a66d140d0a005 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 3 Jul 2024 02:48:49 +0200 Subject: selftests/bpf: Remove exceptions tests from DENYLIST.s390x Now that the s390x JIT supports exceptions, remove the respective tests from the denylist. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240703005047.40915-4-iii@linux.ibm.com --- tools/testing/selftests/bpf/DENYLIST.s390x | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index cb810a98e78f..3ebd77206f98 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -1,6 +1,5 @@ # TEMPORARY # Alphabetical order -exceptions # JIT does not support calling kfunc bpf_throw (exceptions) get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace) stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?) verifier_iterating_callbacks -- cgit v1.2.3 From cedc12c5b57f7efa6dbebfb2b140e8675f5a2616 Mon Sep 17 00:00:00 2001 From: Andreas Ziegler Date: Wed, 3 Jul 2024 10:34:36 +0200 Subject: libbpf: Add NULL checks to bpf_object__{prev_map,next_map} In the current state, an erroneous call to bpf_object__find_map_by_name(NULL, ...) leads to a segmentation fault through the following call chain: bpf_object__find_map_by_name(obj = NULL, ...) -> bpf_object__for_each_map(pos, obj = NULL) -> bpf_object__next_map((obj = NULL), NULL) -> return (obj = NULL)->maps While calling bpf_object__find_map_by_name with obj = NULL is obviously incorrect, this should not lead to a segmentation fault but rather be handled gracefully. As __bpf_map__iter already handles this situation correctly, we can delegate the check for the regular case there and only add a check in case the prev or next parameter is NULL. 
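As a rough illustration (not part of this patch): with the added checks, iterating over a NULL bpf_object now simply yields no maps instead of crashing. A minimal sketch, with error handling trimmed for brevity:

#include <stdio.h>
#include <bpf/libbpf.h>

static void dump_map_names(const struct bpf_object *obj)
{
	struct bpf_map *map;

	/* Expands to repeated bpf_object__next_map(obj, ...) calls. */
	bpf_object__for_each_map(map, obj)
		printf("map: %s\n", bpf_map__name(map));
}

int main(void)
{
	/*
	 * Previously a segfault via bpf_object__next_map(NULL, NULL);
	 * now the loop terminates immediately.
	 */
	dump_map_names(NULL);
	return 0;
}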
Signed-off-by: Andreas Ziegler Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240703083436.505124-1-ziegler.andreas@siemens.com --- tools/lib/bpf/libbpf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4a28fac4908a..30f121754d83 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10375,7 +10375,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) struct bpf_map * bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) { - if (prev == NULL) + if (prev == NULL && obj != NULL) return obj->maps; return __bpf_map__iter(prev, obj, 1); @@ -10384,7 +10384,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) struct bpf_map * bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) { - if (next == NULL) { + if (next == NULL && obj != NULL) { if (!obj->nr_maps) return NULL; return obj->maps + obj->nr_maps - 1; -- cgit v1.2.3 From f56f4d541eab1ae060a46b56dd6ec9130d6e3a98 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Mon, 8 Jul 2024 11:52:57 +0200 Subject: bpf: helpers: fix bpf_wq_set_callback_impl signature I realized this while having a map containing both a struct bpf_timer and a struct bpf_wq: the third argument provided to the bpf_wq callback is not the struct bpf_wq pointer itself, but a pointer to the value in the map. This means users need to double-cast the provided "value", since it is not a struct bpf_wq *. This is an API change, but there don't seem to be many users of bpf_wq yet, so we should be able to go with this now. Fixes: 81f1d7a583fa ("bpf: wq: add bpf_wq_set_callback_impl") Signed-off-by: Benjamin Tissoires Link: https://lore.kernel.org/r/20240708-fix-wq-v2-1-667e5c9fbd99@kernel.org Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 229396172026..5241ba671c5a 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2734,7 +2734,7 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) } __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq), + int (callback_fn)(void *map, int *key, void *value), unsigned int flags, void *aux__ign) { -- cgit v1.2.3 From 16e86f2e8199cdb8789573c8784eb5c1cd478f13 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Mon, 8 Jul 2024 11:52:58 +0200 Subject: selftests/bpf: amend for wrong bpf_wq_set_callback_impl signature See the previous patch: the API was wrong; the callback receives a pointer to the map value, not the actual struct bpf_wq *.
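As a minimal sketch (not part of this patch; struct, map, and program names are hypothetical), in the style of progs/wq.c, this is how a callback and its registration look with the corrected signature:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

char LICENSE[] SEC("license") = "GPL";

struct elem {
	int payload;
	struct bpf_wq w;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

/* The third argument is the map value, not a struct bpf_wq *. */
static int wq_cb(void *map, int *key, void *value)
{
	struct elem *e = value;

	e->payload++;	/* the whole element is visible, not just the wq */
	return 0;
}

SEC("tc")
int schedule_work(void *ctx)
{
	int key = 0;
	struct elem *e;

	e = bpf_map_lookup_elem(&array, &key);
	if (!e)
		return 0;
	if (bpf_wq_init(&e->w, &array, 0))
		return 0;
	if (bpf_wq_set_callback(&e->w, wq_cb, 0))
		return 0;
	bpf_wq_start(&e->w, 0);
	return 0;
}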
Signed-off-by: Benjamin Tissoires Link: https://lore.kernel.org/r/20240708-fix-wq-v2-2-667e5c9fbd99@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/bpf_experimental.h | 2 +- tools/testing/selftests/bpf/progs/wq.c | 19 ++++++++++++++----- tools/testing/selftests/bpf/progs/wq_failures.c | 4 ++-- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index eede6fc2ccb4..828556cdc2f0 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -552,7 +552,7 @@ extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym; extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym; extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym; extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq), + int (callback_fn)(void *map, int *key, void *value), unsigned int flags__k, void *aux__ign) __ksym; #define bpf_wq_set_callback(timer, cb, flags) \ bpf_wq_set_callback_impl(timer, cb, flags, NULL) diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c index 49e712acbf60..f8d3ae0c29ae 100644 --- a/tools/testing/selftests/bpf/progs/wq.c +++ b/tools/testing/selftests/bpf/progs/wq.c @@ -32,6 +32,7 @@ struct { } hmap_malloc SEC(".maps"); struct elem { + int ok_offset; struct bpf_wq w; }; @@ -53,7 +54,7 @@ __u32 ok; __u32 ok_sleepable; static int test_elem_callback(void *map, int *key, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq)) + int (callback_fn)(void *map, int *key, void *value)) { struct elem init = {}, *val; struct bpf_wq *wq; @@ -70,6 +71,8 @@ static int test_elem_callback(void *map, int *key, if (!val) return -2; + val->ok_offset = *key; + wq = &val->w; if (bpf_wq_init(wq, map, 0) != 0) return -3; @@ -84,7 +87,7 @@ static int test_elem_callback(void *map, int *key, } static int test_hmap_elem_callback(void *map, int *key, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq)) + int (callback_fn)(void *map, int *key, void *value)) { struct hmap_elem init = {}, *val; struct bpf_wq *wq; @@ -114,7 +117,7 @@ static int test_hmap_elem_callback(void *map, int *key, } /* callback for non sleepable workqueue */ -static int wq_callback(void *map, int *key, struct bpf_wq *work) +static int wq_callback(void *map, int *key, void *value) { bpf_kfunc_common_test(); ok |= (1 << *key); @@ -122,10 +125,16 @@ static int wq_callback(void *map, int *key, struct bpf_wq *work) } /* callback for sleepable workqueue */ -static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work) +static int wq_cb_sleepable(void *map, int *key, void *value) { + struct elem *data = (struct elem *)value; + int offset = data->ok_offset; + + if (*key != offset) + return 0; + bpf_kfunc_call_test_sleepable(); - ok_sleepable |= (1 << *key); + ok_sleepable |= (1 << offset); return 0; } diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c index 4cbdb425f223..25b51a72fe0f 100644 --- a/tools/testing/selftests/bpf/progs/wq_failures.c +++ b/tools/testing/selftests/bpf/progs/wq_failures.c @@ -28,14 +28,14 @@ struct { } lru SEC(".maps"); /* callback for non sleepable workqueue */ -static int wq_callback(void *map, int *key, struct bpf_wq *work) +static int wq_callback(void *map, int *key, void *value) { bpf_kfunc_common_test(); 
return 0; } /* callback for sleepable workqueue */ -static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work) +static int wq_cb_sleepable(void *map, int *key, void *value) { bpf_kfunc_call_test_sleepable(); return 0; -- cgit v1.2.3 From 90dc946059b7d346f077b870a8d8aaf03b4d0772 Mon Sep 17 00:00:00 2001 From: Puranjay Mohan Date: Fri, 5 Jul 2024 14:50:09 +0000 Subject: selftests/bpf: DENYLIST.aarch64: Remove fexit_sleep The fexit_sleep test now runs successfully on the BPF CI, so remove it from the deny list. ftrace direct calls were blocking tracing programs on arm64, but this has since been resolved. For more details, see the discussion in [*]. Signed-off-by: Puranjay Mohan Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240705145009.32340-1-puranjay@kernel.org [*] --- tools/testing/selftests/bpf/DENYLIST.aarch64 | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 index 3c7c3e79aa93..901349da680f 100644 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64 @@ -1,6 +1,5 @@ bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 -fexit_sleep # The test never returns. The remaining tests cannot start. kprobe_multi_bench_attach # needs CONFIG_FPROBE kprobe_multi_test # needs CONFIG_FPROBE module_attach # prog 'kprobe_multi': failed to auto-attach: -95 -- cgit v1.2.3