Diffstat (limited to 'tools/testing/selftests/bpf/progs')
23 files changed, 1266 insertions(+), 160 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c index 9573be6122be..460682759aed 100644 --- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c +++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/stddef.h> #include <linux/tcp.h> +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_tcp_helpers.h" @@ -23,6 +24,7 @@ const char tcp_cdg[] = "cdg"; char cc_res[TCP_CA_NAME_MAX]; int tcp_cdg_res = 0; int stg_result = 0; +int ebusy_cnt = 0; struct { __uint(type, BPF_MAP_TYPE_SK_STORAGE); @@ -64,16 +66,23 @@ void BPF_PROG(dctcp_init, struct sock *sk) if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) { /* Switch to fallback */ - bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, - (void *)fallback, sizeof(fallback)); - /* Switch back to myself which the bpf trampoline - * stopped calling dctcp_init recursively. + if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, + (void *)fallback, sizeof(fallback)) == -EBUSY) + ebusy_cnt++; + + /* Switch back to myself and the recurred dctcp_init() + * will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION), + * except the last "cdg" one. */ - bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, - (void *)bpf_dctcp, sizeof(bpf_dctcp)); + if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, + (void *)bpf_dctcp, sizeof(bpf_dctcp)) == -EBUSY) + ebusy_cnt++; + /* Switch back to fallback */ - bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, - (void *)fallback, sizeof(fallback)); + if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, + (void *)fallback, sizeof(fallback)) == -EBUSY) + ebusy_cnt++; + /* Expecting -ENOTSUPP for tcp_cdg_res */ tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, (void *)tcp_cdg, sizeof(tcp_cdg)); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c index d22741272692..96131b9a1caa 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task.c @@ -6,6 +6,10 @@ char _license[] SEC("license") = "GPL"; +uint32_t tid = 0; +int num_unknown_tid = 0; +int num_known_tid = 0; + SEC("iter/task") int dump_task(struct bpf_iter__task *ctx) { @@ -18,6 +22,11 @@ int dump_task(struct bpf_iter__task *ctx) return 0; } + if (task->pid != tid) + num_unknown_tid++; + else + num_known_tid++; + if (ctx->meta->seq_num == 0) BPF_SEQ_PRINTF(seq, " tgid gid\n"); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c index 6e7b400888fe..b0255080662d 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c @@ -7,14 +7,16 @@ char _license[] SEC("license") = "GPL"; int count = 0; int tgid = 0; +int last_tgid = 0; +int unique_tgid_count = 0; SEC("iter/task_file") int dump_task_file(struct bpf_iter__task_file *ctx) { struct seq_file *seq = ctx->meta->seq; struct task_struct *task = ctx->task; - __u32 fd = ctx->fd; struct file *file = ctx->file; + __u32 fd = ctx->fd; if (task == (void *)0 || file == (void *)0) return 0; @@ -27,6 +29,11 @@ int dump_task_file(struct bpf_iter__task_file *ctx) if (tgid == task->tgid && task->tgid != task->pid) count++; + if (last_tgid != task->tgid) { + last_tgid = task->tgid; + unique_tgid_count++; + } + BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd, (long)file->f_op); return 0; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c 
b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c index 4ea6a37d1345..dd923dc637d5 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c @@ -20,6 +20,8 @@ char _license[] SEC("license") = "GPL"; #define D_PATH_BUF_SIZE 1024 char d_path_buf[D_PATH_BUF_SIZE] = {}; __u32 pid = 0; +__u32 one_task = 0; +__u32 one_task_error = 0; SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx) { @@ -33,8 +35,11 @@ SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx) return 0; file = vma->vm_file; - if (task->tgid != pid) + if (task->tgid != pid) { + if (one_task) + one_task_error = 1; return 0; + } perm_str[0] = (vma->vm_flags & VM_READ) ? 'r' : '-'; perm_str[1] = (vma->vm_flags & VM_WRITE) ? 'w' : '-'; perm_str[2] = (vma->vm_flags & VM_EXEC) ? 'x' : '-'; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c new file mode 100644 index 000000000000..ee7455d2623a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +__u32 unique_tgid_cnt = 0; +uintptr_t address = 0; +uintptr_t offset = 0; +__u32 last_tgid = 0; +__u32 pid = 0; +__u32 page_shift = 0; + +SEC("iter/task_vma") +int get_vma_offset(struct bpf_iter__task_vma *ctx) +{ + struct vm_area_struct *vma = ctx->vma; + struct seq_file *seq = ctx->meta->seq; + struct task_struct *task = ctx->task; + + if (task == NULL || vma == NULL) + return 0; + + if (last_tgid != task->tgid) + unique_tgid_cnt++; + last_tgid = task->tgid; + + if (task->tgid != pid) + return 0; + + if (vma->vm_start <= address && vma->vm_end > address) { + offset = address - vma->vm_start + (vma->vm_pgoff << page_shift); + BPF_SEQ_PRINTF(seq, "OK\n"); + } + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c index 8ab4253a1592..c74362854948 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c +++ b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Functions to manage eBPF programs attached to cgroup subsystems - * * Copyright 2022 Google LLC. */ #include "vmlinux.h" @@ -11,25 +9,14 @@ char _license[] SEC("license") = "GPL"; -/* - * Start times are stored per-task, not per-cgroup, as multiple tasks in one - * cgroup can perform reclaim concurrently. 
- */ -struct { - __uint(type, BPF_MAP_TYPE_TASK_STORAGE); - __uint(map_flags, BPF_F_NO_PREALLOC); - __type(key, int); - __type(value, __u64); -} vmscan_start_time SEC(".maps"); - -struct vmscan_percpu { +struct percpu_attach_counter { /* Previous percpu state, to figure out if we have new updates */ __u64 prev; /* Current percpu state */ __u64 state; }; -struct vmscan { +struct attach_counter { /* State propagated through children, pending aggregation */ __u64 pending; /* Total state, including all cpus and all children */ @@ -38,147 +25,94 @@ struct vmscan { struct { __uint(type, BPF_MAP_TYPE_PERCPU_HASH); - __uint(max_entries, 100); + __uint(max_entries, 1024); __type(key, __u64); - __type(value, struct vmscan_percpu); -} pcpu_cgroup_vmscan_elapsed SEC(".maps"); + __type(value, struct percpu_attach_counter); +} percpu_attach_counters SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH); - __uint(max_entries, 100); + __uint(max_entries, 1024); __type(key, __u64); - __type(value, struct vmscan); -} cgroup_vmscan_elapsed SEC(".maps"); + __type(value, struct attach_counter); +} attach_counters SEC(".maps"); extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym; extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym; -static struct cgroup *task_memcg(struct task_struct *task) -{ - int cgrp_id; - -#if __has_builtin(__builtin_preserve_enum_value) - cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id, memory_cgrp_id); -#else - cgrp_id = memory_cgrp_id; -#endif - return task->cgroups->subsys[cgrp_id]->cgroup; -} - static uint64_t cgroup_id(struct cgroup *cgrp) { return cgrp->kn->id; } -static int create_vmscan_percpu_elem(__u64 cg_id, __u64 state) +static int create_percpu_attach_counter(__u64 cg_id, __u64 state) { - struct vmscan_percpu pcpu_init = {.state = state, .prev = 0}; + struct percpu_attach_counter pcpu_init = {.state = state, .prev = 0}; - return bpf_map_update_elem(&pcpu_cgroup_vmscan_elapsed, &cg_id, + return bpf_map_update_elem(&percpu_attach_counters, &cg_id, &pcpu_init, BPF_NOEXIST); } -static int create_vmscan_elem(__u64 cg_id, __u64 state, __u64 pending) +static int create_attach_counter(__u64 cg_id, __u64 state, __u64 pending) { - struct vmscan init = {.state = state, .pending = pending}; + struct attach_counter init = {.state = state, .pending = pending}; - return bpf_map_update_elem(&cgroup_vmscan_elapsed, &cg_id, + return bpf_map_update_elem(&attach_counters, &cg_id, &init, BPF_NOEXIST); } -SEC("tp_btf/mm_vmscan_memcg_reclaim_begin") -int BPF_PROG(vmscan_start, int order, gfp_t gfp_flags) +SEC("fentry/cgroup_attach_task") +int BPF_PROG(counter, struct cgroup *dst_cgrp, struct task_struct *leader, + bool threadgroup) { - struct task_struct *task = bpf_get_current_task_btf(); - __u64 *start_time_ptr; - - start_time_ptr = bpf_task_storage_get(&vmscan_start_time, task, 0, - BPF_LOCAL_STORAGE_GET_F_CREATE); - if (start_time_ptr) - *start_time_ptr = bpf_ktime_get_ns(); - return 0; -} - -SEC("tp_btf/mm_vmscan_memcg_reclaim_end") -int BPF_PROG(vmscan_end, unsigned long nr_reclaimed) -{ - struct vmscan_percpu *pcpu_stat; - struct task_struct *current = bpf_get_current_task_btf(); - struct cgroup *cgrp; - __u64 *start_time_ptr; - __u64 current_elapsed, cg_id; - __u64 end_time = bpf_ktime_get_ns(); - - /* - * cgrp is the first parent cgroup of current that has memcg enabled in - * its subtree_control, or NULL if memcg is disabled in the entire tree. 
- * In a cgroup hierarchy like this: - * a - * / \ - * b c - * If "a" has memcg enabled, while "b" doesn't, then processes in "b" - * will accumulate their stats directly to "a". This makes sure that no - * stats are lost from processes in leaf cgroups that don't have memcg - * enabled, but only exposes stats for cgroups that have memcg enabled. - */ - cgrp = task_memcg(current); - if (!cgrp) + __u64 cg_id = cgroup_id(dst_cgrp); + struct percpu_attach_counter *pcpu_counter = bpf_map_lookup_elem( + &percpu_attach_counters, + &cg_id); + + if (pcpu_counter) + pcpu_counter->state += 1; + else if (create_percpu_attach_counter(cg_id, 1)) return 0; - cg_id = cgroup_id(cgrp); - start_time_ptr = bpf_task_storage_get(&vmscan_start_time, current, 0, - BPF_LOCAL_STORAGE_GET_F_CREATE); - if (!start_time_ptr) - return 0; - - current_elapsed = end_time - *start_time_ptr; - pcpu_stat = bpf_map_lookup_elem(&pcpu_cgroup_vmscan_elapsed, - &cg_id); - if (pcpu_stat) - pcpu_stat->state += current_elapsed; - else if (create_vmscan_percpu_elem(cg_id, current_elapsed)) - return 0; - - cgroup_rstat_updated(cgrp, bpf_get_smp_processor_id()); + cgroup_rstat_updated(dst_cgrp, bpf_get_smp_processor_id()); return 0; } SEC("fentry/bpf_rstat_flush") -int BPF_PROG(vmscan_flush, struct cgroup *cgrp, struct cgroup *parent, int cpu) +int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu) { - struct vmscan_percpu *pcpu_stat; - struct vmscan *total_stat, *parent_stat; + struct percpu_attach_counter *pcpu_counter; + struct attach_counter *total_counter, *parent_counter; __u64 cg_id = cgroup_id(cgrp); __u64 parent_cg_id = parent ? cgroup_id(parent) : 0; - __u64 *pcpu_vmscan; __u64 state; __u64 delta = 0; /* Add CPU changes on this level since the last flush */ - pcpu_stat = bpf_map_lookup_percpu_elem(&pcpu_cgroup_vmscan_elapsed, - &cg_id, cpu); - if (pcpu_stat) { - state = pcpu_stat->state; - delta += state - pcpu_stat->prev; - pcpu_stat->prev = state; + pcpu_counter = bpf_map_lookup_percpu_elem(&percpu_attach_counters, + &cg_id, cpu); + if (pcpu_counter) { + state = pcpu_counter->state; + delta += state - pcpu_counter->prev; + pcpu_counter->prev = state; } - total_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, &cg_id); - if (!total_stat) { - if (create_vmscan_elem(cg_id, delta, 0)) + total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id); + if (!total_counter) { + if (create_attach_counter(cg_id, delta, 0)) return 0; - goto update_parent; } /* Collect pending stats from subtree */ - if (total_stat->pending) { - delta += total_stat->pending; - total_stat->pending = 0; + if (total_counter->pending) { + delta += total_counter->pending; + total_counter->pending = 0; } /* Propagate changes to this cgroup's total */ - total_stat->state += delta; + total_counter->state += delta; update_parent: /* Skip if there are no changes to propagate, or no parent */ @@ -186,20 +120,20 @@ update_parent: return 0; /* Propagate changes to cgroup's parent */ - parent_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, - &parent_cg_id); - if (parent_stat) - parent_stat->pending += delta; + parent_counter = bpf_map_lookup_elem(&attach_counters, + &parent_cg_id); + if (parent_counter) + parent_counter->pending += delta; else - create_vmscan_elem(parent_cg_id, 0, delta); + create_attach_counter(parent_cg_id, 0, delta); return 0; } SEC("iter.s/cgroup") -int BPF_PROG(dump_vmscan, struct bpf_iter_meta *meta, struct cgroup *cgrp) +int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp) { struct seq_file *seq = 
meta->seq; - struct vmscan *total_stat; + struct attach_counter *total_counter; __u64 cg_id = cgrp ? cgroup_id(cgrp) : 0; /* Do nothing for the terminal call */ @@ -209,18 +143,13 @@ int BPF_PROG(dump_vmscan, struct bpf_iter_meta *meta, struct cgroup *cgrp) /* Flush the stats to make sure we get the most updated numbers */ cgroup_rstat_flush(cgrp); - total_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, &cg_id); - if (!total_stat) { - BPF_SEQ_PRINTF(seq, "cg_id: %llu, total_vmscan_delay: 0\n", + total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id); + if (!total_counter) { + BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: 0\n", cg_id); } else { - BPF_SEQ_PRINTF(seq, "cg_id: %llu, total_vmscan_delay: %llu\n", - cg_id, total_stat->state); + BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: %llu\n", + cg_id, total_counter->state); } - - /* - * We only dump stats for one cgroup here, so return 1 to stop - * iteration after the first cgroup. - */ - return 1; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/connect_ping.c b/tools/testing/selftests/bpf/progs/connect_ping.c new file mode 100644 index 000000000000..60178192b672 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/connect_ping.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2022 Google LLC. + */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> +#include <netinet/in.h> +#include <sys/socket.h> + +/* 2001:db8::1 */ +#define BINDADDR_V6 { { { 0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,1 } } } + +__u32 do_bind = 0; +__u32 has_error = 0; +__u32 invocations_v4 = 0; +__u32 invocations_v6 = 0; + +SEC("cgroup/connect4") +int connect_v4_prog(struct bpf_sock_addr *ctx) +{ + struct sockaddr_in sa = { + .sin_family = AF_INET, + .sin_addr.s_addr = bpf_htonl(0x01010101), + }; + + __sync_fetch_and_add(&invocations_v4, 1); + + if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa))) + has_error = 1; + + return 1; +} + +SEC("cgroup/connect6") +int connect_v6_prog(struct bpf_sock_addr *ctx) +{ + struct sockaddr_in6 sa = { + .sin6_family = AF_INET6, + .sin6_addr = BINDADDR_V6, + }; + + __sync_fetch_and_add(&invocations_v6, 1); + + if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa))) + has_error = 1; + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c index a587aeca5ae0..8559e698b40d 100644 --- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c +++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c @@ -2,6 +2,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +#include <stdbool.h> char _license[] SEC("license") = "GPL"; @@ -13,6 +14,16 @@ extern const void bpf_modify_return_test __ksym; extern const void bpf_fentry_test6 __ksym; extern const void bpf_fentry_test7 __ksym; +extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak; + +/* This function is here to have CONFIG_X86_KERNEL_IBT + * used and added to object BTF. + */ +int unused(void) +{ + return CONFIG_X86_KERNEL_IBT ? 
0 : 1; +} + __u64 test1_result = 0; SEC("fentry/bpf_fentry_test1") int BPF_PROG(test1, int a) @@ -64,21 +75,11 @@ int BPF_PROG(test5, int a, int *b, int ret) } __u64 test6_result = 0; -SEC("kprobe/bpf_fentry_test6+0x5") +SEC("?kprobe") int test6(struct pt_regs *ctx) { __u64 addr = bpf_get_func_ip(ctx); - test6_result = (const void *) addr == &bpf_fentry_test6 + 5; - return 0; -} - -__u64 test7_result = 0; -SEC("kprobe/bpf_fentry_test7+5") -int test7(struct pt_regs *ctx) -{ - __u64 addr = bpf_get_func_ip(ctx); - - test7_result = (const void *) addr == &bpf_fentry_test7 + 5; + test6_result = (const void *) addr == 0; return 0; } diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c new file mode 100644 index 000000000000..b98313d391c6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> + +extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; +extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; +extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; +extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +extern int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +extern void bpf_kfunc_call_int_mem_release(int *p) __ksym; + +struct syscall_test_args { + __u8 data[16]; + size_t size; +}; + +SEC("?syscall") +int kfunc_syscall_test_fail(struct syscall_test_args *args) +{ + bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args) + 1); + + return 0; +} + +SEC("?syscall") +int kfunc_syscall_test_null_fail(struct syscall_test_args *args) +{ + /* Must be called with args as a NULL pointer + * we do not check for it to have the verifier consider that + * the pointer might not be null, and so we can load it. 
+ * + * So the following can not be added: + * + * if (args) + * return -22; + */ + + bpf_kfunc_call_test_mem_len_pass1(args, sizeof(*args)); + + return 0; +} + +SEC("?tc") +int kfunc_call_test_get_mem_fail_rdonly(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int)); + if (p) + p[0] = 42; /* this is a read-only buffer, so -EACCES */ + else + ret = -1; + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + +SEC("?tc") +int kfunc_call_test_get_mem_fail_use_after_free(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int)); + if (p) { + p[0] = 42; + ret = p[1]; /* 108 */ + } else { + ret = -1; + } + + bpf_kfunc_call_test_release(pt); + } + if (p) + ret = p[0]; /* p is not valid anymore */ + + return ret; +} + +SEC("?tc") +int kfunc_call_test_get_mem_fail_oob(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int)); + if (p) + ret = p[2 * sizeof(int)]; /* oob access, so -EACCES */ + else + ret = -1; + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + +int not_const_size = 2 * sizeof(int); + +SEC("?tc") +int kfunc_call_test_get_mem_fail_not_const(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdonly_mem(pt, not_const_size); /* non const size, -EINVAL */ + if (p) + ret = p[0]; + else + ret = -1; + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + +SEC("?tc") +int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + /* we are failing on this one, because we are not acquiring a PTR_TO_BTF_ID (a struct ptr) */ + p = bpf_kfunc_call_test_acq_rdonly_mem(pt, 2 * sizeof(int)); + if (p) + ret = p[0]; + else + ret = -1; + + bpf_kfunc_call_int_mem_release(p); + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index 5aecbb9fdc68..f636e50be259 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -14,6 +14,8 @@ extern void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym; extern void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym; extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym; +extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; +extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; SEC("tc") int kfunc_call_test2(struct __sk_buff *skb) @@ -92,4 +94,73 @@ int kfunc_call_test_pass(struct __sk_buff *skb) return 0; } +struct syscall_test_args { + __u8 data[16]; + size_t size; +}; + +SEC("syscall") +int 
kfunc_syscall_test(struct syscall_test_args *args) +{ + const long size = args->size; + + if (size > sizeof(args->data)) + return -7; /* -E2BIG */ + + bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data)); + bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args)); + bpf_kfunc_call_test_mem_len_pass1(&args->data, size); + + return 0; +} + +SEC("syscall") +int kfunc_syscall_test_null(struct syscall_test_args *args) +{ + /* Must be called with args as a NULL pointer + * we do not check for it to have the verifier consider that + * the pointer might not be null, and so we can load it. + * + * So the following can not be added: + * + * if (args) + * return -22; + */ + + bpf_kfunc_call_test_mem_len_pass1(args, 0); + + return 0; +} + +SEC("tc") +int kfunc_call_test_get_mem(struct __sk_buff *skb) +{ + struct prog_test_ref_kfunc *pt; + unsigned long s = 0; + int *p = NULL; + int ret = 0; + + pt = bpf_kfunc_call_test_acquire(&s); + if (pt) { + p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int)); + if (p) { + p[0] = 42; + ret = p[1]; /* 108 */ + } else { + ret = -1; + } + + if (ret >= 0) { + p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int)); + if (p) + ret = p[0]; /* 42 */ + else + ret = -1; + } + + bpf_kfunc_call_test_release(pt); + } + return ret; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c index 08f95a8155d1..98c3399e15c0 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c @@ -36,15 +36,13 @@ __u64 kretprobe_test6_result = 0; __u64 kretprobe_test7_result = 0; __u64 kretprobe_test8_result = 0; -extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak; - static void kprobe_multi_check(void *ctx, bool is_return) { if (bpf_get_current_pid_tgid() >> 32 != pid) return; __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0; - __u64 addr = bpf_get_func_ip(ctx) - (CONFIG_X86_KERNEL_IBT ? 
4 : 0); + __u64 addr = bpf_get_func_ip(ctx); #define SET(__var, __addr, __cookie) ({ \ if (((const void *) addr == __addr) && \ diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c index 2722441850cc..227e85e85dda 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #define EAFNOSUPPORT 97 #define EPROTO 71 @@ -23,6 +24,9 @@ int test_insert_entry = -EAFNOSUPPORT; int test_succ_lookup = -ENOENT; u32 test_delta_timeout = 0; u32 test_status = 0; +u32 test_insert_lookup_mark = 0; +int test_snat_addr = -EINVAL; +int test_dnat_addr = -EINVAL; __be32 saddr = 0; __be16 sport = 0; __be32 daddr = 0; @@ -53,6 +57,8 @@ void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym; int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym; int bpf_ct_set_status(struct nf_conn *, u32) __ksym; int bpf_ct_change_status(struct nf_conn *, u32) __ksym; +int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *, + int port, enum nf_nat_manip_type) __ksym; static __always_inline void nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, @@ -140,10 +146,21 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def)); if (ct) { + __u16 sport = bpf_get_prandom_u32(); + __u16 dport = bpf_get_prandom_u32(); + union nf_inet_addr saddr = {}; + union nf_inet_addr daddr = {}; struct nf_conn *ct_ins; bpf_ct_set_timeout(ct, 10000); - bpf_ct_set_status(ct, IPS_CONFIRMED); + ct->mark = 77; + + /* snat */ + saddr.ip = bpf_get_prandom_u32(); + bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC); + /* dnat */ + daddr.ip = bpf_get_prandom_u32(); + bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST); ct_ins = bpf_ct_insert_entry(ct); if (ct_ins) { @@ -152,12 +169,26 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def)); if (ct_lk) { + struct nf_conntrack_tuple *tuple; + + /* check snat and dnat addresses */ + tuple = &ct_lk->tuplehash[IP_CT_DIR_REPLY].tuple; + if (tuple->dst.u3.ip == saddr.ip && + tuple->dst.u.all == bpf_htons(sport)) + test_snat_addr = 0; + if (tuple->src.u3.ip == daddr.ip && + tuple->src.u.all == bpf_htons(dport)) + test_dnat_addr = 0; + /* update ct entry timeout */ bpf_ct_change_timeout(ct_lk, 10000); test_delta_timeout = ct_lk->timeout - bpf_jiffies64(); test_delta_timeout /= CONFIG_HZ; - test_status = IPS_SEEN_REPLY; - bpf_ct_change_status(ct_lk, IPS_SEEN_REPLY); + test_insert_lookup_mark = ct_lk->mark; + bpf_ct_change_status(ct_lk, + IPS_CONFIRMED | IPS_SEEN_REPLY); + test_status = ct_lk->status; + bpf_ct_release(ct_lk); test_succ_lookup = 0; } @@ -175,8 +206,10 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, sizeof(opts_def)); if (ct) { test_exist_lookup = 0; - if (ct->mark == 42) - test_exist_lookup_mark = 43; + if (ct->mark == 42) { + ct->mark++; + test_exist_lookup_mark = ct->mark; + } bpf_ct_release(ct); } else { test_exist_lookup = opts_def.error; diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c index bf79af15c808..0e4759ab38ff 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c +++ 
b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c @@ -70,6 +70,20 @@ int lookup_insert(struct __sk_buff *ctx) } SEC("?tc") +int write_not_allowlisted_field(struct __sk_buff *ctx) +{ + struct bpf_ct_opts___local opts = {}; + struct bpf_sock_tuple tup = {}; + struct nf_conn *ct; + + ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts)); + if (!ct) + return 0; + ct->status = 0xF00; + return 0; +} + +SEC("?tc") int set_timeout_after_insert(struct __sk_buff *ctx) { struct bpf_ct_opts___local opts = {}; diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c new file mode 100644 index 000000000000..ce39d096bba3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; +extern void bpf_key_put(struct bpf_key *key) __ksym; +extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr, + struct bpf_dynptr *sig_ptr, + struct bpf_key *trusted_keyring) __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); +} ringbuf SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} array_map SEC(".maps"); + +int err, pid; + +char _license[] SEC("license") = "GPL"; + +SEC("?lsm.s/bpf") +int BPF_PROG(dynptr_type_not_supp, int cmd, union bpf_attr *attr, + unsigned int size) +{ + char write_data[64] = "hello there, world!!"; + struct bpf_dynptr ptr; + + bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr); + + return bpf_verify_pkcs7_signature(&ptr, &ptr, NULL); +} + +SEC("?lsm.s/bpf") +int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) +{ + unsigned long val; + + return bpf_verify_pkcs7_signature((struct bpf_dynptr *)&val, + (struct bpf_dynptr *)&val, NULL); +} + +SEC("?lsm.s/bpf") +int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) +{ + unsigned long val; + + return bpf_verify_pkcs7_signature((struct bpf_dynptr *)val, + (struct bpf_dynptr *)val, NULL); +} + +SEC("lsm.s/bpf") +int BPF_PROG(dynptr_data_null, int cmd, union bpf_attr *attr, unsigned int size) +{ + struct bpf_key *trusted_keyring; + struct bpf_dynptr ptr; + __u32 *value; + int ret, zero = 0; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + value = bpf_map_lookup_elem(&array_map, &zero); + if (!value) + return 0; + + /* Pass invalid flags. 
*/ + ret = bpf_dynptr_from_mem(value, sizeof(*value), ((__u64)~0ULL), &ptr); + if (ret != -EINVAL) + return 0; + + trusted_keyring = bpf_lookup_system_key(0); + if (!trusted_keyring) + return 0; + + err = bpf_verify_pkcs7_signature(&ptr, &ptr, trusted_keyring); + + bpf_key_put(trusted_keyring); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_lookup_key.c b/tools/testing/selftests/bpf/progs/test_lookup_key.c new file mode 100644 index 000000000000..c73776990ae3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_lookup_key.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +__u32 monitored_pid; +__u32 key_serial; +__u32 key_id; +__u64 flags; + +extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; +extern void bpf_key_put(struct bpf_key *key) __ksym; + +SEC("lsm.s/bpf") +int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size) +{ + struct bpf_key *bkey; + __u32 pid; + + pid = bpf_get_current_pid_tgid() >> 32; + if (pid != monitored_pid) + return 0; + + if (key_serial) + bkey = bpf_lookup_user_key(key_serial, flags); + else + bkey = bpf_lookup_system_key(key_id); + + if (!bkey) + return -ENOENT; + + bpf_key_put(bkey); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_user_ringbuf.h b/tools/testing/selftests/bpf/progs/test_user_ringbuf.h new file mode 100644 index 000000000000..1643b4d59ba7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_user_ringbuf.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#ifndef _TEST_USER_RINGBUF_H +#define _TEST_USER_RINGBUF_H + +#define TEST_OP_64 4 +#define TEST_OP_32 2 + +enum test_msg_op { + TEST_MSG_OP_INC64, + TEST_MSG_OP_INC32, + TEST_MSG_OP_MUL64, + TEST_MSG_OP_MUL32, + + // Must come last. 
+ TEST_MSG_OP_NUM_OPS, +}; + +struct test_msg { + enum test_msg_op msg_op; + union { + __s64 operand_64; + __s32 operand_32; + }; +}; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +#endif /* _TEST_USER_RINGBUF_H */ diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale1.c b/tools/testing/selftests/bpf/progs/test_verif_scale1.c index d38153dab3dd..ac6135d9374c 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale1.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale1.c @@ -5,7 +5,7 @@ #define ATTR __attribute__((noinline)) #include "test_jhash.h" -SEC("scale90_noinline") +SEC("tc") int balancer_ingress(struct __sk_buff *ctx) { void *data_end = (void *)(long)ctx->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale3.c b/tools/testing/selftests/bpf/progs/test_verif_scale3.c index 9beb5bf80373..ca33a9b711c4 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale3.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale3.c @@ -5,7 +5,7 @@ #define ATTR __attribute__((noinline)) #include "test_jhash.h" -SEC("scale90_noinline32") +SEC("tc") int balancer_ingress(struct __sk_buff *ctx) { void *data_end = (void *)(long)ctx->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c new file mode 100644 index 000000000000..ce419304ff1f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +#define MAX_DATA_SIZE (1024 * 1024) +#define MAX_SIG_SIZE 1024 + +extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; +extern void bpf_key_put(struct bpf_key *key) __ksym; +extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr, + struct bpf_dynptr *sig_ptr, + struct bpf_key *trusted_keyring) __ksym; + +__u32 monitored_pid; +__u32 user_keyring_serial; +__u64 system_keyring_id; + +struct data { + __u8 data[MAX_DATA_SIZE]; + __u32 data_len; + __u8 sig[MAX_SIG_SIZE]; + __u32 sig_len; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, struct data); +} data_input SEC(".maps"); + +char _license[] SEC("license") = "GPL"; + +SEC("lsm.s/bpf") +int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size) +{ + struct bpf_dynptr data_ptr, sig_ptr; + struct data *data_val; + struct bpf_key *trusted_keyring; + __u32 pid; + __u64 value; + int ret, zero = 0; + + pid = bpf_get_current_pid_tgid() >> 32; + if (pid != monitored_pid) + return 0; + + data_val = bpf_map_lookup_elem(&data_input, &zero); + if (!data_val) + return 0; + + bpf_probe_read(&value, sizeof(value), &attr->value); + + bpf_copy_from_user(data_val, sizeof(struct data), + (void *)(unsigned long)value); + + if (data_val->data_len > sizeof(data_val->data)) + return -EINVAL; + + bpf_dynptr_from_mem(data_val->data, data_val->data_len, 0, &data_ptr); + + if (data_val->sig_len > sizeof(data_val->sig)) + return -EINVAL; + + bpf_dynptr_from_mem(data_val->sig, data_val->sig_len, 0, &sig_ptr); + + if (user_keyring_serial) + trusted_keyring = bpf_lookup_user_key(user_keyring_serial, 0); + else + trusted_keyring = 
bpf_lookup_system_key(system_keyring_id); + + if (!trusted_keyring) + return -ENOENT; + + ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted_keyring); + + bpf_key_put(trusted_keyring); + + return ret; +} diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c index 0053c5402173..acda5c9cea93 100644 --- a/tools/testing/selftests/bpf/progs/timer.c +++ b/tools/testing/selftests/bpf/progs/timer.c @@ -120,7 +120,7 @@ static int timer_cb1(void *map, int *key, struct bpf_timer *timer) } SEC("fentry/bpf_fentry_test1") -int BPF_PROG(test1, int a) +int BPF_PROG2(test1, int, a) { struct bpf_timer *arr_timer, *lru_timer; struct elem init = {}; @@ -236,7 +236,7 @@ int bpf_timer_test(void) } SEC("fentry/bpf_fentry_test2") -int BPF_PROG(test2, int a, int b) +int BPF_PROG2(test2, int, a, int, b) { struct hmap_elem init = {}, *val; int key = HTAB, key_malloc = HTAB_MALLOC; diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c new file mode 100644 index 000000000000..e718f0ebee7d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tracing_struct.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +struct bpf_testmod_struct_arg_1 { + int a; +}; +struct bpf_testmod_struct_arg_2 { + long a; + long b; +}; + +long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs; +__u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3; +long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret; +long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret; +long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret; +long t5_ret; + +SEC("fentry/bpf_testmod_test_struct_arg_1") +int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c) +{ + t1_a_a = a.a; + t1_a_b = a.b; + t1_b = b; + t1_c = c; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_1") +int BPF_PROG2(test_struct_arg_2, struct bpf_testmod_struct_arg_2, a, int, b, int, c, int, ret) +{ + t1_nregs = bpf_get_func_arg_cnt(ctx); + /* a.a */ + bpf_get_func_arg(ctx, 0, &t1_reg0); + /* a.b */ + bpf_get_func_arg(ctx, 1, &t1_reg1); + /* b */ + bpf_get_func_arg(ctx, 2, &t1_reg2); + t1_reg2 = (int)t1_reg2; + /* c */ + bpf_get_func_arg(ctx, 3, &t1_reg3); + t1_reg3 = (int)t1_reg3; + + t1_ret = ret; + return 0; +} + +SEC("fentry/bpf_testmod_test_struct_arg_2") +int BPF_PROG2(test_struct_arg_3, int, a, struct bpf_testmod_struct_arg_2, b, int, c) +{ + t2_a = a; + t2_b_a = b.a; + t2_b_b = b.b; + t2_c = c; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_2") +int BPF_PROG2(test_struct_arg_4, int, a, struct bpf_testmod_struct_arg_2, b, int, c, int, ret) +{ + t2_ret = ret; + return 0; +} + +SEC("fentry/bpf_testmod_test_struct_arg_3") +int BPF_PROG2(test_struct_arg_5, int, a, int, b, struct bpf_testmod_struct_arg_2, c) +{ + t3_a = a; + t3_b = b; + t3_c_a = c.a; + t3_c_b = c.b; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_3") +int BPF_PROG2(test_struct_arg_6, int, a, int, b, struct bpf_testmod_struct_arg_2, c, int, ret) +{ + t3_ret = ret; + return 0; +} + +SEC("fentry/bpf_testmod_test_struct_arg_4") +int BPF_PROG2(test_struct_arg_7, struct bpf_testmod_struct_arg_1, a, int, b, + int, c, int, d, struct bpf_testmod_struct_arg_2, e) +{ + t4_a_a = a.a; + t4_b = b; + t4_c = c; + t4_d = d; + t4_e_a = e.a; + t4_e_b = e.b; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_4") +int BPF_PROG2(test_struct_arg_8, struct 
bpf_testmod_struct_arg_1, a, int, b, + int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret) +{ + t4_ret = ret; + return 0; +} + +SEC("fentry/bpf_testmod_test_struct_arg_5") +int BPF_PROG2(test_struct_arg_9) +{ + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_5") +int BPF_PROG2(test_struct_arg_10, int, ret) +{ + t5_ret = ret; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c new file mode 100644 index 000000000000..82aba4529aa9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_USER_RINGBUF); +} user_ringbuf SEC(".maps"); + +static long +bad_access1(struct bpf_dynptr *dynptr, void *context) +{ + const struct sample *sample; + + sample = bpf_dynptr_data(dynptr - 1, 0, sizeof(*sample)); + bpf_printk("Was able to pass bad pointer %lx\n", (__u64)dynptr - 1); + + return 0; +} + +/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should + * not be able to read before the pointer. + */ +SEC("?raw_tp/sys_nanosleep") +int user_ringbuf_callback_bad_access1(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, bad_access1, NULL, 0); + + return 0; +} + +static long +bad_access2(struct bpf_dynptr *dynptr, void *context) +{ + const struct sample *sample; + + sample = bpf_dynptr_data(dynptr + 1, 0, sizeof(*sample)); + bpf_printk("Was able to pass bad pointer %lx\n", (__u64)dynptr + 1); + + return 0; +} + +/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should + * not be able to read past the end of the pointer. + */ +SEC("?raw_tp/sys_nanosleep") +int user_ringbuf_callback_bad_access2(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, bad_access2, NULL, 0); + + return 0; +} + +static long +write_forbidden(struct bpf_dynptr *dynptr, void *context) +{ + *((long *)dynptr) = 0; + + return 0; +} + +/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should + * not be able to write to that pointer. + */ +SEC("?raw_tp/sys_nanosleep") +int user_ringbuf_callback_write_forbidden(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, write_forbidden, NULL, 0); + + return 0; +} + +static long +null_context_write(struct bpf_dynptr *dynptr, void *context) +{ + *((__u64 *)context) = 0; + + return 0; +} + +/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should + * not be able to write to that pointer. + */ +SEC("?raw_tp/sys_nanosleep") +int user_ringbuf_callback_null_context_write(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, null_context_write, NULL, 0); + + return 0; +} + +static long +null_context_read(struct bpf_dynptr *dynptr, void *context) +{ + __u64 id = *((__u64 *)context); + + bpf_printk("Read id %lu\n", id); + + return 0; +} + +/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should + * not be able to write to that pointer. 
+ */ +SEC("?raw_tp/sys_nanosleep") +int user_ringbuf_callback_null_context_read(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, null_context_read, NULL, 0); + + return 0; +} + +static long +try_discard_dynptr(struct bpf_dynptr *dynptr, void *context) +{ + bpf_ringbuf_discard_dynptr(dynptr, 0); + + return 0; +} + +/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should + * not be able to read past the end of the pointer. + */ +SEC("?raw_tp/sys_nanosleep") +int user_ringbuf_callback_discard_dynptr(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, try_discard_dynptr, NULL, 0); + + return 0; +} + +static long +try_submit_dynptr(struct bpf_dynptr *dynptr, void *context) +{ + bpf_ringbuf_submit_dynptr(dynptr, 0); + + return 0; +} + +/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should + * not be able to read past the end of the pointer. + */ +SEC("?raw_tp/sys_nanosleep") +int user_ringbuf_callback_submit_dynptr(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, try_submit_dynptr, NULL, 0); + + return 0; +} + +static long +invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context) +{ + return 2; +} + +/* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should + * not be able to write to that pointer. + */ +SEC("?raw_tp/sys_nanosleep") +int user_ringbuf_callback_invalid_return(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c new file mode 100644 index 000000000000..099c23d9aa21 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "test_user_ringbuf.h" + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_USER_RINGBUF); +} user_ringbuf SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); +} kernel_ringbuf SEC(".maps"); + +/* inputs */ +int pid, err, val; + +int read = 0; + +/* Counter used for end-to-end protocol test */ +__u64 kern_mutated = 0; +__u64 user_mutated = 0; +__u64 expected_user_mutated = 0; + +static int +is_test_process(void) +{ + int cur_pid = bpf_get_current_pid_tgid() >> 32; + + return cur_pid == pid; +} + +static long +record_sample(struct bpf_dynptr *dynptr, void *context) +{ + const struct sample *sample = NULL; + struct sample stack_sample; + int status; + static int num_calls; + + if (num_calls++ % 2 == 0) { + status = bpf_dynptr_read(&stack_sample, sizeof(stack_sample), dynptr, 0, 0); + if (status) { + bpf_printk("bpf_dynptr_read() failed: %d\n", status); + err = 1; + return 0; + } + } else { + sample = bpf_dynptr_data(dynptr, 0, sizeof(*sample)); + if (!sample) { + bpf_printk("Unexpectedly failed to get sample\n"); + err = 2; + return 0; + } + stack_sample = *sample; + } + + __sync_fetch_and_add(&read, 1); + return 0; +} + +static void +handle_sample_msg(const struct test_msg *msg) +{ + switch (msg->msg_op) { + case TEST_MSG_OP_INC64: + kern_mutated += msg->operand_64; + break; + case TEST_MSG_OP_INC32: + kern_mutated += msg->operand_32; + break; + case TEST_MSG_OP_MUL64: + kern_mutated *= msg->operand_64; + break; + case TEST_MSG_OP_MUL32: + kern_mutated *= msg->operand_32; + break; + default: + bpf_printk("Unrecognized op %d\n", msg->msg_op); + err = 2; + } +} + +static long +read_protocol_msg(struct bpf_dynptr *dynptr, void *context) +{ + const struct test_msg *msg = NULL; + + msg = bpf_dynptr_data(dynptr, 0, sizeof(*msg)); + if (!msg) { + err = 1; + bpf_printk("Unexpectedly failed to get msg\n"); + return 0; + } + + handle_sample_msg(msg); + + return 0; +} + +static int publish_next_kern_msg(__u32 index, void *context) +{ + struct test_msg *msg = NULL; + int operand_64 = TEST_OP_64; + int operand_32 = TEST_OP_32; + + msg = bpf_ringbuf_reserve(&kernel_ringbuf, sizeof(*msg), 0); + if (!msg) { + err = 4; + return 1; + } + + switch (index % TEST_MSG_OP_NUM_OPS) { + case TEST_MSG_OP_INC64: + msg->operand_64 = operand_64; + msg->msg_op = TEST_MSG_OP_INC64; + expected_user_mutated += operand_64; + break; + case TEST_MSG_OP_INC32: + msg->operand_32 = operand_32; + msg->msg_op = TEST_MSG_OP_INC32; + expected_user_mutated += operand_32; + break; + case TEST_MSG_OP_MUL64: + msg->operand_64 = operand_64; + msg->msg_op = TEST_MSG_OP_MUL64; + expected_user_mutated *= operand_64; + break; + case TEST_MSG_OP_MUL32: + msg->operand_32 = operand_32; + msg->msg_op = TEST_MSG_OP_MUL32; + expected_user_mutated *= operand_32; + break; + default: + bpf_ringbuf_discard(msg, 0); + err = 5; + return 1; + } + + bpf_ringbuf_submit(msg, 0); + + return 0; +} + +static void +publish_kern_messages(void) +{ + if (expected_user_mutated != user_mutated) { + bpf_printk("%lu != %lu\n", expected_user_mutated, user_mutated); + err = 3; + return; + } + + bpf_loop(8, publish_next_kern_msg, NULL, 0); +} + +SEC("fentry/" SYS_PREFIX "sys_prctl") +int test_user_ringbuf_protocol(void *ctx) +{ + long status = 0; + struct sample *sample = NULL; + struct bpf_dynptr ptr; + + if (!is_test_process()) + return 0; + + status = bpf_user_ringbuf_drain(&user_ringbuf, read_protocol_msg, NULL, 0); + if (status < 0) { 
+ bpf_printk("Drain returned: %ld\n", status); + err = 1; + return 0; + } + + publish_kern_messages(); + + return 0; +} + +SEC("fentry/" SYS_PREFIX "sys_getpgid") +int test_user_ringbuf(void *ctx) +{ + int status = 0; + struct sample *sample = NULL; + struct bpf_dynptr ptr; + + if (!is_test_process()) + return 0; + + err = bpf_user_ringbuf_drain(&user_ringbuf, record_sample, NULL, 0); + + return 0; +} + +static long +do_nothing_cb(struct bpf_dynptr *dynptr, void *context) +{ + __sync_fetch_and_add(&read, 1); + return 0; +} + +SEC("fentry/" SYS_PREFIX "sys_getrlimit") +int test_user_ringbuf_epoll(void *ctx) +{ + long num_samples; + + if (!is_test_process()) + return 0; + + num_samples = bpf_user_ringbuf_drain(&user_ringbuf, do_nothing_cb, NULL, 0); + if (num_samples <= 0) + err = 1; + + return 0; +} |