Diffstat (limited to 'tools/testing/selftests')
-rw-r--r--  tools/testing/selftests/bpf/Makefile                          |   2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/atomics.c              | 246
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c  |   4
-rw-r--r--  tools/testing/selftests/bpf/progs/atomics.c                   | 154
-rw-r--r--  tools/testing/selftests/bpf/test_cgroup_storage.c             |   2
-rw-r--r--  tools/testing/selftests/bpf/verifier/atomic_and.c             |  77
-rw-r--r--  tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c         |  96
-rw-r--r--  tools/testing/selftests/bpf/verifier/atomic_fetch_add.c       | 106
-rw-r--r--  tools/testing/selftests/bpf/verifier/atomic_or.c              |  77
-rw-r--r--  tools/testing/selftests/bpf/verifier/atomic_xchg.c            |  46
-rw-r--r--  tools/testing/selftests/bpf/verifier/atomic_xor.c             |  77
-rw-r--r--  tools/testing/selftests/bpf/verifier/ctx.c                    |   7
-rw-r--r--  tools/testing/selftests/bpf/verifier/direct_packet_access.c   |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/leak_ptr.c               |  10
-rw-r--r--  tools/testing/selftests/bpf/verifier/meta_access.c            |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/unpriv.c                 |   3
-rw-r--r--  tools/testing/selftests/bpf/verifier/value_illegal_alu.c      |   2
-rw-r--r--  tools/testing/selftests/bpf/verifier/xadd.c                   |  18
18 files changed, 908 insertions, 27 deletions
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 7f8667ad113e..0552b07717b6 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -414,10 +414,12 @@ TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
$(wildcard progs/btf_dump_test_case_*.c)
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS)
+TRUNNER_BPF_CFLAGS += -DENABLE_ATOMICS_TESTS
$(eval $(call DEFINE_TEST_RUNNER,test_progs))
# Define test_progs-no_alu32 test runner.
TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS)
$(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32))
# Define test_progs BPF-GCC-flavored test runner.
diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c
new file mode 100644
index 000000000000..21efe7bbf10d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/atomics.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "atomics.skel.h"
+
+static void test_add(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.add);
+ if (CHECK(IS_ERR(link), "attach(add)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.add);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run add",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
+ ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");
+
+ ASSERT_EQ(skel->data->add32_value, 3, "add32_value");
+ ASSERT_EQ(skel->bss->add32_result, 1, "add32_result");
+
+ ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value");
+ ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");
+
+ ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
+
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_sub(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.sub);
+ if (CHECK(IS_ERR(link), "attach(sub)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.sub);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run sub",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
+ ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");
+
+ ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value");
+ ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result");
+
+ ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, "sub_stack_value");
+ ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");
+
+ ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
+
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_and(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.and);
+ if (CHECK(IS_ERR(link), "attach(and)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.and);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run and",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
+ ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");
+
+ ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value");
+ ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");
+
+ ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_or(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.or);
+ if (CHECK(IS_ERR(link), "attach(or)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.or);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run or",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
+ ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");
+
+ ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value");
+ ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");
+
+ ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_xor(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.xor);
+ if (CHECK(IS_ERR(link), "attach(xor)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.xor);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run xor",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
+ ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");
+
+ ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value");
+ ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");
+
+ ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_noreturn_value");
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_cmpxchg(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.cmpxchg);
+ if (CHECK(IS_ERR(link), "attach(cmpxchg)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.cmpxchg);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run add",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
+ ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg64_result_fail");
+ ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg64_result_succeed");
+
+ ASSERT_EQ(skel->data->cmpxchg32_value, 2, "cmpxchg32_value");
+ ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg32_result_fail");
+ ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg32_result_succeed");
+
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_xchg(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.xchg);
+ if (CHECK(IS_ERR(link), "attach(xchg)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.xchg);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run add",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
+ ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");
+
+ ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
+ ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
+
+cleanup:
+ bpf_link__destroy(link);
+}
+
+void test_atomics(void)
+{
+ struct atomics *skel;
+ __u32 duration = 0;
+
+ skel = atomics__open_and_load();
+ if (CHECK(!skel, "skel_load", "atomics skeleton failed\n"))
+ return;
+
+ if (skel->data->skip_tests) {
+ printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)",
+ __func__);
+ test__skip();
+ goto cleanup;
+ }
+
+ if (test__start_subtest("add"))
+ test_add(skel);
+ if (test__start_subtest("sub"))
+ test_sub(skel);
+ if (test__start_subtest("and"))
+ test_and(skel);
+ if (test__start_subtest("or"))
+ test_or(skel);
+ if (test__start_subtest("xor"))
+ test_xor(skel);
+ if (test__start_subtest("cmpxchg"))
+ test_cmpxchg(skel);
+ if (test__start_subtest("xchg"))
+ test_xchg(skel);
+
+cleanup:
+ atomics__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
index b549fcfacc0b..0a1fc9816cef 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -45,13 +45,13 @@ static int prog_load_cnt(int verdict, int val)
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, val),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
diff --git a/tools/testing/selftests/bpf/progs/atomics.c b/tools/testing/selftests/bpf/progs/atomics.c
new file mode 100644
index 000000000000..c245345e41ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/atomics.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+#ifdef ENABLE_ATOMICS_TESTS
+bool skip_tests __attribute__((__section__(".data"))) = false;
+#else
+bool skip_tests = true;
+#endif
+
+__u64 add64_value = 1;
+__u64 add64_result = 0;
+__u32 add32_value = 1;
+__u32 add32_result = 0;
+__u64 add_stack_value_copy = 0;
+__u64 add_stack_result = 0;
+__u64 add_noreturn_value = 1;
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(add, int a)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 add_stack_value = 1;
+
+ add64_result = __sync_fetch_and_add(&add64_value, 2);
+ add32_result = __sync_fetch_and_add(&add32_value, 2);
+ add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
+ add_stack_value_copy = add_stack_value;
+ __sync_fetch_and_add(&add_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+__s64 sub64_value = 1;
+__s64 sub64_result = 0;
+__s32 sub32_value = 1;
+__s32 sub32_result = 0;
+__s64 sub_stack_value_copy = 0;
+__s64 sub_stack_result = 0;
+__s64 sub_noreturn_value = 1;
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(sub, int a)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 sub_stack_value = 1;
+
+ sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
+ sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
+ sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
+ sub_stack_value_copy = sub_stack_value;
+ __sync_fetch_and_sub(&sub_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+__u64 and64_value = (0x110ull << 32);
+__u64 and64_result = 0;
+__u32 and32_value = 0x110;
+__u32 and32_result = 0;
+__u64 and_noreturn_value = (0x110ull << 32);
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(and, int a)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+
+ and64_result = __sync_fetch_and_and(&and64_value, 0x011ull << 32);
+ and32_result = __sync_fetch_and_and(&and32_value, 0x011);
+ __sync_fetch_and_and(&and_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 or64_value = (0x110ull << 32);
+__u64 or64_result = 0;
+__u32 or32_value = 0x110;
+__u32 or32_result = 0;
+__u64 or_noreturn_value = (0x110ull << 32);
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(or, int a)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ or64_result = __sync_fetch_and_or(&or64_value, 0x011ull << 32);
+ or32_result = __sync_fetch_and_or(&or32_value, 0x011);
+ __sync_fetch_and_or(&or_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 xor64_value = (0x110ull << 32);
+__u64 xor64_result = 0;
+__u32 xor32_value = 0x110;
+__u32 xor32_result = 0;
+__u64 xor_noreturn_value = (0x110ull << 32);
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(xor, int a)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ xor64_result = __sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
+ xor32_result = __sync_fetch_and_xor(&xor32_value, 0x011);
+ __sync_fetch_and_xor(&xor_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 cmpxchg64_value = 1;
+__u64 cmpxchg64_result_fail = 0;
+__u64 cmpxchg64_result_succeed = 0;
+__u32 cmpxchg32_value = 1;
+__u32 cmpxchg32_result_fail = 0;
+__u32 cmpxchg32_result_succeed = 0;
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(cmpxchg, int a)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
+ cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);
+
+ cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
+ cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
+#endif
+
+ return 0;
+}
+
+__u64 xchg64_value = 1;
+__u64 xchg64_result = 0;
+__u32 xchg32_value = 1;
+__u32 xchg32_result = 0;
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(xchg, int a)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 val64 = 2;
+ __u32 val32 = 2;
+
+ xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
+ xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
+#endif
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index d946252a25bb..0cda61da5d39 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -29,7 +29,7 @@ int main(int argc, char **argv)
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
diff --git a/tools/testing/selftests/bpf/verifier/atomic_and.c b/tools/testing/selftests/bpf/verifier/atomic_and.c
new file mode 100644
index 000000000000..600bc5e0f143
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/atomic_and.c
@@ -0,0 +1,77 @@
+{
+ "BPF_ATOMIC_AND without fetch",
+ .insns = {
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
+ /* atomic_and(&val, 0x011); */
+ BPF_MOV64_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_DW, BPF_AND, BPF_REG_10, BPF_REG_1, -8),
+ /* if (val != 0x010) exit(2); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x010, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ /* r1 should not be clobbered, no BPF_FETCH flag */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "BPF_ATOMIC_AND with fetch",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 123),
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
+ /* old = atomic_fetch_and(&val, 0x011); */
+ BPF_MOV64_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
+ /* if (old != 0x110) exit(3); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* if (val != 0x010) exit(2); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ /* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "BPF_ATOMIC_AND with fetch 32bit",
+ .insns = {
+ /* r0 = (s64) -1 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
+ /* old = atomic_fetch_and(&val, 0x011); */
+ BPF_MOV32_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
+ /* if (old != 0x110) exit(3); */
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* if (val != 0x010) exit(2); */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ /* Check R0 wasn't clobbered (for fear of x86 JIT bug)
+ * It should be -1 so add 1 to get exit code.
+ */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
new file mode 100644
index 000000000000..2efd8bcf57a1
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
@@ -0,0 +1,96 @@
+{
+ "atomic compare-and-exchange smoketest - 64bit",
+ .insns = {
+ /* val = 3; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
+ /* old = atomic_cmpxchg(&val, 2, 4); */
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
+ /* if (old != 3) exit(2); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ /* if (val != 3) exit(3); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* old = atomic_cmpxchg(&val, 3, 4); */
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
+ /* if (old != 3) exit(4); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ /* if (val != 4) exit(5); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "atomic compare-and-exchange smoketest - 32bit",
+ .insns = {
+ /* val = 3; */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
+ /* old = atomic_cmpxchg(&val, 2, 4); */
+ BPF_MOV32_IMM(BPF_REG_1, 4),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
+ /* if (old != 3) exit(2); */
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ /* if (val != 3) exit(3); */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* old = atomic_cmpxchg(&val, 3, 4); */
+ BPF_MOV32_IMM(BPF_REG_1, 4),
+ BPF_MOV32_IMM(BPF_REG_0, 3),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
+ /* if (old != 3) exit(4); */
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ /* if (val != 4) exit(5); */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ /* exit(0); */
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "Can't use cmpxchg on uninit src reg",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "!read_ok",
+},
+{
+ "Can't use cmpxchg on uninit memory",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid read from stack",
+},
diff --git a/tools/testing/selftests/bpf/verifier/atomic_fetch_add.c b/tools/testing/selftests/bpf/verifier/atomic_fetch_add.c
new file mode 100644
index 000000000000..a91de8cd9def
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/atomic_fetch_add.c
@@ -0,0 +1,106 @@
+{
+ "BPF_ATOMIC_FETCH_ADD smoketest - 64bit",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* Write 3 to stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
+ /* Put a 1 in R1, add it to the 3 on the stack, and load the value back into R1 */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
+ /* Check the value we loaded back was 3 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* Load value from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
+ /* Check value loaded from stack was 4 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "BPF_ATOMIC_FETCH_ADD smoketest - 32bit",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* Write 3 to stack */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
+ /* Put a 1 in R1, add it to the 3 on the stack, and load the value back into R1 */
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
+ /* Check the value we loaded back was 3 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* Load value from stack */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
+ /* Check value loaded from stack was 4 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "Can't use ATM_FETCH_ADD on frame pointer",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R10 leaks addr into mem",
+ .errstr = "frame pointer is read only",
+},
+{
+ "Can't use ATM_FETCH_ADD on uninit src reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ /* It happens that the address leak check is first, but it would also
+ * complain about the fact that we're trying to modify R10.
+ */
+ .errstr = "!read_ok",
+},
+{
+ "Can't use ATM_FETCH_ADD on uninit dst reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ /* It happens that the address leak check is first, but it would also
+ * complain about the fact that we're trying to modify R10.
+ */
+ .errstr = "!read_ok",
+},
+{
+ "Can't use ATM_FETCH_ADD on kernel memory",
+ .insns = {
+ /* This is an fentry prog, context is an array of the args of the
+ * kernel function being called. Load first arg into R2.
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 0),
+ /* First arg of bpf_fentry_test7 is a pointer to a struct.
+ * Attempt to modify that struct. Verifier shouldn't let us
+ * because it's kernel memory.
+ */
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_3, 0),
+ /* Done */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ .kfunc = "bpf_fentry_test7",
+ .result = REJECT,
+ .errstr = "only read is supported",
+},
diff --git a/tools/testing/selftests/bpf/verifier/atomic_or.c b/tools/testing/selftests/bpf/verifier/atomic_or.c
new file mode 100644
index 000000000000..ebe6e51455ba
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/atomic_or.c
@@ -0,0 +1,77 @@
+{
+ "BPF_ATOMIC OR without fetch",
+ .insns = {
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
+ /* atomic_or(&val, 0x011); */
+ BPF_MOV64_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_DW, BPF_OR, BPF_REG_10, BPF_REG_1, -8),
+ /* if (val != 0x111) exit(2); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x111, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ /* r1 should not be clobbered, no BPF_FETCH flag */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "BPF_ATOMIC OR with fetch",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 123),
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
+ /* old = atomic_fetch_or(&val, 0x011); */
+ BPF_MOV64_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
+ /* if (old != 0x110) exit(3); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* if (val != 0x111) exit(2); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x111, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ /* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "BPF_ATOMIC OR with fetch 32bit",
+ .insns = {
+ /* r0 = (s64) -1 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
+ /* old = atomic_fetch_or(&val, 0x011); */
+ BPF_MOV32_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_W, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
+ /* if (old != 0x110) exit(3); */
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* if (val != 0x111) exit(2); */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x111, 2),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ /* Check R0 wasn't clobbered (for fear of x86 JIT bug)
+ * It should be -1 so add 1 to get exit code.
+ */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/atomic_xchg.c b/tools/testing/selftests/bpf/verifier/atomic_xchg.c
new file mode 100644
index 000000000000..33e2d6c973ee
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/atomic_xchg.c
@@ -0,0 +1,46 @@
+{
+ "atomic exchange smoketest - 64bit",
+ .insns = {
+ /* val = 3; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
+ /* old = atomic_xchg(&val, 4); */
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_1, -8),
+ /* if (old != 3) exit(1); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* if (val != 4) exit(2); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "atomic exchange smoketest - 32bit",
+ .insns = {
+ /* val = 3; */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
+ /* old = atomic_xchg(&val, 4); */
+ BPF_MOV32_IMM(BPF_REG_1, 4),
+ BPF_ATOMIC_OP(BPF_W, BPF_XCHG, BPF_REG_10, BPF_REG_1, -4),
+ /* if (old != 3) exit(1); */
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* if (val != 4) exit(2); */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ /* exit(0); */
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/atomic_xor.c b/tools/testing/selftests/bpf/verifier/atomic_xor.c
new file mode 100644
index 000000000000..eb791e547b47
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/atomic_xor.c
@@ -0,0 +1,77 @@
+{
+ "BPF_ATOMIC XOR without fetch",
+ .insns = {
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
+ /* atomic_xor(&val, 0x011); */
+ BPF_MOV64_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_DW, BPF_XOR, BPF_REG_10, BPF_REG_1, -8),
+ /* if (val != 0x101) exit(2); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x101, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ /* r1 should not be clobbered, no BPF_FETCH flag */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "BPF_ATOMIC XOR with fetch",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 123),
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
+ /* old = atomic_fetch_xor(&val, 0x011); */
+ BPF_MOV64_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
+ /* if (old != 0x110) exit(3); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* if (val != 0x101) exit(2); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x101, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ /* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "BPF_ATOMIC XOR with fetch 32bit",
+ .insns = {
+ /* r0 = (s64) -1 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
+ /* old = atomic_fetch_xor(&val, 0x011); */
+ BPF_MOV32_IMM(BPF_REG_1, 0x011),
+ BPF_ATOMIC_OP(BPF_W, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
+ /* if (old != 0x110) exit(3); */
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* if (val != 0x101) exit(2); */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x101, 2),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ /* Check R0 wasn't clobbered (for fear of x86 JIT bug)
+ * It should be -1 so add 1 to get exit code.
+ */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
index 93d6b1641481..23080862aafd 100644
--- a/tools/testing/selftests/bpf/verifier/ctx.c
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -10,14 +10,13 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "context stores via XADD",
+ "context stores via BPF_ATOMIC",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
- BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+ .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
index ae72536603fe..ac1e19d0f520 100644
--- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -333,7 +333,7 @@
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_4, BPF_REG_5, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -488,7 +488,7 @@
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_4, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
diff --git a/tools/testing/selftests/bpf/verifier/leak_ptr.c b/tools/testing/selftests/bpf/verifier/leak_ptr.c
index d6eec17f2cd2..73f0dea95546 100644
--- a/tools/testing/selftests/bpf/verifier/leak_ptr.c
+++ b/tools/testing/selftests/bpf/verifier/leak_ptr.c
@@ -5,7 +5,7 @@
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
@@ -13,7 +13,7 @@
.errstr_unpriv = "R2 leaks addr into mem",
.result_unpriv = REJECT,
.result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+ .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
},
{
"leak pointer into ctx 2",
@@ -21,14 +21,14 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_10,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R10 leaks addr into mem",
.result_unpriv = REJECT,
.result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+ .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
},
{
"leak pointer into ctx 3",
@@ -56,7 +56,7 @@
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
diff --git a/tools/testing/selftests/bpf/verifier/meta_access.c b/tools/testing/selftests/bpf/verifier/meta_access.c
index 205292b8dd65..b45e8af41420 100644
--- a/tools/testing/selftests/bpf/verifier/meta_access.c
+++ b/tools/testing/selftests/bpf/verifier/meta_access.c
@@ -171,7 +171,7 @@
BPF_MOV64_IMM(BPF_REG_5, 42),
BPF_MOV64_IMM(BPF_REG_6, 24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
@@ -196,7 +196,7 @@
BPF_MOV64_IMM(BPF_REG_5, 42),
BPF_MOV64_IMM(BPF_REG_6, 24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
index a3fe0fbaed41..ee298627abae 100644
--- a/tools/testing/selftests/bpf/verifier/unpriv.c
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -207,7 +207,8 @@
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+ BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW,
+ BPF_REG_10, BPF_REG_0, -8, BPF_ADD),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
index ed1c2cea1dea..489062867218 100644
--- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
+++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
@@ -82,7 +82,7 @@
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_3, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
diff --git a/tools/testing/selftests/bpf/verifier/xadd.c b/tools/testing/selftests/bpf/verifier/xadd.c
index c5de2e62cc8b..b96ef3526815 100644
--- a/tools/testing/selftests/bpf/verifier/xadd.c
+++ b/tools/testing/selftests/bpf/verifier/xadd.c
@@ -3,7 +3,7 @@
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_EXIT_INSN(),
},
@@ -22,7 +22,7 @@
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
@@ -45,13 +45,13 @@
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
BPF_EXIT_INSN(),
},
.result = REJECT,
- .errstr = "BPF_XADD stores into R2 pkt is not allowed",
+ .errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@@ -62,8 +62,8 @@
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
@@ -82,8 +82,8 @@
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),