Diffstat (limited to 'tools/testing')
-rw-r--r--  tools/testing/memblock/tests/basic_api.c                            | 314
-rw-r--r--  tools/testing/memblock/tests/common.c                               |   8
-rw-r--r--  tools/testing/memblock/tests/common.h                               |   4
-rw-r--r--  tools/testing/selftests/alsa/mixer-test.c                           | 143
-rw-r--r--  tools/testing/selftests/alsa/pcm-test.c                             |  70
-rw-r--r--  tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c               | 123
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c             | 385
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c            | 186
-rw-r--r--  tools/testing/selftests/bpf/progs/uprobe_syscall.c                  |  15
-rw-r--r--  tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c         |  17
-rw-r--r--  tools/testing/selftests/bpf/progs/uretprobe_stack.c                 |  96
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc        | 103
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc     |  29
-rw-r--r--  tools/testing/selftests/hid/hid_bpf.c                               | 426
-rw-r--r--  tools/testing/selftests/hid/progs/hid.c                             | 392
-rw-r--r--  tools/testing/selftests/hid/progs/hid_bpf_helpers.h                 |  46
-rw-r--r--  tools/testing/selftests/iommu/iommufd.c                             |  86
-rw-r--r--  tools/testing/selftests/iommu/iommufd_fail_nth.c                    |   2
-rw-r--r--  tools/testing/selftests/iommu/iommufd_utils.h                       |  92
-rwxr-xr-x  tools/testing/selftests/net/fib_tests.sh                            |  24
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh |  18
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nft_concat_range.sh           |  76
-rw-r--r--  tools/testing/selftests/x86/test_shadow_stack.c                     | 145
23 files changed, 2603 insertions(+), 197 deletions(-)
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
index 57bf2688edfd..67503089e6a0 100644
--- a/tools/testing/memblock/tests/basic_api.c
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -15,12 +15,12 @@ static int memblock_initialization_check(void)
PREFIX_PUSH();
ASSERT_NE(memblock.memory.regions, NULL);
- ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.cnt, 0);
ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);
ASSERT_NE(memblock.reserved.regions, NULL);
- ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.cnt, 0);
ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);
@@ -982,6 +982,262 @@ static int memblock_reserve_many_check(void)
return 0;
}
+
+/*
+ * A test that tries to reserve the 129th memory block at all locations.
+ * Expect to trigger memblock_double_array() to double memblock.reserved.max
+ * and find a new valid memory range to hold the reserved.regions array.
+ *
+ *      0          1          2                   128
+ *  +-------+  +-------+  +-------+           +-------+
+ *  |  32K  |  |  32K  |  |  32K  |    ...    |  32K  |
+ *  +-------+  +-------+  +-------+           +-------+
+ *          |<-32K->|          |<-32K->|
+ *
+ */
+/* Keep the gaps so these memory regions will not be merged. */
+#define MEMORY_BASE(idx) (SZ_128K + (MEM_SIZE * 2) * (idx))
+static int memblock_reserve_all_locations_check(void)
+{
+ int i, skip;
+ void *orig_region;
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_16K,
+ };
+ phys_addr_t new_reserved_regions_size;
+
+ PREFIX_PUSH();
+
+ /* Reserve the 129th memory block at all possible positions */
+ for (skip = 0; skip < INIT_MEMBLOCK_REGIONS + 1; skip++) {
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ /* Add a valid memory region used by double_array(). */
+ dummy_physical_memory_init();
+ memblock_add(dummy_physical_memory_base(), MEM_SIZE);
+
+ for (i = 0; i < INIT_MEMBLOCK_REGIONS + 1; i++) {
+ if (i == skip)
+ continue;
+
+ /* Reserve some fake memory regions to fill up memblock. */
+ memblock_reserve(MEMORY_BASE(i), MEM_SIZE);
+
+ if (i < skip) {
+ ASSERT_EQ(memblock.reserved.cnt, i + 1);
+ ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);
+ } else {
+ ASSERT_EQ(memblock.reserved.cnt, i);
+ ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
+ }
+ }
+
+ orig_region = memblock.reserved.regions;
+
+ /* This reserves the 129th memory region and triggers the array doubling. */
+ memblock_reserve(MEMORY_BASE(skip), MEM_SIZE);
+
+ /*
+ * This is the size of the memory region used by the doubled
+ * reserved.regions array; that region is itself reserved because it is
+ * now in use. The size is used to calculate the total_size that
+ * memblock.reserved has now.
+ */
+ new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+ sizeof(struct memblock_region));
+ /*
+ * memblock_double_array() finds a free memory region to hold the new
+ * reserved.regions array and reserves the range it occupies, so one
+ * more region exists in the reserved memblock. That extra region's
+ * size is new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+ * memblock_double_array() has now run. Check that after the doubling,
+ * memblock_reserve() still works as normal.
+ */
+ memblock_reserve(r.base, r.size);
+ ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+ ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size +
+ r.size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ dummy_physical_memory_cleanup();
+
+ /*
+ * The current reserved.regions occupies a range of memory allocated by
+ * dummy_physical_memory_init(). After that memory is freed we must not
+ * use it, so restore the original regions array to make sure later
+ * tests run as normal and are not affected by the doubled array.
+ */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+ }
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to reserve the 129th memory block at all locations.
+ * Expect to trigger memblock_double_array() to double memblock.reserved.max
+ * and find a new valid memory range for the reserved.regions array, and make
+ * sure that range doesn't conflict with the range we want to reserve.
+ *
+ * For example, we have 128 regions in reserved and now want to reserve
+ * the skipped one. Since reserved is full, memblock_double_array() would find
+ * an available range in memory for the new array. We intentionally put two
+ * ranges in memory, one of which is the exact range of the skipped one. Before
+ * commit 48c3b583bbdd ("mm/memblock: fix overlapping allocation when doubling
+ * reserved array"), the new array would sit in the skipped range, which is a
+ * conflict. The expected new array should be allocated from memory.regions[0].
+ *
+ *                    0         1
+ * memory         +-------+ +-------+
+ *                |  32K  | |  32K  |
+ *            ----+-------+-+-------+----
+ *                |<-32K->| |<-32K->|
+ *
+ *                    0     skipped           127
+ * reserved       +-------+ ......... +-------+
+ *                |  32K  | .  32K  .  ...  |  32K  |
+ *                +-------+ ......... +-------+
+ *                          |<-32K->|
+ *                              ^
+ *                              |
+ *                         skipped one
+ */
+/* Keep the gaps so these memory regions will not be merged. */
+#define MEMORY_BASE_OFFSET(idx, offset) ((offset) + (MEM_SIZE * 2) * (idx))
+static int memblock_reserve_many_may_conflict_check(void)
+{
+ int i, skip;
+ void *orig_region;
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_16K,
+ };
+ phys_addr_t new_reserved_regions_size;
+
+ /*
+ *   0       1                 129
+ * +---+   +---+             +---+
+ * |32K|   |32K|     ..      |32K|
+ * +---+   +---+             +---+
+ *
+ * Pre-allocate ranges for the 129 memory blocks, plus one range at idx 0
+ * for the doubled memblock.reserved.regions array.
+ */
+ dummy_physical_memory_init();
+ phys_addr_t memory_base = dummy_physical_memory_base();
+ phys_addr_t offset = PAGE_ALIGN(memory_base);
+
+ PREFIX_PUSH();
+
+ /* Reserve the 129th memory block at all possible positions */
+ for (skip = 1; skip <= INIT_MEMBLOCK_REGIONS + 1; skip++) {
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ reset_memblock_attributes();
+ /* Add a valid memory region used by double_array(). */
+ memblock_add(MEMORY_BASE_OFFSET(0, offset), MEM_SIZE);
+ /*
+ * Add a memory region which will be reserved as the 129th memory
+ * region. This is not expected to be used by double_array().
+ */
+ memblock_add(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);
+
+ for (i = 1; i <= INIT_MEMBLOCK_REGIONS + 1; i++) {
+ if (i == skip)
+ continue;
+
+ /* Reserve some fake memory regions to fill up memblock. */
+ memblock_reserve(MEMORY_BASE_OFFSET(i, offset), MEM_SIZE);
+
+ if (i < skip) {
+ ASSERT_EQ(memblock.reserved.cnt, i);
+ ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
+ } else {
+ ASSERT_EQ(memblock.reserved.cnt, i - 1);
+ ASSERT_EQ(memblock.reserved.total_size, (i - 1) * MEM_SIZE);
+ }
+ }
+
+ orig_region = memblock.reserved.regions;
+
+ /* This reserves the 129th memory region and triggers the array doubling. */
+ memblock_reserve(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);
+
+ /*
+ * This is the size of the memory region used by the doubled
+ * reserved.regions array; that region is itself reserved because it is
+ * now in use. The size is used to calculate the total_size that
+ * memblock.reserved has now.
+ */
+ new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+ sizeof(struct memblock_region));
+ /*
+ * memblock_double_array() finds a free memory region to hold the new
+ * reserved.regions array and reserves the range it occupies, so one
+ * more region exists in the reserved memblock. That extra region's
+ * size is new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+ * The first reserved region is allocated for the doubled array, with
+ * size new_reserved_regions_size and base
+ * MEMORY_BASE_OFFSET(0, offset) + SZ_32K - new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.regions[0].base + memblock.reserved.regions[0].size,
+ MEMORY_BASE_OFFSET(0, offset) + SZ_32K);
+ ASSERT_EQ(memblock.reserved.regions[0].size, new_reserved_regions_size);
+
+ /*
+ * memblock_double_array() has now run. Check that after the doubling,
+ * memblock_reserve() still works as normal.
+ */
+ memblock_reserve(r.base, r.size);
+ ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+ ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size +
+ r.size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+ * The current reserved.regions occupies a range of memory allocated by
+ * dummy_physical_memory_init(). After that memory is freed we must not
+ * use it, so restore the original regions array to make sure later
+ * tests run as normal and are not affected by the doubled array.
+ */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+ }
+
+ dummy_physical_memory_cleanup();
+
+ test_pass_pop();
+
+ return 0;
+}
+
static int memblock_reserve_checks(void)
{
prefix_reset();
@@ -997,6 +1253,8 @@ static int memblock_reserve_checks(void)
memblock_reserve_between_check();
memblock_reserve_near_max_check();
memblock_reserve_many_check();
+ memblock_reserve_all_locations_check();
+ memblock_reserve_many_may_conflict_check();
prefix_pop();
@@ -1295,7 +1553,7 @@ static int memblock_remove_only_region_check(void)
ASSERT_EQ(rgn->base, 0);
ASSERT_EQ(rgn->size, 0);
- ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.cnt, 0);
ASSERT_EQ(memblock.memory.total_size, 0);
test_pass_pop();
@@ -1723,7 +1981,7 @@ static int memblock_free_only_region_check(void)
ASSERT_EQ(rgn->base, 0);
ASSERT_EQ(rgn->size, 0);
- ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.cnt, 0);
ASSERT_EQ(memblock.reserved.total_size, 0);
test_pass_pop();
@@ -2129,6 +2387,53 @@ static int memblock_trim_memory_checks(void)
return 0;
}
+static int memblock_overlaps_region_check(void)
+{
+ struct region r = {
+ .base = SZ_1G,
+ .size = SZ_4M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r.base, r.size);
+
+ /* Far Away */
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1M, SZ_1M));
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_2G, SZ_1M));
+
+ /* Neighbor */
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_1M));
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_4M, SZ_1M));
+
+ /* Partial Overlap */
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_2M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_2M, SZ_2M));
+
+ /* Totally Overlap */
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G, SZ_4M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_2M, SZ_8M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_1M, SZ_1M));
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_overlaps_region_checks(void)
+{
+ prefix_reset();
+ prefix_push("memblock_overlaps_region");
+ test_print("Running memblock_overlaps_region tests...\n");
+
+ memblock_overlaps_region_check();
+
+ prefix_pop();
+
+ return 0;
+}
+
int memblock_basic_checks(void)
{
memblock_initialization_check();
@@ -2138,6 +2443,7 @@ int memblock_basic_checks(void)
memblock_free_checks();
memblock_bottom_up_checks();
memblock_trim_memory_checks();
+ memblock_overlaps_region_checks();
return 0;
}
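
The new memblock_overlaps_region_check() above exercises half-open interval semantics: a candidate range [base, base + size) overlaps a region only when each range starts strictly below the other's end, which is why the adjacent "Neighbor" ranges do not count as overlapping. A minimal standalone sketch of that predicate (ranges_overlap is a hypothetical helper, not the kernel implementation):

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long long phys_addr_t;

    /* Half-open interval overlap: [b1, b1+s1) vs [b2, b2+s2). */
    static bool ranges_overlap(phys_addr_t b1, phys_addr_t s1,
                               phys_addr_t b2, phys_addr_t s2)
    {
            return b1 < b2 + s2 && b2 < b1 + s1;
    }

    int main(void)
    {
            const phys_addr_t SZ_1M = 1ULL << 20, SZ_4M = 4 * SZ_1M,
                              SZ_1G = 1ULL << 30;

            /* mirrors the "Neighbor" case: touching ranges do not overlap */
            assert(!ranges_overlap(SZ_1G - SZ_1M, SZ_1M, SZ_1G, SZ_4M));
            /* mirrors the "Partial Overlap" case */
            assert(ranges_overlap(SZ_1G - SZ_1M, 2 * SZ_1M, SZ_1G, SZ_4M));
            return 0;
    }
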
diff --git a/tools/testing/memblock/tests/common.c b/tools/testing/memblock/tests/common.c
index f43b6f414983..3250c8e5124b 100644
--- a/tools/testing/memblock/tests/common.c
+++ b/tools/testing/memblock/tests/common.c
@@ -40,13 +40,13 @@ void reset_memblock_regions(void)
{
memset(memblock.memory.regions, 0,
memblock.memory.cnt * sizeof(struct memblock_region));
- memblock.memory.cnt = 1;
+ memblock.memory.cnt = 0;
memblock.memory.max = INIT_MEMBLOCK_REGIONS;
memblock.memory.total_size = 0;
memset(memblock.reserved.regions, 0,
memblock.reserved.cnt * sizeof(struct memblock_region));
- memblock.reserved.cnt = 1;
+ memblock.reserved.cnt = 0;
memblock.reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS;
memblock.reserved.total_size = 0;
}
@@ -61,7 +61,7 @@ void reset_memblock_attributes(void)
static inline void fill_memblock(void)
{
- memset(memory_block.base, 1, MEM_SIZE);
+ memset(memory_block.base, 1, PHYS_MEM_SIZE);
}
void setup_memblock(void)
@@ -103,7 +103,7 @@ void setup_numa_memblock(const unsigned int node_fracs[])
void dummy_physical_memory_init(void)
{
- memory_block.base = malloc(MEM_SIZE);
+ memory_block.base = malloc(PHYS_MEM_SIZE);
assert(memory_block.base);
fill_memblock();
}
diff --git a/tools/testing/memblock/tests/common.h b/tools/testing/memblock/tests/common.h
index b5ec59aa62d7..e1138e06c903 100644
--- a/tools/testing/memblock/tests/common.h
+++ b/tools/testing/memblock/tests/common.h
@@ -12,6 +12,7 @@
#include <../selftests/kselftest.h>
#define MEM_SIZE SZ_32K
+#define PHYS_MEM_SIZE SZ_16M
#define NUMA_NODES 8
#define INIT_MEMBLOCK_REGIONS 128
@@ -39,6 +40,9 @@ enum test_flags {
assert((_expected) == (_seen)); \
} while (0)
+#define ASSERT_TRUE(_seen) ASSERT_EQ(true, _seen)
+#define ASSERT_FALSE(_seen) ASSERT_EQ(false, _seen)
+
/**
* ASSERT_NE():
* Check the condition
diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
index 1c04e5f638a0..2a4b2662035e 100644
--- a/tools/testing/selftests/alsa/mixer-test.c
+++ b/tools/testing/selftests/alsa/mixer-test.c
@@ -33,6 +33,8 @@
struct card_data {
snd_ctl_t *handle;
int card;
+ snd_ctl_card_info_t *info;
+ const char *card_name;
struct pollfd pollfd;
int num_ctls;
snd_ctl_elem_list_t *ctls;
@@ -91,8 +93,26 @@ static void find_controls(void)
err = snd_card_get_longname(card, &card_longname);
if (err != 0)
card_longname = "Unknown";
- ksft_print_msg("Card %d - %s (%s)\n", card,
- card_name, card_longname);
+
+ err = snd_ctl_card_info_malloc(&card_data->info);
+ if (err != 0)
+ ksft_exit_fail_msg("Failed to allocate card info: %d\n",
+ err);
+
+ err = snd_ctl_card_info(card_data->handle, card_data->info);
+ if (err == 0) {
+ card_data->card_name = snd_ctl_card_info_get_id(card_data->info);
+ if (!card_data->card_name)
+ ksft_print_msg("Failed to get card ID\n");
+ } else {
+ ksft_print_msg("Failed to get card info: %d\n", err);
+ }
+
+ if (!card_data->card_name)
+ card_data->card_name = "Unknown";
+
+ ksft_print_msg("Card %d/%s - %s (%s)\n", card,
+ card_data->card_name, card_name, card_longname);
/* Count controls */
snd_ctl_elem_list_malloc(&card_data->ctls);
@@ -389,16 +409,16 @@ static void test_ctl_get_value(struct ctl_data *ctl)
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
ksft_print_msg("%s is inactive\n", ctl->name);
- ksft_test_result_skip("get_value.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
/* Can't test reading on an unreadable control */
if (!snd_ctl_elem_info_is_readable(ctl->info)) {
ksft_print_msg("%s is not readable\n", ctl->name);
- ksft_test_result_skip("get_value.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
@@ -413,8 +433,8 @@ static void test_ctl_get_value(struct ctl_data *ctl)
err = -EINVAL;
out:
- ksft_test_result(err >= 0, "get_value.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(err >= 0, "get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static bool strend(const char *haystack, const char *needle)
@@ -431,7 +451,7 @@ static void test_ctl_name(struct ctl_data *ctl)
{
bool name_ok = true;
- ksft_print_msg("%d.%d %s\n", ctl->card->card, ctl->elem,
+ ksft_print_msg("%s.%d %s\n", ctl->card->card_name, ctl->elem,
ctl->name);
/* Only boolean controls should end in Switch */
@@ -453,8 +473,8 @@ static void test_ctl_name(struct ctl_data *ctl)
}
}
- ksft_test_result(name_ok, "name.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(name_ok, "name.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static void show_values(struct ctl_data *ctl, snd_ctl_elem_value_t *orig_val,
@@ -626,28 +646,41 @@ static int write_and_verify(struct ctl_data *ctl,
}
/*
+ * We can't verify any specific value for volatile controls
+ * but we should still check that whatever we read is a valid
+ * value for the control.
+ */
+ if (snd_ctl_elem_info_is_volatile(ctl->info)) {
+ if (!ctl_value_valid(ctl, read_val)) {
+ ksft_print_msg("Volatile control %s has invalid value\n",
+ ctl->name);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /*
* Check for an event if the value changed, or confirm that
* there was none if it didn't. We rely on the kernel
* generating the notification before it returns from the
* write, this is currently true, should that ever change this
* will most likely break and need updating.
*/
- if (!snd_ctl_elem_info_is_volatile(ctl->info)) {
- err = wait_for_event(ctl, 0);
- if (snd_ctl_elem_value_compare(initial_val, read_val)) {
- if (err < 1) {
- ksft_print_msg("No event generated for %s\n",
- ctl->name);
- show_values(ctl, initial_val, read_val);
- ctl->event_missing++;
- }
- } else {
- if (err != 0) {
- ksft_print_msg("Spurious event generated for %s\n",
- ctl->name);
- show_values(ctl, initial_val, read_val);
- ctl->event_spurious++;
- }
+ err = wait_for_event(ctl, 0);
+ if (snd_ctl_elem_value_compare(initial_val, read_val)) {
+ if (err < 1) {
+ ksft_print_msg("No event generated for %s\n",
+ ctl->name);
+ show_values(ctl, initial_val, read_val);
+ ctl->event_missing++;
+ }
+ } else {
+ if (err != 0) {
+ ksft_print_msg("Spurious event generated for %s\n",
+ ctl->name);
+ show_values(ctl, initial_val, read_val);
+ ctl->event_spurious++;
}
}
@@ -682,30 +715,30 @@ static void test_ctl_write_default(struct ctl_data *ctl)
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
ksft_print_msg("%s is inactive\n", ctl->name);
- ksft_test_result_skip("write_default.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
if (!snd_ctl_elem_info_is_writable(ctl->info)) {
ksft_print_msg("%s is not writeable\n", ctl->name);
- ksft_test_result_skip("write_default.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
/* No idea what the default was for unreadable controls */
if (!snd_ctl_elem_info_is_readable(ctl->info)) {
ksft_print_msg("%s couldn't read default\n", ctl->name);
- ksft_test_result_skip("write_default.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
err = write_and_verify(ctl, ctl->def_val, NULL);
- ksft_test_result(err >= 0, "write_default.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(err >= 0, "write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static bool test_ctl_write_valid_boolean(struct ctl_data *ctl)
@@ -815,15 +848,15 @@ static void test_ctl_write_valid(struct ctl_data *ctl)
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
ksft_print_msg("%s is inactive\n", ctl->name);
- ksft_test_result_skip("write_valid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
if (!snd_ctl_elem_info_is_writable(ctl->info)) {
ksft_print_msg("%s is not writeable\n", ctl->name);
- ksft_test_result_skip("write_valid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
@@ -846,16 +879,16 @@ static void test_ctl_write_valid(struct ctl_data *ctl)
default:
/* No tests for this yet */
- ksft_test_result_skip("write_valid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
/* Restore the default value to minimise disruption */
write_and_verify(ctl, ctl->def_val, NULL);
- ksft_test_result(pass, "write_valid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(pass, "write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static bool test_ctl_write_invalid_value(struct ctl_data *ctl,
@@ -1027,15 +1060,15 @@ static void test_ctl_write_invalid(struct ctl_data *ctl)
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
ksft_print_msg("%s is inactive\n", ctl->name);
- ksft_test_result_skip("write_invalid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
if (!snd_ctl_elem_info_is_writable(ctl->info)) {
ksft_print_msg("%s is not writeable\n", ctl->name);
- ksft_test_result_skip("write_invalid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
@@ -1058,28 +1091,28 @@ static void test_ctl_write_invalid(struct ctl_data *ctl)
default:
/* No tests for this yet */
- ksft_test_result_skip("write_invalid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
/* Restore the default value to minimise disruption */
write_and_verify(ctl, ctl->def_val, NULL);
- ksft_test_result(pass, "write_invalid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(pass, "write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static void test_ctl_event_missing(struct ctl_data *ctl)
{
- ksft_test_result(!ctl->event_missing, "event_missing.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(!ctl->event_missing, "event_missing.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static void test_ctl_event_spurious(struct ctl_data *ctl)
{
- ksft_test_result(!ctl->event_spurious, "event_spurious.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(!ctl->event_spurious, "event_spurious.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
int main(void)
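
The mixer test now keys its ksft output on the card's ID string instead of the numeric index, fetched through snd_ctl_card_info(). For reference, a hedged sketch of that alsa-lib lookup outside the test harness (card "hw:0" and the "Unknown" fallback are assumptions borrowed from the patch):

    #include <stdio.h>
    #include <alsa/asoundlib.h>

    int main(void)
    {
            snd_ctl_t *handle;
            snd_ctl_card_info_t *info;
            const char *id = "Unknown";

            if (snd_ctl_open(&handle, "hw:0", 0) < 0)
                    return 1;

            snd_ctl_card_info_alloca(&info);
            /* same call chain the test uses to fill card_data->card_name */
            if (snd_ctl_card_info(handle, info) == 0 && snd_ctl_card_info_get_id(info))
                    id = snd_ctl_card_info_get_id(info);

            printf("card id: %s\n", id);
            snd_ctl_close(handle);
            return 0;
    }

Using the ID string keeps kselftest test names stable across boots, where numeric card indices can be assigned in a different order.
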
diff --git a/tools/testing/selftests/alsa/pcm-test.c b/tools/testing/selftests/alsa/pcm-test.c
index de664dedb541..dbd7c222ce93 100644
--- a/tools/testing/selftests/alsa/pcm-test.c
+++ b/tools/testing/selftests/alsa/pcm-test.c
@@ -24,6 +24,8 @@ typedef struct timespec timestamp_t;
struct card_data {
int card;
+ snd_ctl_card_info_t *info;
+ const char *name;
pthread_t thread;
struct card_data *next;
};
@@ -35,6 +37,7 @@ struct pcm_data {
int card;
int device;
int subdevice;
+ const char *card_name;
snd_pcm_stream_t stream;
snd_config_t *pcm_config;
struct pcm_data *next;
@@ -167,6 +170,10 @@ static void find_pcms(void)
config = get_alsalib_config();
while (card >= 0) {
+ card_data = calloc(1, sizeof(*card_data));
+ if (!card_data)
+ ksft_exit_fail_msg("Out of memory\n");
+
sprintf(name, "hw:%d", card);
err = snd_ctl_open_lconf(&handle, name, 0, config);
@@ -182,14 +189,29 @@ static void find_pcms(void)
err = snd_card_get_longname(card, &card_longname);
if (err != 0)
card_longname = "Unknown";
- ksft_print_msg("Card %d - %s (%s)\n", card,
- card_name, card_longname);
+
+ err = snd_ctl_card_info_malloc(&card_data->info);
+ if (err != 0)
+ ksft_exit_fail_msg("Failed to allocate card info: %d\n",
+ err);
+
+ err = snd_ctl_card_info(handle, card_data->info);
+ if (err == 0) {
+ card_data->name = snd_ctl_card_info_get_id(card_data->info);
+ if (!card_data->name)
+ ksft_print_msg("Failed to get card ID\n");
+ } else {
+ ksft_print_msg("Failed to get card info: %d\n", err);
+ }
+
+ if (!card_data->name)
+ card_data->name = "Unknown";
+
+ ksft_print_msg("Card %d/%s - %s (%s)\n", card,
+ card_data->name, card_name, card_longname);
card_config = conf_by_card(card);
- card_data = calloc(1, sizeof(*card_data));
- if (!card_data)
- ksft_exit_fail_msg("Out of memory\n");
card_data->card = card;
card_data->next = card_list;
card_list = card_data;
@@ -218,6 +240,10 @@ static void find_pcms(void)
if (err < 0)
ksft_exit_fail_msg("snd_ctl_pcm_info: %d:%d:%d\n",
dev, 0, stream);
+
+ ksft_print_msg("%s.0 - %s\n", card_data->name,
+ snd_pcm_info_get_id(pcm_info));
+
count = snd_pcm_info_get_subdevices_count(pcm_info);
for (subdev = 0; subdev < count; subdev++) {
sprintf(key, "pcm.%d.%d.%s", dev, subdev, snd_pcm_stream_name(stream));
@@ -232,6 +258,7 @@ static void find_pcms(void)
pcm_data->card = card;
pcm_data->device = dev;
pcm_data->subdevice = subdev;
+ pcm_data->card_name = card_data->name;
pcm_data->stream = stream;
pcm_data->pcm_config = conf_get_subtree(card_config, key, NULL);
pcm_data->next = pcm_list;
@@ -294,9 +321,9 @@ static void test_pcm_time(struct pcm_data *data, enum test_class class,
desc = conf_get_string(pcm_cfg, "description", NULL, NULL);
if (desc)
- ksft_print_msg("%s.%s.%d.%d.%d.%s - %s\n",
+ ksft_print_msg("%s.%s.%s.%d.%d.%s - %s\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device, data->subdevice,
snd_pcm_stream_name(data->stream),
desc);
@@ -352,9 +379,9 @@ __format:
old_format = format;
format = snd_pcm_format_value(alt_formats[i]);
if (format != SND_PCM_FORMAT_UNKNOWN) {
- ksft_print_msg("%s.%d.%d.%d.%s.%s format %s -> %s\n",
+ ksft_print_msg("%s.%s.%d.%d.%s.%s format %s -> %s\n",
test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device, data->subdevice,
snd_pcm_stream_name(data->stream),
snd_pcm_access_name(access),
snd_pcm_format_name(old_format),
@@ -383,7 +410,7 @@ __format:
goto __close;
}
if (rrate != rate) {
- snprintf(msg, sizeof(msg), "rate mismatch %ld != %d", rate, rrate);
+ snprintf(msg, sizeof(msg), "rate mismatch %ld != %u", rate, rrate);
goto __close;
}
rperiod_size = period_size;
@@ -430,9 +457,9 @@ __format:
goto __close;
}
- ksft_print_msg("%s.%s.%d.%d.%d.%s hw_params.%s.%s.%ld.%ld.%ld.%ld sw_params.%ld\n",
+ ksft_print_msg("%s.%s.%s.%d.%d.%s hw_params.%s.%s.%ld.%ld.%ld.%ld sw_params.%ld\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device, data->subdevice,
snd_pcm_stream_name(data->stream),
snd_pcm_access_name(access),
snd_pcm_format_name(format),
@@ -491,9 +518,10 @@ __close:
* Anything specified as specific to this system
* should always be supported.
*/
- ksft_test_result(!skip, "%s.%s.%d.%d.%d.%s.params\n",
+ ksft_test_result(!skip, "%s.%s.%s.%d.%d.%s.params\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device,
+ data->subdevice,
snd_pcm_stream_name(data->stream));
break;
default:
@@ -501,14 +529,16 @@ __close:
}
if (!skip)
- ksft_test_result(pass, "%s.%s.%d.%d.%d.%s\n",
+ ksft_test_result(pass, "%s.%s.%s.%d.%d.%s\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device,
+ data->subdevice,
snd_pcm_stream_name(data->stream));
else
- ksft_test_result_skip("%s.%s.%d.%d.%d.%s\n",
+ ksft_test_result_skip("%s.%s.%s.%d.%d.%s\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device,
+ data->subdevice,
snd_pcm_stream_name(data->stream));
if (msg[0])
@@ -609,8 +639,8 @@ int main(void)
conf->filename, conf->config_id);
for (pcm = pcm_missing; pcm != NULL; pcm = pcm->next) {
- ksft_test_result(false, "test.missing.%d.%d.%d.%s\n",
- pcm->card, pcm->device, pcm->subdevice,
+ ksft_test_result(false, "test.missing.%s.%d.%d.%s\n",
+ pcm->card_name, pcm->device, pcm->subdevice,
snd_pcm_stream_name(pcm->stream));
}
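
The pcm test gains the same card-ID naming plus a "%s.0 - %s" line printing each PCM's ID from snd_pcm_info. A minimal sketch of the underlying enumeration pattern (assumes card "hw:0" and the playback stream only; the test's find_pcms() walks both streams and all subdevices):

    #include <stdio.h>
    #include <alsa/asoundlib.h>

    int main(void)
    {
            snd_ctl_t *handle;
            snd_pcm_info_t *info;
            int dev = -1;

            if (snd_ctl_open(&handle, "hw:0", 0) < 0)
                    return 1;

            snd_pcm_info_alloca(&info);
            /* walk playback PCM devices, as find_pcms() does per stream */
            while (snd_ctl_pcm_next_device(handle, &dev) == 0 && dev >= 0) {
                    snd_pcm_info_set_device(info, dev);
                    snd_pcm_info_set_subdevice(info, 0);
                    snd_pcm_info_set_stream(info, SND_PCM_STREAM_PLAYBACK);
                    if (snd_ctl_pcm_info(handle, info) == 0)
                            printf("pcm %d: %s\n", dev, snd_pcm_info_get_id(info));
            }
            snd_ctl_close(handle);
            return 0;
    }
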
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index f8962a1dd397..fd28c1157bd3 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -18,6 +18,7 @@
#include <linux/in6.h>
#include <linux/un.h>
#include <net/sock.h>
+#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"
@@ -413,6 +414,119 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.write = bpf_testmod_test_write,
};
+/* The bpf_testmod_uprobe sysfs attribute is currently enabled for x86_64
+ * only; see the test_uretprobe_regs_change test.
+ */
+#ifdef __x86_64__
+
+static int
+uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
+ struct pt_regs *regs)
+{
+ regs->ax = 0x12345678deadbeef;
+ regs->cx = 0x87654321feebdaed;
+ regs->r11 = (u64) -1;
+ return true;
+}
+
+struct testmod_uprobe {
+ struct path path;
+ loff_t offset;
+ struct uprobe_consumer consumer;
+};
+
+static DEFINE_MUTEX(testmod_uprobe_mutex);
+
+static struct testmod_uprobe uprobe = {
+ .consumer.ret_handler = uprobe_ret_handler,
+};
+
+static int testmod_register_uprobe(loff_t offset)
+{
+ int err = -EBUSY;
+
+ if (uprobe.offset)
+ return -EBUSY;
+
+ mutex_lock(&testmod_uprobe_mutex);
+
+ if (uprobe.offset)
+ goto out;
+
+ err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
+ if (err)
+ goto out;
+
+ err = uprobe_register_refctr(d_real_inode(uprobe.path.dentry),
+ offset, 0, &uprobe.consumer);
+ if (err)
+ path_put(&uprobe.path);
+ else
+ uprobe.offset = offset;
+
+out:
+ mutex_unlock(&testmod_uprobe_mutex);
+ return err;
+}
+
+static void testmod_unregister_uprobe(void)
+{
+ mutex_lock(&testmod_uprobe_mutex);
+
+ if (uprobe.offset) {
+ uprobe_unregister(d_real_inode(uprobe.path.dentry),
+ uprobe.offset, &uprobe.consumer);
+ uprobe.offset = 0;
+ }
+
+ mutex_unlock(&testmod_uprobe_mutex);
+}
+
+static ssize_t
+bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t len)
+{
+ unsigned long offset = 0;
+ int err = 0;
+
+ if (kstrtoul(buf, 0, &offset))
+ return -EINVAL;
+
+ if (offset)
+ err = testmod_register_uprobe(offset);
+ else
+ testmod_unregister_uprobe();
+
+ return err ?: strlen(buf);
+}
+
+static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
+ .attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
+ .write = bpf_testmod_uprobe_write,
+};
+
+static int register_bpf_testmod_uprobe(void)
+{
+ return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
+}
+
+static void unregister_bpf_testmod_uprobe(void)
+{
+ testmod_unregister_uprobe();
+ sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
+}
+
+#else
+static int register_bpf_testmod_uprobe(void)
+{
+ return 0;
+}
+
+static void unregister_bpf_testmod_uprobe(void) { }
+#endif
+
BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
@@ -983,7 +1097,13 @@ static int bpf_testmod_init(void)
return -EINVAL;
sock = NULL;
mutex_init(&sock_lock);
- return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+ ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+ if (ret < 0)
+ return ret;
+ ret = register_bpf_testmod_uprobe();
+ if (ret < 0)
+ return ret;
+ return 0;
}
static void bpf_testmod_exit(void)
@@ -998,6 +1118,7 @@ static void bpf_testmod_exit(void)
bpf_kfunc_close_sock();
sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+ unregister_bpf_testmod_uprobe();
}
module_init(bpf_testmod_init);
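
The new bin attribute exposes /sys/kernel/bpf_testmod_uprobe: writing a non-zero decimal offset registers the test uprobe on /proc/self/exe, writing 0 unregisters it. A hedged userspace sketch of that protocol, mirroring the selftest's write_bpf_testmod_uprobe() helper further below (set_testmod_uprobe is a hypothetical name):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* register (offset != 0) or unregister (offset == 0) the testmod uprobe */
    static int set_testmod_uprobe(unsigned long offset)
    {
            char buf[32];
            int fd, n;

            fd = open("/sys/kernel/bpf_testmod_uprobe", O_WRONLY);
            if (fd < 0)
                    return -1;
            n = snprintf(buf, sizeof(buf), "%lu", offset);
            n = write(fd, buf, n) == n ? 0 : -1;
            close(fd);
            return n;
    }

In the test below, the offset passed in is the uprobe offset of uretprobe_regs_trigger() within the test binary, obtained via get_uprobe_offset().
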
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
new file mode 100644
index 000000000000..bd8c75b620c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#ifdef __x86_64__
+
+#include <unistd.h>
+#include <asm/ptrace.h>
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <sys/prctl.h>
+#include <asm/prctl.h>
+#include "uprobe_syscall.skel.h"
+#include "uprobe_syscall_executed.skel.h"
+
+__naked unsigned long uretprobe_regs_trigger(void)
+{
+ asm volatile (
+ "movq $0xdeadbeef, %rax\n"
+ "ret\n"
+ );
+}
+
+__naked void uretprobe_regs(struct pt_regs *before, struct pt_regs *after)
+{
+ asm volatile (
+ "movq %r15, 0(%rdi)\n"
+ "movq %r14, 8(%rdi)\n"
+ "movq %r13, 16(%rdi)\n"
+ "movq %r12, 24(%rdi)\n"
+ "movq %rbp, 32(%rdi)\n"
+ "movq %rbx, 40(%rdi)\n"
+ "movq %r11, 48(%rdi)\n"
+ "movq %r10, 56(%rdi)\n"
+ "movq %r9, 64(%rdi)\n"
+ "movq %r8, 72(%rdi)\n"
+ "movq %rax, 80(%rdi)\n"
+ "movq %rcx, 88(%rdi)\n"
+ "movq %rdx, 96(%rdi)\n"
+ "movq %rsi, 104(%rdi)\n"
+ "movq %rdi, 112(%rdi)\n"
+ "movq $0, 120(%rdi)\n" /* orig_rax */
+ "movq $0, 128(%rdi)\n" /* rip */
+ "movq $0, 136(%rdi)\n" /* cs */
+ "pushf\n"
+ "pop %rax\n"
+ "movq %rax, 144(%rdi)\n" /* eflags */
+ "movq %rsp, 152(%rdi)\n" /* rsp */
+ "movq $0, 160(%rdi)\n" /* ss */
+
+ /* save 2nd argument */
+ "pushq %rsi\n"
+ "call uretprobe_regs_trigger\n"
+
+ /* save return value and load 2nd argument pointer to rax */
+ "pushq %rax\n"
+ "movq 8(%rsp), %rax\n"
+
+ "movq %r15, 0(%rax)\n"
+ "movq %r14, 8(%rax)\n"
+ "movq %r13, 16(%rax)\n"
+ "movq %r12, 24(%rax)\n"
+ "movq %rbp, 32(%rax)\n"
+ "movq %rbx, 40(%rax)\n"
+ "movq %r11, 48(%rax)\n"
+ "movq %r10, 56(%rax)\n"
+ "movq %r9, 64(%rax)\n"
+ "movq %r8, 72(%rax)\n"
+ "movq %rcx, 88(%rax)\n"
+ "movq %rdx, 96(%rax)\n"
+ "movq %rsi, 104(%rax)\n"
+ "movq %rdi, 112(%rax)\n"
+ "movq $0, 120(%rax)\n" /* orig_rax */
+ "movq $0, 128(%rax)\n" /* rip */
+ "movq $0, 136(%rax)\n" /* cs */
+
+ /* restore return value and 2nd argument */
+ "pop %rax\n"
+ "pop %rsi\n"
+
+ "movq %rax, 80(%rsi)\n"
+
+ "pushf\n"
+ "pop %rax\n"
+
+ "movq %rax, 144(%rsi)\n" /* eflags */
+ "movq %rsp, 152(%rsi)\n" /* rsp */
+ "movq $0, 160(%rsi)\n" /* ss */
+ "ret\n"
+);
+}
+
+static void test_uretprobe_regs_equal(void)
+{
+ struct uprobe_syscall *skel = NULL;
+ struct pt_regs before = {}, after = {};
+ unsigned long *pb = (unsigned long *) &before;
+ unsigned long *pa = (unsigned long *) &after;
+ unsigned long *pp;
+ unsigned int i, cnt;
+ int err;
+
+ skel = uprobe_syscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall__open_and_load"))
+ goto cleanup;
+
+ err = uprobe_syscall__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_syscall__attach"))
+ goto cleanup;
+
+ uretprobe_regs(&before, &after);
+
+ pp = (unsigned long *) &skel->bss->regs;
+ cnt = sizeof(before)/sizeof(*pb);
+
+ for (i = 0; i < cnt; i++) {
+ unsigned int offset = i * sizeof(unsigned long);
+
+ /*
+ * Check each register before and after the uretprobe_regs_trigger call
+ * that triggers the uretprobe.
+ */
+ switch (offset) {
+ case offsetof(struct pt_regs, rax):
+ ASSERT_EQ(pa[i], 0xdeadbeef, "return value");
+ break;
+ default:
+ if (!ASSERT_EQ(pb[i], pa[i], "register before-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+
+ /*
+ * Check each register seen from the bpf program against the register
+ * value after the uretprobe_regs_trigger call
+ */
+ switch (offset) {
+ /*
+ * These values will be different (not set in uretprobe_regs), so
+ * we don't care.
+ */
+ case offsetof(struct pt_regs, orig_rax):
+ case offsetof(struct pt_regs, rip):
+ case offsetof(struct pt_regs, cs):
+ case offsetof(struct pt_regs, rsp):
+ case offsetof(struct pt_regs, ss):
+ break;
+ default:
+ if (!ASSERT_EQ(pp[i], pa[i], "register prog-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+ }
+
+cleanup:
+ uprobe_syscall__destroy(skel);
+}
+
+#define BPF_TESTMOD_UPROBE_TEST_FILE "/sys/kernel/bpf_testmod_uprobe"
+
+static int write_bpf_testmod_uprobe(unsigned long offset)
+{
+ size_t n, ret;
+ char buf[30];
+ int fd;
+
+ n = sprintf(buf, "%lu", offset);
+
+ fd = open(BPF_TESTMOD_UPROBE_TEST_FILE, O_WRONLY);
+ if (fd < 0)
+ return -errno;
+
+ ret = write(fd, buf, n);
+ close(fd);
+ return ret != n ? (int) ret : 0;
+}
+
+static void test_uretprobe_regs_change(void)
+{
+ struct pt_regs before = {}, after = {};
+ unsigned long *pb = (unsigned long *) &before;
+ unsigned long *pa = (unsigned long *) &after;
+ unsigned long cnt = sizeof(before)/sizeof(*pb);
+ unsigned int i, err, offset;
+
+ offset = get_uprobe_offset(uretprobe_regs_trigger);
+
+ err = write_bpf_testmod_uprobe(offset);
+ if (!ASSERT_OK(err, "register_uprobe"))
+ return;
+
+ uretprobe_regs(&before, &after);
+
+ err = write_bpf_testmod_uprobe(0);
+ if (!ASSERT_OK(err, "unregister_uprobe"))
+ return;
+
+ for (i = 0; i < cnt; i++) {
+ unsigned int offset = i * sizeof(unsigned long);
+
+ switch (offset) {
+ case offsetof(struct pt_regs, rax):
+ ASSERT_EQ(pa[i], 0x12345678deadbeef, "rax");
+ break;
+ case offsetof(struct pt_regs, rcx):
+ ASSERT_EQ(pa[i], 0x87654321feebdaed, "rcx");
+ break;
+ case offsetof(struct pt_regs, r11):
+ ASSERT_EQ(pa[i], (__u64) -1, "r11");
+ break;
+ default:
+ if (!ASSERT_EQ(pa[i], pb[i], "register before-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+ }
+}
+
+#ifndef __NR_uretprobe
+#define __NR_uretprobe 467
+#endif
+
+__naked unsigned long uretprobe_syscall_call_1(void)
+{
+ /*
+ * Pretend we are the uretprobe trampoline to trigger the return
+ * probe invocation in order to verify we get SIGILL.
+ */
+ asm volatile (
+ "pushq %rax\n"
+ "pushq %rcx\n"
+ "pushq %r11\n"
+ "movq $" __stringify(__NR_uretprobe) ", %rax\n"
+ "syscall\n"
+ "popq %r11\n"
+ "popq %rcx\n"
+ "retq\n"
+ );
+}
+
+__naked unsigned long uretprobe_syscall_call(void)
+{
+ asm volatile (
+ "call uretprobe_syscall_call_1\n"
+ "retq\n"
+ );
+}
+
+static void test_uretprobe_syscall_call(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+ .retprobe = true,
+ );
+ struct uprobe_syscall_executed *skel;
+ int pid, status, err, go[2], c;
+
+ if (!ASSERT_OK(pipe(go), "pipe"))
+ return;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ goto cleanup;
+
+ pid = fork();
+ if (!ASSERT_GE(pid, 0, "fork"))
+ goto cleanup;
+
+ /* child */
+ if (pid == 0) {
+ close(go[1]);
+
+ /* wait for parent's kick */
+ err = read(go[0], &c, 1);
+ if (err != 1)
+ exit(-1);
+
+ uretprobe_syscall_call();
+ _exit(0);
+ }
+
+ skel->links.test = bpf_program__attach_uprobe_multi(skel->progs.test, pid,
+ "/proc/self/exe",
+ "uretprobe_syscall_call", &opts);
+ if (!ASSERT_OK_PTR(skel->links.test, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ /* kick the child */
+ write(go[1], &c, 1);
+ err = waitpid(pid, &status, 0);
+ ASSERT_EQ(err, pid, "waitpid");
+
+ /* verify the child got killed with SIGILL */
+ ASSERT_EQ(WIFSIGNALED(status), 1, "WIFSIGNALED");
+ ASSERT_EQ(WTERMSIG(status), SIGILL, "WTERMSIG");
+
+ /* verify the uretprobe program wasn't called */
+ ASSERT_EQ(skel->bss->executed, 0, "executed");
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+ close(go[1]);
+ close(go[0]);
+}
+
+/*
+ * Borrowed from tools/testing/selftests/x86/test_shadow_stack.c.
+ *
+ * For use in inline enablement of shadow stack.
+ *
+ * The program can't return from the point where shadow stack gets enabled
+ * because there will be no address on the shadow stack. So it can't use
+ * syscall() for enablement, since it is a function.
+ *
+ * Based on code from nolibc.h. Keep a copy here because this can't pull
+ * in all of nolibc.h.
+ */
+#define ARCH_PRCTL(arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = __NR_arch_prctl; \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#ifndef ARCH_SHSTK_ENABLE
+#define ARCH_SHSTK_ENABLE 0x5001
+#define ARCH_SHSTK_DISABLE 0x5002
+#define ARCH_SHSTK_SHSTK (1ULL << 0)
+#endif
+
+static void test_uretprobe_shadow_stack(void)
+{
+ if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
+ test__skip();
+ return;
+ }
+
+ /* Run all of the uretprobe tests. */
+ test_uretprobe_regs_equal();
+ test_uretprobe_regs_change();
+ test_uretprobe_syscall_call();
+
+ ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
+}
+#else
+static void test_uretprobe_regs_equal(void)
+{
+ test__skip();
+}
+
+static void test_uretprobe_regs_change(void)
+{
+ test__skip();
+}
+
+static void test_uretprobe_syscall_call(void)
+{
+ test__skip();
+}
+
+static void test_uretprobe_shadow_stack(void)
+{
+ test__skip();
+}
+#endif
+
+void test_uprobe_syscall(void)
+{
+ if (test__start_subtest("uretprobe_regs_equal"))
+ test_uretprobe_regs_equal();
+ if (test__start_subtest("uretprobe_regs_change"))
+ test_uretprobe_regs_change();
+ if (test__start_subtest("uretprobe_syscall_call"))
+ test_uretprobe_syscall_call();
+ if (test__start_subtest("uretprobe_shadow_stack"))
+ test_uretprobe_shadow_stack();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c b/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
new file mode 100644
index 000000000000..6deb8d560ddd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "uretprobe_stack.skel.h"
+#include "../sdt.h"
+
+/* We set up a target_1() -> target_2() -> target_3() -> target_4() -> USDT()
+ * call chain, each function being traced by our BPF program. On entry to or
+ * return from each target_*() we capture the user stack trace and record it
+ * in a global variable, so that the user-space part of the test can validate it.
+ *
+ * Note, we put each target function into a custom section to get the
+ * __start_XXX/__stop_XXX symbols generated by the linker for us, which let us
+ * know the address range of those functions.
+ */
+__attribute__((section("uprobe__target_4")))
+__weak int target_4(void)
+{
+ STAP_PROBE1(uretprobe_stack, target, 42);
+ return 42;
+}
+
+extern const void *__start_uprobe__target_4;
+extern const void *__stop_uprobe__target_4;
+
+__attribute__((section("uprobe__target_3")))
+__weak int target_3(void)
+{
+ return target_4();
+}
+
+extern const void *__start_uprobe__target_3;
+extern const void *__stop_uprobe__target_3;
+
+__attribute__((section("uprobe__target_2")))
+__weak int target_2(void)
+{
+ return target_3();
+}
+
+extern const void *__start_uprobe__target_2;
+extern const void *__stop_uprobe__target_2;
+
+__attribute__((section("uprobe__target_1")))
+__weak int target_1(int depth)
+{
+ if (depth < 1)
+ return 1 + target_1(depth + 1);
+ else
+ return target_2();
+}
+
+extern const void *__start_uprobe__target_1;
+extern const void *__stop_uprobe__target_1;
+
+extern const void *__start_uretprobe_stack_sec;
+extern const void *__stop_uretprobe_stack_sec;
+
+struct range {
+ long start;
+ long stop;
+};
+
+static struct range targets[] = {
+ {}, /* we want target_1 to map to targets[1], so we need 1-based indexing */
+ { (long)&__start_uprobe__target_1, (long)&__stop_uprobe__target_1 },
+ { (long)&__start_uprobe__target_2, (long)&__stop_uprobe__target_2 },
+ { (long)&__start_uprobe__target_3, (long)&__stop_uprobe__target_3 },
+ { (long)&__start_uprobe__target_4, (long)&__stop_uprobe__target_4 },
+};
+
+static struct range caller = {
+ (long)&__start_uretprobe_stack_sec,
+ (long)&__stop_uretprobe_stack_sec,
+};
+
+static void validate_stack(__u64 *ips, int stack_len, int cnt, ...)
+{
+ int i, j;
+ va_list args;
+
+ if (!ASSERT_GT(stack_len, 0, "stack_len"))
+ return;
+
+ stack_len /= 8;
+
+ /* check if we have enough entries to satisfy test expectations */
+ if (!ASSERT_GE(stack_len, cnt, "stack_len2"))
+ return;
+
+ if (env.verbosity >= VERBOSE_NORMAL) {
+ printf("caller: %#lx - %#lx\n", caller.start, caller.stop);
+ for (i = 1; i < ARRAY_SIZE(targets); i++)
+ printf("target_%d: %#lx - %#lx\n", i, targets[i].start, targets[i].stop);
+ for (i = 0; i < stack_len; i++) {
+ for (j = 1; j < ARRAY_SIZE(targets); j++) {
+ if (ips[i] >= targets[j].start && ips[i] < targets[j].stop)
+ break;
+ }
+ if (j < ARRAY_SIZE(targets)) { /* found target match */
+ printf("ENTRY #%d: %#lx (in target_%d)\n", i, (long)ips[i], j);
+ } else if (ips[i] >= caller.start && ips[i] < caller.stop) {
+ printf("ENTRY #%d: %#lx (in caller)\n", i, (long)ips[i]);
+ } else {
+ printf("ENTRY #%d: %#lx\n", i, (long)ips[i]);
+ }
+ }
+ }
+
+ va_start(args, cnt);
+
+ for (i = cnt - 1; i >= 0; i--) {
+ /* most recent entry is the deepest target function */
+ const struct range *t = va_arg(args, const struct range *);
+
+ ASSERT_GE(ips[i], t->start, "addr_start");
+ ASSERT_LT(ips[i], t->stop, "addr_stop");
+ }
+
+ va_end(args);
+}
+
+/* __weak prevents inlining */
+__attribute__((section("uretprobe_stack_sec")))
+__weak void test_uretprobe_stack(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct uretprobe_stack *skel;
+ int err;
+
+ skel = uretprobe_stack__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ err = uretprobe_stack__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ ASSERT_EQ(target_1(0), 42 + 1, "trigger_return");
+
+ /*
+ * Stacks captured on ENTRY uprobes
+ */
+
+ /* (uprobe 1) target_1 in stack trace */
+ validate_stack(skel->bss->entry_stack1, skel->bss->entry1_len,
+ 2, &caller, &targets[1]);
+ /* (uprobe 1, recursed) */
+ validate_stack(skel->bss->entry_stack1_recur, skel->bss->entry1_recur_len,
+ 3, &caller, &targets[1], &targets[1]);
+ /* (uprobe 2) caller -> target_1 -> target_1 -> target_2 */
+ validate_stack(skel->bss->entry_stack2, skel->bss->entry2_len,
+ 4, &caller, &targets[1], &targets[1], &targets[2]);
+ /* (uprobe 3) */
+ validate_stack(skel->bss->entry_stack3, skel->bss->entry3_len,
+ 5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
+ /* (uprobe 4) caller -> target_1 -> target_1 -> target_2 -> target_3 -> target_4 */
+ validate_stack(skel->bss->entry_stack4, skel->bss->entry4_len,
+ 6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);
+
+ /* (USDT): full caller -> target_1 -> target_1 -> target_2 (uretprobed)
+ * -> target_3 -> target_4 (uretprobes) chain
+ */
+ validate_stack(skel->bss->usdt_stack, skel->bss->usdt_len,
+ 6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);
+
+ /*
+ * Now stacks captured on the way out in EXIT uprobes
+ */
+
+ /* (uretprobe 4) everything up to target_4, but excluding it */
+ validate_stack(skel->bss->exit_stack4, skel->bss->exit4_len,
+ 5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
+ /* we didn't install uretprobes on target_2 and target_3 */
+ /* (uretprobe 1, recur) first target_1 call only */
+ validate_stack(skel->bss->exit_stack1_recur, skel->bss->exit1_recur_len,
+ 2, &caller, &targets[1]);
+ /* (uretprobe 1) just a caller in the stack trace */
+ validate_stack(skel->bss->exit_stack1, skel->bss->exit1_len,
+ 1, &caller);
+
+cleanup:
+ uretprobe_stack__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall.c b/tools/testing/selftests/bpf/progs/uprobe_syscall.c
new file mode 100644
index 000000000000..8a4fa6c7ef59
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_syscall.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <string.h>
+
+struct pt_regs regs;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("uretprobe//proc/self/exe:uretprobe_regs_trigger")
+int uretprobe(struct pt_regs *ctx)
+{
+ __builtin_memcpy(&regs, ctx, sizeof(regs));
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
new file mode 100644
index 000000000000..0d7f1a7db2e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <string.h>
+
+struct pt_regs regs;
+
+char _license[] SEC("license") = "GPL";
+
+int executed = 0;
+
+SEC("uretprobe.multi")
+int test(struct pt_regs *regs)
+{
+ executed = 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uretprobe_stack.c b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
new file mode 100644
index 000000000000..9fdcf396b8f4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 entry_stack1[32], exit_stack1[32];
+__u64 entry_stack1_recur[32], exit_stack1_recur[32];
+__u64 entry_stack2[32];
+__u64 entry_stack3[32];
+__u64 entry_stack4[32], exit_stack4[32];
+__u64 usdt_stack[32];
+
+int entry1_len, exit1_len;
+int entry1_recur_len, exit1_recur_len;
+int entry2_len, exit2_len;
+int entry3_len, exit3_len;
+int entry4_len, exit4_len;
+int usdt_len;
+
+#define SZ sizeof(usdt_stack)
+
+SEC("uprobe//proc/self/exe:target_1")
+int BPF_UPROBE(uprobe_1)
+{
+ /* target_1 is recursive with a depth of 2, so we capture two separate
+ * stack traces, depending on which occurrence it is
+ */
+ static bool recur = false;
+
+ if (!recur)
+ entry1_len = bpf_get_stack(ctx, &entry_stack1, SZ, BPF_F_USER_STACK);
+ else
+ entry1_recur_len = bpf_get_stack(ctx, &entry_stack1_recur, SZ, BPF_F_USER_STACK);
+
+ recur = true;
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:target_1")
+int BPF_URETPROBE(uretprobe_1)
+{
+ /* see above, target_1 is recursive */
+ static bool recur = false;
+
+ /* NOTE: order of returns is reversed to order of entries */
+ if (!recur)
+ exit1_recur_len = bpf_get_stack(ctx, &exit_stack1_recur, SZ, BPF_F_USER_STACK);
+ else
+ exit1_len = bpf_get_stack(ctx, &exit_stack1, SZ, BPF_F_USER_STACK);
+
+ recur = true;
+ return 0;
+}
+
+SEC("uprobe//proc/self/exe:target_2")
+int BPF_UPROBE(uprobe_2)
+{
+ entry2_len = bpf_get_stack(ctx, &entry_stack2, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+/* no uretprobe for target_2 */
+
+SEC("uprobe//proc/self/exe:target_3")
+int BPF_UPROBE(uprobe_3)
+{
+ entry3_len = bpf_get_stack(ctx, &entry_stack3, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+/* no uretprobe for target_3 */
+
+SEC("uprobe//proc/self/exe:target_4")
+int BPF_UPROBE(uprobe_4)
+{
+ entry4_len = bpf_get_stack(ctx, &entry_stack4, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:target_4")
+int BPF_URETPROBE(uretprobe_4)
+{
+ exit4_len = bpf_get_stack(ctx, &exit_stack4, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+SEC("usdt//proc/self/exe:uretprobe_stack:target")
+int BPF_USDT(usdt_probe)
+{
+ usdt_len = bpf_get_stack(ctx, &usdt_stack, SZ, BPF_F_USER_STACK);
+ return 0;
+}
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc
new file mode 100644
index 000000000000..ff88f97e41fb
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc
@@ -0,0 +1,103 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: ftrace - function graph filters
+# requires: set_ftrace_filter function_graph:tracer
+
+# Make sure that function graph filtering works
+
+INSTANCE1="instances/test1_$$"
+INSTANCE2="instances/test2_$$"
+INSTANCE3="instances/test3_$$"
+
+WD=`pwd`
+
+do_reset() {
+ cd $WD
+ if [ -d $INSTANCE1 ]; then
+ echo nop > $INSTANCE1/current_tracer
+ rmdir $INSTANCE1
+ fi
+ if [ -d $INSTANCE2 ]; then
+ echo nop > $INSTANCE2/current_tracer
+ rmdir $INSTANCE2
+ fi
+ if [ -d $INSTANCE3 ]; then
+ echo nop > $INSTANCE3/current_tracer
+ rmdir $INSTANCE3
+ fi
+}
+
+mkdir $INSTANCE1
+if ! grep -q function_graph $INSTANCE1/available_tracers; then
+ echo "function_graph not allowed with instances"
+ rmdir $INSTANCE1
+ exit_unsupported
+fi
+
+mkdir $INSTANCE2
+mkdir $INSTANCE3
+
+fail() { # msg
+ do_reset
+ echo $1
+ exit_fail
+}
+
+disable_tracing
+clear_trace
+
+do_test() {
+ REGEX=$1
+ TEST=$2
+
+ # filter something, schedule is always good
+ if ! echo "$REGEX" > set_ftrace_filter; then
+ fail "can not enable filter $REGEX"
+ fi
+
+ echo > trace
+ echo function_graph > current_tracer
+ enable_tracing
+ sleep 1
+ # search for functions (has "{" or ";" on the line)
+ echo 0 > tracing_on
+ count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep -v "$TEST" | wc -l`
+ echo 1 > tracing_on
+ if [ $count -ne 0 ]; then
+ fail "Graph filtering not working by itself against $TEST?"
+ fi
+
+ # Make sure we did find something
+ echo 0 > tracing_on
+ count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep "$TEST" | wc -l`
+ echo 1 > tracing_on
+ if [ $count -eq 0 ]; then
+ fail "No traces found with $TEST?"
+ fi
+}
+
+do_test '*sched*' 'sched'
+cd $INSTANCE1
+do_test '*lock*' 'lock'
+cd $WD
+cd $INSTANCE2
+do_test '*rcu*' 'rcu'
+cd $WD
+cd $INSTANCE3
+echo function_graph > current_tracer
+
+sleep 1
+count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep "$TEST" | wc -l`
+if [ $count -eq 0 ]; then
+ fail "No traces found with all tracing?"
+fi
+
+cd $WD
+echo nop > current_tracer
+echo nop > $INSTANCE1/current_tracer
+echo nop > $INSTANCE2/current_tracer
+echo nop > $INSTANCE3/current_tracer
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
index 2f7211254529..8dcce001881d 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
@@ -8,12 +8,18 @@
# Also test it on an instance directory
do_function_fork=1
+do_funcgraph_proc=1
if [ ! -f options/function-fork ]; then
do_function_fork=0
echo "no option for function-fork found. Option will not be tested."
fi
+if [ ! -f options/funcgraph-proc ]; then
+ do_funcgraph_proc=0
+ echo "no option for function-fork found. Option will not be tested."
+fi
+
read PID _ < /proc/self/stat
if [ $do_function_fork -eq 1 ]; then
@@ -21,12 +27,19 @@ if [ $do_function_fork -eq 1 ]; then
orig_value=`grep function-fork trace_options`
fi
+if [ $do_funcgraph_proc -eq 1 ]; then
+ orig_value2=`cat options/funcgraph-proc`
+ echo 1 > options/funcgraph-proc
+fi
+
do_reset() {
- if [ $do_function_fork -eq 0 ]; then
- return
+ if [ $do_function_fork -eq 1 ]; then
+ echo $orig_value > trace_options
fi
- echo $orig_value > trace_options
+ if [ $do_funcgraph_proc -eq 1 ]; then
+ echo $orig_value2 > options/funcgraph-proc
+ fi
}
fail() { # msg
@@ -36,13 +49,15 @@ fail() { # msg
}
do_test() {
+ TRACER=$1
+
disable_tracing
echo do_execve* > set_ftrace_filter
echo $FUNCTION_FORK >> set_ftrace_filter
echo $PID > set_ftrace_pid
- echo function > current_tracer
+ echo $TRACER > current_tracer
if [ $do_function_fork -eq 1 ]; then
# don't allow children to be traced
@@ -82,7 +97,11 @@ do_test() {
fi
}
-do_test
+do_test function
+if grep -s function_graph available_tracers; then
+ do_test function_graph
+fi
+
do_reset
exit 0
diff --git a/tools/testing/selftests/hid/hid_bpf.c b/tools/testing/selftests/hid/hid_bpf.c
index f825623e3edc..dc0408a831d0 100644
--- a/tools/testing/selftests/hid/hid_bpf.c
+++ b/tools/testing/selftests/hid/hid_bpf.c
@@ -460,7 +460,7 @@ FIXTURE(hid_bpf) {
int hid_id;
pthread_t tid;
struct hid *skel;
- int hid_links[3]; /* max number of programs loaded in a single test */
+ struct bpf_link *hid_links[3]; /* max number of programs loaded in a single test */
};
static void detach_bpf(FIXTURE_DATA(hid_bpf) * self)
{
@@ -470,9 +470,14 @@ static void detach_bpf(FIXTURE_DATA(hid_bpf) * self)
close(self->hidraw_fd);
self->hidraw_fd = 0;
+ if (!self->skel)
+ return;
+
+ hid__detach(self->skel);
+
for (i = 0; i < ARRAY_SIZE(self->hid_links); i++) {
if (self->hid_links[i])
- close(self->hid_links[i]);
+ bpf_link__destroy(self->hid_links[i]);
}
hid__destroy(self->skel);
@@ -527,14 +532,7 @@ static void load_programs(const struct test_program programs[],
FIXTURE_DATA(hid_bpf) * self,
const FIXTURE_VARIANT(hid_bpf) * variant)
{
- int attach_fd, err = -EINVAL;
- struct attach_prog_args args = {
- .retval = -1,
- };
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
- .ctx_in = &args,
- .ctx_size_in = sizeof(args),
- );
+ int err = -EINVAL;
ASSERT_LE(progs_count, ARRAY_SIZE(self->hid_links))
TH_LOG("too many programs are to be loaded");
@@ -545,37 +543,45 @@ static void load_programs(const struct test_program programs[],
for (int i = 0; i < progs_count; i++) {
struct bpf_program *prog;
+ struct bpf_map *map;
+ int *ops_hid_id;
prog = bpf_object__find_program_by_name(*self->skel->skeleton->obj,
programs[i].name);
ASSERT_OK_PTR(prog) TH_LOG("can not find program by name '%s'", programs[i].name);
bpf_program__set_autoload(prog, true);
+
+ map = bpf_object__find_map_by_name(*self->skel->skeleton->obj,
+ programs[i].name + 4);
+ ASSERT_OK_PTR(map) TH_LOG("can not find struct_ops by name '%s'",
+ programs[i].name + 4);
+
+ /* hid_id is the first field of struct hid_bpf_ops */
+ ops_hid_id = bpf_map__initial_value(map, NULL);
+ ASSERT_OK_PTR(ops_hid_id) TH_LOG("unable to retrieve struct_ops data");
+
+ *ops_hid_id = self->hid_id;
}
err = hid__load(self->skel);
ASSERT_OK(err) TH_LOG("hid_skel_load failed: %d", err);
- attach_fd = bpf_program__fd(self->skel->progs.attach_prog);
- ASSERT_GE(attach_fd, 0) TH_LOG("locate attach_prog: %d", attach_fd);
-
for (int i = 0; i < progs_count; i++) {
- struct bpf_program *prog;
+ struct bpf_map *map;
- prog = bpf_object__find_program_by_name(*self->skel->skeleton->obj,
- programs[i].name);
- ASSERT_OK_PTR(prog) TH_LOG("can not find program by name '%s'", programs[i].name);
-
- args.prog_fd = bpf_program__fd(prog);
- args.hid = self->hid_id;
- args.insert_head = programs[i].insert_head;
- err = bpf_prog_test_run_opts(attach_fd, &tattr);
- ASSERT_GE(args.retval, 0)
- TH_LOG("attach_hid(%s): %d", programs[i].name, args.retval);
+ map = bpf_object__find_map_by_name(*self->skel->skeleton->obj,
+ programs[i].name + 4);
+ ASSERT_OK_PTR(map) TH_LOG("can not find struct_ops by name '%s'",
+ programs[i].name + 4);
- self->hid_links[i] = args.retval;
+ self->hid_links[i] = bpf_map__attach_struct_ops(map);
+ ASSERT_OK_PTR(self->hid_links[i]) TH_LOG("failed to attach struct ops '%s'",
+ programs[i].name + 4);
}
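+
+	/* attach the remaining auto-loaded programs, e.g. the hidraw_open fentry hook */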
+ hid__attach(self->skel);
+
self->hidraw_fd = open_hidraw(self->dev_id);
ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
}
@@ -640,6 +646,47 @@ TEST_F(hid_bpf, raw_event)
}
/*
+ * Attach hid_subprog_first_event to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can change the data
+ */
+TEST_F(hid_bpf, subprog_raw_event)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_subprog_first_event" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[2], 47);
+
+ /* inject another event */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 1;
+ buf[1] = 47;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[2], 52);
+}
+
+/*
* Ensures that we can attach/detach programs
*/
TEST_F(hid_bpf, test_attach_detach)
@@ -648,13 +695,17 @@ TEST_F(hid_bpf, test_attach_detach)
{ .name = "hid_first_event" },
{ .name = "hid_second_event" },
};
+ struct bpf_link *link;
__u8 buf[10] = {0};
- int err, link;
+ int err, link_fd;
LOAD_PROGRAMS(progs);
link = self->hid_links[0];
- ASSERT_GT(link, 0) TH_LOG("HID-BPF link not created");
+ ASSERT_OK_PTR(link) TH_LOG("HID-BPF link not created");
+
+ link_fd = bpf_link__fd(link);
+ ASSERT_GE(link_fd, 0) TH_LOG("HID-BPF link FD not valid");
/* inject one event */
buf[0] = 1;
@@ -673,7 +724,7 @@ TEST_F(hid_bpf, test_attach_detach)
/* pin the first program and immediately unpin it */
#define PIN_PATH "/sys/fs/bpf/hid_first_event"
- err = bpf_obj_pin(link, PIN_PATH);
+ err = bpf_obj_pin(link_fd, PIN_PATH);
ASSERT_OK(err) TH_LOG("error while calling bpf_obj_pin");
remove(PIN_PATH);
#undef PIN_PATH
@@ -876,6 +927,325 @@ TEST_F(hid_bpf, test_hid_user_raw_request_call)
}
/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is called and prevents the
+ * call to uhid.
+ */
+TEST_F(hid_bpf, test_hid_filter_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_filter_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* first check that we did not attach to device_event */
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 42);
+ ASSERT_EQ(buf[2], 0) TH_LOG("leftovers_from_previous_test");
+
+ /* now check that our program is preventing hid_hw_raw_request() */
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_LT(err, 0) TH_LOG("unexpected success while reading HIDIOCGFEATURE: %d", err);
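+	/* errno 20 (ENOTDIR) matches the -20 returned by hid_test_filter_raw_request */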
+ ASSERT_EQ(errno, 20) TH_LOG("unexpected error code while reading HIDIOCGFEATURE: %d",
+ errno);
+
+ /* remove our bpf program and check that we can now emit commands */
+
+ /* detach the program */
+ detach_bpf(self);
+
+ self->hidraw_fd = open_hidraw(self->dev_id);
+ ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
+
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_GE(err, 0) TH_LOG("error while reading HIDIOCGFEATURE: %d", err);
+}
+
+/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is called and can issue the call
+ * to uhid and transform the answer.
+ */
+TEST_F(hid_bpf, test_hid_change_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_hidraw_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_EQ(err, 3) TH_LOG("unexpected returned size while reading HIDIOCGFEATURE: %d", err);
+
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 3);
+ ASSERT_EQ(buf[2], 4);
+}
+
+/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_EQ(err, 3) TH_LOG("unexpected returned size while reading HIDIOCGFEATURE: %d", err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is called and prevents the
+ * call to uhid.
+ */
+TEST_F(hid_bpf, test_hid_filter_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_filter_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* first check that we did not attach to device_event */
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 42);
+ ASSERT_EQ(buf[2], 0) TH_LOG("leftovers_from_previous_test");
+
+ /* now check that our program is preventing hid_hw_output_report() */
+
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 3);
+ ASSERT_LT(err, 0) TH_LOG("unexpected success while sending hid_hw_output_report: %d", err);
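+	/* errno 25 (ENOTTY) matches the -25 returned by hid_test_filter_output_report */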
+ ASSERT_EQ(errno, 25) TH_LOG("unexpected error code while sending hid_hw_output_report: %d",
+ errno);
+
+ /* remove our bpf program and check that we can now emit commands */
+
+ /* detach the program */
+ detach_bpf(self);
+
+ self->hidraw_fd = open_hidraw(self->dev_id);
+ ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
+
+ err = write(self->hidraw_fd, buf, 3);
+ ASSERT_GE(err, 0) TH_LOG("error while sending hid_hw_output_report: %d", err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is called and can issue the call
+ * to uhid and transform the answer.
+ */
+TEST_F(hid_bpf, test_hid_change_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_hidraw_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_output_report from hidraw */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 10);
+ ASSERT_EQ(err, 2) TH_LOG("unexpected returned size while sending hid_hw_output_report: %d",
+ err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_output_report from hidraw */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 8);
+ ASSERT_EQ(err, 2) TH_LOG("unexpected returned size while sending hid_hw_output_report: %d",
+ err);
+}
+
+/*
+ * Attach hid_test_multiply_events_wq to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can add extra data
+ */
+TEST_F(hid_bpf, test_multiply_events_wq)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_multiply_events_wq" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 47);
+
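+	/* give the bpf_wq callback time to run and inject the second report */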
+ usleep(100000);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 3);
+}
+
+/*
+ * Attach hid_test_multiply_events to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can add extra data
+ */
+TEST_F(hid_bpf, test_multiply_events)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_multiply_events" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 47);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 52);
+}
+
+/*
+ * Call hid_bpf_input_report against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_input_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_input_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+	/* inject one input event through uhid */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 3);
+
+ /* read the data from hidraw: hid_bpf_try_input_report should work exactly one time */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 4);
+
+	/* read the data from hidraw: there should be none, so the read fails instead of blocking */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, -1) TH_LOG("read_hidraw");
+}
+
+/*
* Attach hid_insert{0,1,2} to the given uhid device,
* retrieve and open the matching hidraw node,
* inject one event in the uhid device,
diff --git a/tools/testing/selftests/hid/progs/hid.c b/tools/testing/selftests/hid/progs/hid.c
index f67d35def142..ee9bbbcf751b 100644
--- a/tools/testing/selftests/hid/progs/hid.c
+++ b/tools/testing/selftests/hid/progs/hid.c
@@ -14,8 +14,8 @@ struct attach_prog_args {
__u64 callback_check = 52;
__u64 callback2_check = 52;
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx)
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
@@ -29,8 +29,38 @@ int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx)
return hid_ctx->size;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops first_event = {
+ .hid_device_event = (void *)hid_first_event,
+ .hid_id = 2,
+};
+
+int __hid_subprog_first_event(struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
+
+ if (!rw_data)
+ return 0; /* EPERM check */
+
+ rw_data[2] = rw_data[1] + 5;
+
+ return hid_ctx->size;
+}
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_subprog_first_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ return __hid_subprog_first_event(hid_ctx, type);
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops subprog_first_event = {
+ .hid_device_event = (void *)hid_subprog_first_event,
+ .hid_id = 2,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -42,8 +72,13 @@ int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx)
return hid_ctx->size;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops second_event = {
+ .hid_device_event = (void *)hid_second_event,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
@@ -55,15 +90,10 @@ int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx)
return 9;
}
-SEC("syscall")
-int attach_prog(struct attach_prog_args *ctx)
-{
- ctx->retval = hid_bpf_attach_prog(ctx->hid,
- ctx->prog_fd,
- ctx->insert_head ? HID_BPF_FLAG_INSERT_HEAD :
- HID_BPF_FLAG_NONE);
- return 0;
-}
+SEC(".struct_ops.link")
+struct hid_bpf_ops change_report_id = {
+ .hid_device_event = (void *)hid_change_report_id,
+};
struct hid_hw_request_syscall_args {
/* data needs to come at offset 0 so we can use it in calls */
@@ -181,7 +211,12 @@ static const __u8 rdesc[] = {
0xc0, /* END_COLLECTION */
};
-SEC("?fmod_ret/hid_bpf_rdesc_fixup")
+/*
+ * The following program is marked as sleepable (struct_ops.s).
+ * This is not strictly mandatory, but it makes a nice test for
+ * sleepable struct_ops.
+ */
+SEC("?struct_ops.s/hid_rdesc_fixup")
int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4096 /* size */);
@@ -200,8 +235,13 @@ int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
return sizeof(rdesc) + 73;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops rdesc_fixup = {
+ .hid_rdesc_fixup = (void *)hid_rdesc_fixup,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -217,8 +257,14 @@ int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx)
return 0;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert1 = {
+ .hid_device_event = (void *)hid_test_insert1,
+ .flags = BPF_F_BEFORE,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -234,8 +280,13 @@ int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx)
return 0;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert2 = {
+ .hid_device_event = (void *)hid_test_insert2,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -250,3 +301,300 @@ int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx)
return 0;
}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert3 = {
+ .hid_device_event = (void *)hid_test_insert3,
+};
+
+SEC("?struct_ops/hid_hw_request")
+int BPF_PROG(hid_test_filter_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ return -20;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_filter_raw_request = {
+ .hid_hw_request = (void *)hid_test_filter_raw_request,
+};
+
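+/*
+ * Set by the hidraw_open fentry prog below, so the struct_ops programs
+ * can tell whether a request originated from our own hidraw fd.
+ */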
+static struct file *current_file;
+
+SEC("fentry/hidraw_open")
+int BPF_PROG(hidraw_open, struct inode *inode, struct file *file)
+{
+ current_file = file;
+ return 0;
+}
+
+SEC("?struct_ops.s/hid_hw_request")
+int BPF_PROG(hid_test_hidraw_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* check if the incoming request comes from our hidraw operation */
+ if (source == (__u64)current_file) {
+ data[0] = reportnum;
+
+ ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
+ if (ret != 2)
+ return -1;
+ data[0] = reportnum + 1;
+ data[1] = reportnum + 2;
+ data[2] = reportnum + 3;
+ return 3;
+ }
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_hidraw_raw_request = {
+ .hid_hw_request = (void *)hid_test_hidraw_raw_request,
+};
+
+SEC("?struct_ops.s/hid_hw_request")
+int BPF_PROG(hid_test_infinite_loop_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* always forward the request as-is to the device, hid-bpf should prevent
+ * infinite loops.
+ */
+ data[0] = reportnum;
+
+ ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
+ if (ret == 2)
+ return 3;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_raw_request = {
+ .hid_hw_request = (void *)hid_test_infinite_loop_raw_request,
+};
+
+SEC("?struct_ops/hid_hw_output_report")
+int BPF_PROG(hid_test_filter_output_report, struct hid_bpf_ctx *hctx, __u64 source)
+{
+ return -25;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_filter_output_report = {
+ .hid_hw_output_report = (void *)hid_test_filter_output_report,
+};
+
+SEC("?struct_ops.s/hid_hw_output_report")
+int BPF_PROG(hid_test_hidraw_output_report, struct hid_bpf_ctx *hctx, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* check if the incoming request comes from our hidraw operation */
+ if (source == (__u64)current_file)
+ return hid_bpf_hw_output_report(hctx, data, 2);
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_hidraw_output_report = {
+ .hid_hw_output_report = (void *)hid_test_hidraw_output_report,
+};
+
+SEC("?struct_ops.s/hid_hw_output_report")
+int BPF_PROG(hid_test_infinite_loop_output_report, struct hid_bpf_ctx *hctx, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* always forward the request as-is to the device, hid-bpf should prevent
+ * infinite loops.
+ */
+
+ ret = hid_bpf_hw_output_report(hctx, data, 2);
+ if (ret == 2)
+ return 2;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_output_report = {
+ .hid_hw_output_report = (void *)hid_test_infinite_loop_output_report,
+};
+
+struct elem {
+ struct bpf_wq work;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
+{
+ __u8 buf[9] = {2, 3, 4, 5, 6, 7, 8, 9, 10};
+ struct hid_bpf_ctx *hid_ctx;
+
+ hid_ctx = hid_bpf_allocate_context(*key);
+ if (!hid_ctx)
+ return 0; /* EPERM check */
+
+ hid_bpf_input_report(hid_ctx, HID_INPUT_REPORT, buf, sizeof(buf));
+
+ hid_bpf_release_context(hid_ctx);
+
+ return 0;
+}
+
+static int test_inject_input_report_callback(int *key)
+{
+ struct elem init = {}, *val;
+ struct bpf_wq *wq;
+
+ if (bpf_map_update_elem(&hmap, key, &init, 0))
+ return -1;
+
+ val = bpf_map_lookup_elem(&hmap, key);
+ if (!val)
+ return -2;
+
+ wq = &val->work;
+ if (bpf_wq_init(wq, &hmap, 0) != 0)
+ return -3;
+
+ if (bpf_wq_set_callback(wq, wq_cb_sleepable, 0))
+ return -4;
+
+ if (bpf_wq_start(wq, 0))
+ return -5;
+
+ return 0;
+}
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_multiply_events_wq, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 9 /* size */);
+ int hid = hid_ctx->hid->id;
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != 1)
+ return 0;
+
+ ret = test_inject_input_report_callback(&hid);
+ if (ret)
+ return ret;
+
+ data[1] += 5;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_multiply_events_wq = {
+ .hid_device_event = (void *)hid_test_multiply_events_wq,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_multiply_events, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 9 /* size */);
+ __u8 buf[9];
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != 1)
+ return 0;
+
+ /*
+	 * we have to use an intermediate buffer as hid_bpf_try_input_report
+ * will memset data to \0
+ */
+ __builtin_memcpy(buf, data, sizeof(buf));
+
+ buf[0] = 2;
+ buf[1] += 5;
+ ret = hid_bpf_try_input_report(hid_ctx, HID_INPUT_REPORT, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * In the real world we would have to reset the original buffer, as data
+	 * might be garbage now; here it actually holds the content of 'buf'.
+ */
+ data[1] += 5;
+
+ return 9;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_multiply_events = {
+ .hid_device_event = (void *)hid_test_multiply_events,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_infinite_loop_input_report, struct hid_bpf_ctx *hctx,
+ enum hid_report_type report_type, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 6 /* size */);
+ __u8 buf[6];
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /*
+	 * we have to use an intermediate buffer as hid_bpf_try_input_report
+ * will memset data to \0
+ */
+ __builtin_memcpy(buf, data, sizeof(buf));
+
+	/* always re-inject the event; hid-bpf should prevent
+	 * infinite loops.
+	 * The return value is ignored, so the event still passes through to
+	 * userspace.
+ */
+
+ hid_bpf_try_input_report(hctx, report_type, buf, sizeof(buf));
+
+	/* each time we process the event we increment data[1] by one:
+	 * after each successful call to hid_bpf_try_input_report, buf
+	 * has been copied back into data by the kernel.
+ */
+ data[1] += 1;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_input_report = {
+ .hid_device_event = (void *)hid_test_infinite_loop_input_report,
+};
diff --git a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
index 9cd56821d0f1..cfe37f491906 100644
--- a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
+++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
@@ -7,6 +7,7 @@
/* "undefine" structs and enums in vmlinux.h, because we "override" them below */
#define hid_bpf_ctx hid_bpf_ctx___not_used
+#define hid_bpf_ops hid_bpf_ops___not_used
#define hid_report_type hid_report_type___not_used
#define hid_class_request hid_class_request___not_used
#define hid_bpf_attach_flags hid_bpf_attach_flags___not_used
@@ -20,13 +21,11 @@
#define HID_REQ_SET_REPORT HID_REQ_SET_REPORT___not_used
#define HID_REQ_SET_IDLE HID_REQ_SET_IDLE___not_used
#define HID_REQ_SET_PROTOCOL HID_REQ_SET_PROTOCOL___not_used
-#define HID_BPF_FLAG_NONE HID_BPF_FLAG_NONE___not_used
-#define HID_BPF_FLAG_INSERT_HEAD HID_BPF_FLAG_INSERT_HEAD___not_used
-#define HID_BPF_FLAG_MAX HID_BPF_FLAG_MAX___not_used
#include "vmlinux.h"
#undef hid_bpf_ctx
+#undef hid_bpf_ops
#undef hid_report_type
#undef hid_class_request
#undef hid_bpf_attach_flags
@@ -40,9 +39,6 @@
#undef HID_REQ_SET_REPORT
#undef HID_REQ_SET_IDLE
#undef HID_REQ_SET_PROTOCOL
-#undef HID_BPF_FLAG_NONE
-#undef HID_BPF_FLAG_INSERT_HEAD
-#undef HID_BPF_FLAG_MAX
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -57,10 +53,8 @@ enum hid_report_type {
};
struct hid_bpf_ctx {
- __u32 index;
- const struct hid_device *hid;
+ struct hid_device *hid;
__u32 allocated_size;
- enum hid_report_type report_type;
union {
__s32 retval;
__s32 size;
@@ -76,17 +70,28 @@ enum hid_class_request {
HID_REQ_SET_PROTOCOL = 0x0B,
};
-enum hid_bpf_attach_flags {
- HID_BPF_FLAG_NONE = 0,
- HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
- HID_BPF_FLAG_MAX,
+struct hid_bpf_ops {
+ int hid_id;
+ u32 flags;
+ struct list_head list;
+ int (*hid_device_event)(struct hid_bpf_ctx *ctx, enum hid_report_type report_type,
+ u64 source);
+ int (*hid_rdesc_fixup)(struct hid_bpf_ctx *ctx);
+ int (*hid_hw_request)(struct hid_bpf_ctx *ctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype,
+ u64 source);
+ int (*hid_hw_output_report)(struct hid_bpf_ctx *ctx, u64 source);
+ struct hid_device *hdev;
};
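+
+/*
+ * BPF_F_BEFORE may be missing from older UAPI headers; test_insert1 sets
+ * it so that its entry runs ahead of the other struct_ops programs.
+ */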
+#ifndef BPF_F_BEFORE
+#define BPF_F_BEFORE (1U << 3)
+#endif
+
/* following are kfuncs exported by HID for HID-BPF */
extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
unsigned int offset,
const size_t __sz) __ksym;
-extern int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, u32 flags) __ksym;
extern struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id) __ksym;
extern void hid_bpf_release_context(struct hid_bpf_ctx *ctx) __ksym;
extern int hid_bpf_hw_request(struct hid_bpf_ctx *ctx,
@@ -100,5 +105,18 @@ extern int hid_bpf_input_report(struct hid_bpf_ctx *ctx,
enum hid_report_type type,
__u8 *data,
size_t buf__sz) __ksym;
+extern int hid_bpf_try_input_report(struct hid_bpf_ctx *ctx,
+ enum hid_report_type type,
+ __u8 *data,
+ size_t buf__sz) __ksym;
+
+/* bpf_wq implementation */
+extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
+extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
+extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+ int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
+ unsigned int flags__k, void *aux__ign) __ksym;
+#define bpf_wq_set_callback(timer, cb, flags) \
+ bpf_wq_set_callback_impl(timer, cb, flags, NULL)
#endif /* __HID_BPF_HELPERS_H */
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index edf1c99c9936..6343f4053bd4 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -279,6 +279,9 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
uint32_t parent_hwpt_id = 0;
uint32_t parent_hwpt_id_not_work = 0;
uint32_t test_hwpt_id = 0;
+ uint32_t iopf_hwpt_id;
+ uint32_t fault_id;
+ uint32_t fault_fd;
if (self->device_id) {
/* Negative tests */
@@ -326,6 +329,7 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
sizeof(data));
/* Allocate two nested hwpts sharing one common parent hwpt */
+ test_ioctl_fault_alloc(&fault_id, &fault_fd);
test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
&nested_hwpt_id[0],
IOMMU_HWPT_DATA_SELFTEST, &data,
@@ -334,6 +338,14 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
&nested_hwpt_id[1],
IOMMU_HWPT_DATA_SELFTEST, &data,
sizeof(data));
+ test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
+ UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
+ &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
+ &data, sizeof(data));
+ test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
+ IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
+ IOMMU_HWPT_DATA_SELFTEST, &data,
+ sizeof(data));
test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
IOMMU_TEST_IOTLB_DEFAULT);
test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
@@ -504,14 +516,24 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
_test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
test_ioctl_destroy(nested_hwpt_id[0]);
+ /* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
+ test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
+ EXPECT_ERRNO(EBUSY,
+ _test_ioctl_destroy(self->fd, iopf_hwpt_id));
+ /* Trigger an IOPF on the device */
+ test_cmd_trigger_iopf(self->device_id, fault_fd);
+
/* Detach from nested_hwpt_id[1] and destroy it */
test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
test_ioctl_destroy(nested_hwpt_id[1]);
+ test_ioctl_destroy(iopf_hwpt_id);
/* Detach from the parent hw_pagetable and destroy it */
test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
test_ioctl_destroy(parent_hwpt_id);
test_ioctl_destroy(parent_hwpt_id_not_work);
+ close(fault_fd);
+ test_ioctl_destroy(fault_id);
} else {
test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
&parent_hwpt_id);
@@ -1722,10 +1744,17 @@ FIXTURE_VARIANT(iommufd_dirty_tracking)
FIXTURE_SETUP(iommufd_dirty_tracking)
{
+ unsigned long size;
int mmap_flags;
void *vrc;
int rc;
+ if (variant->buffer_size < MOCK_PAGE_SIZE) {
+ SKIP(return,
+ "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
+ variant->buffer_size, MOCK_PAGE_SIZE);
+ }
+
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
@@ -1749,12 +1778,11 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
assert(vrc == self->buffer);
self->page_size = MOCK_PAGE_SIZE;
- self->bitmap_size =
- variant->buffer_size / self->page_size / BITS_PER_BYTE;
+ self->bitmap_size = variant->buffer_size / self->page_size;
/* Provision with an extra (PAGE_SIZE) for the unaligned case */
- rc = posix_memalign(&self->bitmap, PAGE_SIZE,
- self->bitmap_size + PAGE_SIZE);
+ size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
+ rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
assert(!rc);
assert(self->bitmap);
assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
@@ -1775,51 +1803,63 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
munmap(self->buffer, variant->buffer_size);
- munmap(self->bitmap, self->bitmap_size);
+ munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
teardown_iommufd(self->fd, _metadata);
}
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
+{
+	/* half of a u8 index bitmap */
+ .buffer_size = 8UL * 1024UL,
+};
+
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
+{
+ /* one u8 index bitmap */
+ .buffer_size = 16UL * 1024UL,
+};
+
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
{
/* one u32 index bitmap */
- .buffer_size = 128UL * 1024UL,
+ .buffer_size = 64UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
{
/* one u64 index bitmap */
- .buffer_size = 256UL * 1024UL,
+ .buffer_size = 128UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
{
/* two u64 index and trailing end bitmap */
- .buffer_size = 640UL * 1024UL,
+ .buffer_size = 320UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
{
- /* 4K bitmap (128M IOVA range) */
- .buffer_size = 128UL * 1024UL * 1024UL,
+ /* 4K bitmap (64M IOVA range) */
+ .buffer_size = 64UL * 1024UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
{
- /* 4K bitmap (128M IOVA range) */
- .buffer_size = 128UL * 1024UL * 1024UL,
+ /* 4K bitmap (64M IOVA range) */
+ .buffer_size = 64UL * 1024UL * 1024UL,
.hugepages = true,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
{
- /* 8K bitmap (256M IOVA range) */
- .buffer_size = 256UL * 1024UL * 1024UL,
+ /* 8K bitmap (128M IOVA range) */
+ .buffer_size = 128UL * 1024UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
{
- /* 8K bitmap (256M IOVA range) */
- .buffer_size = 256UL * 1024UL * 1024UL,
+ /* 8K bitmap (128M IOVA range) */
+ .buffer_size = 128UL * 1024UL * 1024UL,
.hugepages = true,
};
diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c
index f590417cd67a..c5d5e69452b0 100644
--- a/tools/testing/selftests/iommu/iommufd_fail_nth.c
+++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c
@@ -615,7 +615,7 @@ TEST_FAIL_NTH(basic_fail_nth, device)
if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL))
return -1;
- if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id,
+ if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, 0, &hwpt_id,
IOMMU_HWPT_DATA_NONE, 0, 0))
return -1;
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index 8d2b46b2114d..40f6f14ce136 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -22,6 +22,8 @@
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
@@ -153,7 +155,7 @@ static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
pt_id, NULL))
-static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
+static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
__u32 flags, __u32 *hwpt_id, __u32 data_type,
void *data, size_t data_len)
{
@@ -165,6 +167,7 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
.data_type = data_type,
.data_len = data_len,
.data_uptr = (uint64_t)data,
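+		/* fault_id is only consumed when flags carries IOMMU_HWPT_FAULT_ID_VALID */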
+ .fault_id = ft_id,
};
int ret;
@@ -177,24 +180,36 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
}
#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \
- ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
+ ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \
EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc( \
- self->fd, device_id, pt_id, flags, \
+ self->fd, device_id, pt_id, 0, flags, \
hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))
#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id, \
data_type, data, data_len) \
- ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
+ ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
data_type, data, data_len) \
EXPECT_ERRNO(_errno, \
- _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
+ _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
hwpt_id, data_type, data, data_len))
+#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id, \
+ data_type, data, data_len) \
+ ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
+ flags, hwpt_id, data_type, data, \
+ data_len))
+#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags, \
+ hwpt_id, data_type, data, data_len) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
+ flags, hwpt_id, data_type, data, \
+ data_len))
+
#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected) \
({ \
struct iommu_test_cmd test_cmd = { \
@@ -346,12 +361,12 @@ static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
__u64 iova, size_t page_size,
size_t pte_page_size, __u64 *bitmap,
- __u64 bitmap_size, __u32 flags,
+ __u64 nbits, __u32 flags,
struct __test_metadata *_metadata)
{
unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
- unsigned long nbits = bitmap_size * BITS_PER_BYTE;
unsigned long j, i, nr = nbits / pteset ?: 1;
+ unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
__u64 out_dirty = 0;
/* Mark all even bits as dirty in the mock domain */
@@ -684,3 +699,66 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))
+
+static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
+{
+ struct iommu_fault_alloc cmd = {
+ .size = sizeof(cmd),
+ };
+ int ret;
+
+ ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
+ if (ret)
+ return ret;
+ *fault_id = cmd.out_fault_id;
+ *fault_fd = cmd.out_fault_fd;
+ return 0;
+}
+
+#define test_ioctl_fault_alloc(fault_id, fault_fd) \
+ ({ \
+ ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
+ fault_fd)); \
+ ASSERT_NE(0, *(fault_id)); \
+ ASSERT_NE(0, *(fault_fd)); \
+ })
+
+static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd)
+{
+ struct iommu_test_cmd trigger_iopf_cmd = {
+ .size = sizeof(trigger_iopf_cmd),
+ .op = IOMMU_TEST_OP_TRIGGER_IOPF,
+ .trigger_iopf = {
+ .dev_id = device_id,
+ .pasid = 0x1,
+ .grpid = 0x2,
+ .perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
+ .addr = 0xdeadbeaf,
+ },
+ };
+ struct iommu_hwpt_page_response response = {
+ .code = IOMMUFD_PAGE_RESP_SUCCESS,
+ };
+ struct iommu_hwpt_pgfault fault = {};
+ ssize_t bytes;
+ int ret;
+
+ ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
+ if (ret)
+ return ret;
+
+ bytes = read(fault_fd, &fault, sizeof(fault));
+ if (bytes <= 0)
+ return -EIO;
+
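+	/* echo the kernel-supplied cookie so the response is matched to this fault */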
+ response.cookie = fault.cookie;
+
+ bytes = write(fault_fd, &response, sizeof(response));
+ if (bytes <= 0)
+ return -EIO;
+
+ return 0;
+}
+
+#define test_cmd_trigger_iopf(device_id, fault_fd) \
+ ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 73895711cdf4..5f3c28fc8624 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -1737,53 +1737,53 @@ ipv4_rt_dsfield()
# DSCP 0x10 should match the specific route, no matter the ECN bits
$IP route get fibmatch 172.16.102.1 dsfield 0x10 | \
- grep -q "via 172.16.103.2"
+ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
log_test $? 0 "IPv4 route with DSCP and ECN:Not-ECT"
$IP route get fibmatch 172.16.102.1 dsfield 0x11 | \
- grep -q "via 172.16.103.2"
+ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
log_test $? 0 "IPv4 route with DSCP and ECN:ECT(1)"
$IP route get fibmatch 172.16.102.1 dsfield 0x12 | \
- grep -q "via 172.16.103.2"
+ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
log_test $? 0 "IPv4 route with DSCP and ECN:ECT(0)"
$IP route get fibmatch 172.16.102.1 dsfield 0x13 | \
- grep -q "via 172.16.103.2"
+ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
log_test $? 0 "IPv4 route with DSCP and ECN:CE"
# Unknown DSCP should match the generic route, no matter the ECN bits
$IP route get fibmatch 172.16.102.1 dsfield 0x14 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with unknown DSCP and ECN:Not-ECT"
$IP route get fibmatch 172.16.102.1 dsfield 0x15 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(1)"
$IP route get fibmatch 172.16.102.1 dsfield 0x16 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(0)"
$IP route get fibmatch 172.16.102.1 dsfield 0x17 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with unknown DSCP and ECN:CE"
# Null DSCP should match the generic route, no matter the ECN bits
$IP route get fibmatch 172.16.102.1 dsfield 0x00 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with no DSCP and ECN:Not-ECT"
$IP route get fibmatch 172.16.102.1 dsfield 0x01 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(1)"
$IP route get fibmatch 172.16.102.1 dsfield 0x02 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(0)"
$IP route get fibmatch 172.16.102.1 dsfield 0x03 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with no DSCP and ECN:CE"
}
diff --git a/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh b/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh
index 0760a34b7114..a21b7085da2e 100755
--- a/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh
@@ -178,6 +178,22 @@ fdb_del()
check_err $? "Failed to remove a FDB entry of type ${type}"
}
+check_fdb_n_learned_support()
+{
+ if ! ip link help bridge 2>&1 | grep -q "fdb_max_learned"; then
+ echo "SKIP: iproute2 too old, missing bridge max learned support"
+ exit $ksft_skip
+ fi
+
+ ip link add dev br0 type bridge
+ local learned=$(fdb_get_n_learned)
+ ip link del dev br0
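+	# fdb_get_n_learned prints "null" when the kernel does not expose the counter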
+ if [ "$learned" == "null" ]; then
+ echo "SKIP: kernel too old; bridge fdb_n_learned feature not supported."
+ exit $ksft_skip
+ fi
+}
+
check_accounting_one_type()
{
local type=$1 is_counted=$2 overrides_learned=$3
@@ -274,6 +290,8 @@ check_limit()
done
}
+check_fdb_n_learned_support
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/netfilter/nft_concat_range.sh b/tools/testing/selftests/net/netfilter/nft_concat_range.sh
index 6d66240e149c..47088b005390 100755
--- a/tools/testing/selftests/net/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/net/netfilter/nft_concat_range.sh
@@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
net6_port_net6_port net_port_mac_proto_net"
# Reported bugs, also described by TYPE_ variables below
-BUGS="flush_remove_add reload"
+BUGS="flush_remove_add reload net_port_proto_match"
# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
@@ -371,6 +371,22 @@ race_repeat 0
perf_duration 0
"
+TYPE_net_port_proto_match="
+display net,port,proto
+type_spec ipv4_addr . inet_service . inet_proto
+chain_spec ip daddr . udp dport . meta l4proto
+dst addr4 port proto
+src
+start 1
+count 9
+src_delta 9
+tools sendip bash
+proto udp
+
+race_repeat 0
+
+perf_duration 0
+"
# Set template for all tests, types and rules are filled in depending on test
set_template='
flush ruleset
@@ -1555,6 +1571,64 @@ test_bug_reload() {
nft flush ruleset
}
+# - add ranged elements, check they can be fetched back
+# - delete some elements again, check they are gone
+test_bug_net_port_proto_match() {
+ setup veth send_"${proto}" set || return ${ksft_skip}
+ rstart=${start}
+
+ range_size=1
+ for i in $(seq 1 10); do
+ for j in $(seq 1 20) ; do
+ elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
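+			# e.g. i=1, j=2 yields "10.1.2.0/24 . 11-20 . 6-17"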
+
+ nft "add element inet filter test { $elem }" || return 1
+ nft "get element inet filter test { $elem }" | grep -q "$elem"
+ if [ $? -ne 0 ];then
+ local got=$(nft "get element inet filter test { $elem }")
+ err "post-add: should have returned $elem but got $got"
+ return 1
+ fi
+ done
+ done
+
+ # recheck after set was filled
+ for i in $(seq 1 10); do
+ for j in $(seq 1 20) ; do
+ elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+
+ nft "get element inet filter test { $elem }" | grep -q "$elem"
+ if [ $? -ne 0 ];then
+ local got=$(nft "get element inet filter test { $elem }")
+ err "post-fill: should have returned $elem but got $got"
+ return 1
+ fi
+ done
+ done
+
+ # random del and re-fetch
+ for i in $(seq 1 10); do
+ for j in $(seq 1 20) ; do
+ local rnd=$((RANDOM%10))
+ local got=""
+
+ elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+ if [ $rnd -gt 0 ];then
+ continue
+ fi
+
+ nft "delete element inet filter test { $elem }"
+ got=$(nft "get element inet filter test { $elem }" 2>/dev/null)
+ if [ $? -eq 0 ];then
+ err "post-delete: query for $elem returned $got instead of error."
+ return 1
+ fi
+ done
+ done
+
+ nft flush ruleset
+}
+
test_reported_issues() {
eval test_bug_"${subtest}"
}
diff --git a/tools/testing/selftests/x86/test_shadow_stack.c b/tools/testing/selftests/x86/test_shadow_stack.c
index ee909a7927f9..21af54d5f4ea 100644
--- a/tools/testing/selftests/x86/test_shadow_stack.c
+++ b/tools/testing/selftests/x86/test_shadow_stack.c
@@ -34,6 +34,7 @@
#include <sys/ptrace.h>
#include <sys/signal.h>
#include <linux/elf.h>
+#include <linux/perf_event.h>
/*
* Define the ABI defines if needed, so people can run the tests
@@ -734,6 +735,144 @@ int test_32bit(void)
return !segv_triggered;
}
+static int parse_uint_from_file(const char *file, const char *fmt)
+{
+ int err, ret;
+ FILE *f;
+
+ f = fopen(file, "re");
+ if (!f) {
+ err = -errno;
+ printf("failed to open '%s': %d\n", file, err);
+ return err;
+ }
+ err = fscanf(f, fmt, &ret);
+ if (err != 1) {
+ err = err == EOF ? -EIO : -errno;
+ printf("failed to parse '%s': %d\n", file, err);
+ fclose(f);
+ return err;
+ }
+ fclose(f);
+ return ret;
+}
+
+static int determine_uprobe_perf_type(void)
+{
+ const char *file = "/sys/bus/event_source/devices/uprobe/type";
+
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int determine_uprobe_retprobe_bit(void)
+{
+ const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
+
+ return parse_uint_from_file(file, "config:%d\n");
+}
+
+static ssize_t get_uprobe_offset(const void *addr)
+{
+ size_t start, end, base;
+ char buf[256];
+ bool found = false;
+ FILE *f;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!f)
+ return -errno;
+
+ while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) {
+ if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) {
+ found = true;
+ break;
+ }
+ }
+
+ fclose(f);
+
+ if (!found)
+ return -ESRCH;
+
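+	/* translate the virtual address into a file offset inside the mapping */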
+ return (uintptr_t)addr - start + base;
+}
+
+static __attribute__((noinline)) void uretprobe_trigger(void)
+{
+ asm volatile ("");
+}
+
+/*
+ * This test sets up a return uprobe, which is sensitive to shadow stack
+ * (it crashes without the extra fix). After executing the uretprobe we
+ * fail the test if we receive SIGSEGV; no crash means we're good.
+ *
+ * Helper functions above borrowed from bpf selftests.
+ */
+static int test_uretprobe(void)
+{
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ const char *file = "/proc/self/exe";
+ int bit, fd = 0, type, err = 1;
+ struct perf_event_attr attr;
+ struct sigaction sa = {};
+ ssize_t offset;
+
+ type = determine_uprobe_perf_type();
+ if (type < 0) {
+ if (type == -ENOENT)
+ printf("[SKIP]\tUretprobe test, uprobes are not available\n");
+ return 0;
+ }
+
+ offset = get_uprobe_offset(uretprobe_trigger);
+ if (offset < 0)
+ return 1;
+
+ bit = determine_uprobe_retprobe_bit();
+ if (bit < 0)
+ return 1;
+
+ sa.sa_sigaction = segv_gp_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &sa, NULL))
+ return 1;
+
+	/*
+	 * Set up the return uprobe through the perf event interface: the
+	 * retprobe bit goes into config, the probed binary path into
+	 * config1 and the file offset into config2.
+	 */
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
+ attr.type = type;
+ attr.config = 1 << bit;
+ attr.config1 = (__u64) (unsigned long) file;
+ attr.config2 = offset;
+
+ fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */, -1 /* cpu */,
+ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+ if (fd < 0)
+ goto out;
+
+ if (sigsetjmp(jmp_buffer, 1))
+ goto out;
+
+ ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK);
+
+ /*
+ * This either segfaults and goes through sigsetjmp above
+ * or succeeds and we're good.
+ */
+ uretprobe_trigger();
+
+ printf("[OK]\tUretprobe test\n");
+ err = 0;
+
+out:
+ ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
+ signal(SIGSEGV, SIG_DFL);
+ if (fd)
+ close(fd);
+ return err;
+}
+
void segv_handler_ptrace(int signum, siginfo_t *si, void *uc)
{
/* The SSP adjustment caused a segfault. */
@@ -926,6 +1065,12 @@ int main(int argc, char *argv[])
goto out;
}
+ if (test_uretprobe()) {
+ ret = 1;
+ printf("[FAIL]\turetprobe test\n");
+ goto out;
+ }
+
return ret;
out: