From 5504f67944484495a5d8504d11fb998af05fe248 Mon Sep 17 00:00:00 2001
From: Marco Elver
Date: Mon, 15 Nov 2021 12:28:23 +0100
Subject: perf test sigtrap: Add basic stress test for sigtrap handling

Add a basic stress test for sigtrap handling as a perf tool built-in
test. This allows sanity checking the basic sigtrap functionality from
within the perf tool.

Committer notes:

Reported that !root was getting -EPERM, so applied a fixup from Marco
to set .exclude_{hv,kernel}, which made it work.

Signed-off-by: Marco Elver
Tested-by: Arnaldo Carvalho de Melo
Cc: Adrian Hunter
Cc: Alexander Shishkin
Cc: Fabian Hemmer
Cc: Ian Rogers
Cc: Jiri Olsa
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: kasan-dev@googlegroups.com
Link: http://lore.kernel.org/lkml/20211115112822.4077224-1-elver@google.com
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/tests/Build          |   1 +
 tools/perf/tests/builtin-test.c |   1 +
 tools/perf/tests/sigtrap.c      | 156 ++++++++++++++++++++++++++++++++++++++++
 tools/perf/tests/tests.h        |   1 +
 4 files changed, 159 insertions(+)
 create mode 100644 tools/perf/tests/sigtrap.c

diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 803ca426f8e6..af2b37ef7c70 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -65,6 +65,7 @@ perf-y += pe-file-parsing.o
 perf-y += expand-cgroup.o
 perf-y += perf-time-to-tsc.o
 perf-y += dlfilter-test.o
+perf-y += sigtrap.o

 $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
 	$(call rule_mkdir)
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 8cb5a1c3489e..f1e6d2a3a578 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -107,6 +107,7 @@ static struct test_suite *generic_tests[] = {
 	&suite__expand_cgroup_events,
 	&suite__perf_time_to_tsc,
 	&suite__dlfilter,
+	&suite__sigtrap,
 	NULL,
 };

diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c
new file mode 100644
index 000000000000..de409f21f952
--- /dev/null
+++ b/tools/perf/tests/sigtrap.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic test for sigtrap support.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <linux/hw_breakpoint.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "cloexec.h"
+#include "debug.h"
+#include "event.h"
+#include "tests.h"
+#include "../perf-sys.h"
+
+#define NUM_THREADS 5
+
+static struct {
+	int tids_want_signal;		/* Which threads still want a signal. */
+	int signal_count;		/* Sanity check number of signals received. */
+	volatile int iterate_on;	/* Variable to set breakpoint on. */
+	siginfo_t first_siginfo;	/* First observed siginfo_t. */
+} ctx;
+
+#define TEST_SIG_DATA (~(unsigned long)(&ctx.iterate_on))
+
+static struct perf_event_attr make_event_attr(void)
+{
+	struct perf_event_attr attr = {
+		.type		= PERF_TYPE_BREAKPOINT,
+		.size		= sizeof(attr),
+		.sample_period	= 1,
+		.disabled	= 1,
+		.bp_addr	= (unsigned long)&ctx.iterate_on,
+		.bp_type	= HW_BREAKPOINT_RW,
+		.bp_len		= HW_BREAKPOINT_LEN_1,
+		.inherit	= 1, /* Children inherit events ... */
+		.inherit_thread	= 1, /* ... but only cloned with CLONE_THREAD. */
+		.remove_on_exec	= 1, /* Required by sigtrap. */
+		.sigtrap	= 1, /* Request synchronous SIGTRAP on event. */
+		.sig_data	= TEST_SIG_DATA,
+		.exclude_kernel	= 1, /* To allow */
+		.exclude_hv	= 1, /* running as !root */
+	};
+	return attr;
+}
+
+static void
+sigtrap_handler(int signum __maybe_unused, siginfo_t *info, void *ucontext __maybe_unused)
+{
+	if (!__atomic_fetch_add(&ctx.signal_count, 1, __ATOMIC_RELAXED))
+		ctx.first_siginfo = *info;
+	__atomic_fetch_sub(&ctx.tids_want_signal, syscall(SYS_gettid), __ATOMIC_RELAXED);
+}
+
+static void *test_thread(void *arg)
+{
+	pthread_barrier_t *barrier = (pthread_barrier_t *)arg;
+	pid_t tid = syscall(SYS_gettid);
+	int i;
+
+	pthread_barrier_wait(barrier);
+
+	__atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
+	for (i = 0; i < ctx.iterate_on - 1; i++)
+		__atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
+
+	return NULL;
+}
+
+static int run_test_threads(pthread_t *threads, pthread_barrier_t *barrier)
+{
+	int i;
+
+	pthread_barrier_wait(barrier);
+	for (i = 0; i < NUM_THREADS; i++)
+		TEST_ASSERT_EQUAL("pthread_join() failed", pthread_join(threads[i], NULL), 0);
+
+	return TEST_OK;
+}
+
+static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrier)
+{
+	int ret;
+
+	ctx.iterate_on = 3000;
+
+	TEST_ASSERT_EQUAL("misfired signal?", ctx.signal_count, 0);
+	TEST_ASSERT_EQUAL("enable failed", ioctl(fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+	ret = run_test_threads(threads, barrier);
+	TEST_ASSERT_EQUAL("disable failed", ioctl(fd, PERF_EVENT_IOC_DISABLE, 0), 0);
+
+	TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, NUM_THREADS * ctx.iterate_on);
+	TEST_ASSERT_EQUAL("missing signals or incorrectly delivered", ctx.tids_want_signal, 0);
+	TEST_ASSERT_VAL("unexpected si_addr", ctx.first_siginfo.si_addr == &ctx.iterate_on);
+#if 0 /* FIXME: enable when libc's signal.h has si_perf_{type,data} */
+	TEST_ASSERT_EQUAL("unexpected si_perf_type", ctx.first_siginfo.si_perf_type,
+			  PERF_TYPE_BREAKPOINT);
+	TEST_ASSERT_EQUAL("unexpected si_perf_data", ctx.first_siginfo.si_perf_data,
+			  TEST_SIG_DATA);
+#endif
+
+	return ret;
+}
+
+static int test__sigtrap(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+	struct perf_event_attr attr = make_event_attr();
+	struct sigaction action = {};
+	struct sigaction oldact;
+	pthread_t threads[NUM_THREADS];
+	pthread_barrier_t barrier;
+	int i, fd, ret = TEST_FAIL;
+
+	pthread_barrier_init(&barrier, NULL, NUM_THREADS + 1);
+
+	action.sa_flags = SA_SIGINFO | SA_NODEFER;
+	action.sa_sigaction = sigtrap_handler;
+	sigemptyset(&action.sa_mask);
+	if (sigaction(SIGTRAP, &action, &oldact)) {
+		pr_debug("FAILED sigaction()\n");
+		goto out;
+	}
+
+	fd = sys_perf_event_open(&attr, 0, -1, -1, perf_event_open_cloexec_flag());
+	if (fd < 0) {
+		pr_debug("FAILED sys_perf_event_open()\n");
+		goto out_restore_sigaction;
+	}
+
+	for (i = 0; i < NUM_THREADS; i++) {
+		if (pthread_create(&threads[i], NULL, test_thread, &barrier)) {
+			pr_debug("FAILED pthread_create()");
+			goto out_close_perf_event;
+		}
+	}
+
+	ret = run_stress_test(fd, threads, &barrier);
+
+out_close_perf_event:
+	close(fd);
+out_restore_sigaction:
+	sigaction(SIGTRAP, &oldact, NULL);
+out:
+	pthread_barrier_destroy(&barrier);
+	return ret;
+}
+
+DEFINE_SUITE("Sigtrap", sigtrap);
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 8f65098110fc..5bbb8f6a48fc 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -146,6 +146,7 @@ DECLARE_SUITE(pe_file_parsing);
 DECLARE_SUITE(expand_cgroup_events);
 DECLARE_SUITE(perf_time_to_tsc);
 DECLARE_SUITE(dlfilter);
+DECLARE_SUITE(sigtrap);

 /*
  * PowerPC and S390 do not support creation of instruction breakpoints using the
-- cgit v1.2.3
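The test above exercises the kernel's synchronous SIGTRAP delivery
(perf_event_attr.sigtrap plus sig_data, merged in Linux 5.13) under
thread churn. For experimenting with the mechanism outside the perf
test harness, here is a minimal standalone sketch; it assumes a 5.13+
kernel whose UAPI headers expose the sigtrap bit, keeps error handling
to the bare minimum, and replaces the perf-internal helpers
(sys_perf_event_open(), TEST_ASSERT_EQUAL) with a raw syscall and
printf:

  /* Minimal sketch, not from the perf tree: request a synchronous
   * SIGTRAP on every access to a watched variable. */
  #include <linux/hw_breakpoint.h>
  #include <linux/perf_event.h>
  #include <signal.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static volatile int watched;       /* variable the breakpoint watches */
  static volatile int signals_seen;

  static void handler(int sig, siginfo_t *info, void *uctx)
  {
  	(void)sig; (void)info; (void)uctx;
  	signals_seen++;            /* one signal per access to 'watched' */
  }

  int main(void)
  {
  	struct perf_event_attr attr = {0};
  	struct sigaction sa = {0};
  	int fd;

  	sa.sa_flags = SA_SIGINFO | SA_NODEFER;
  	sa.sa_sigaction = handler;
  	sigaction(SIGTRAP, &sa, NULL);

  	attr.type = PERF_TYPE_BREAKPOINT;
  	attr.size = sizeof(attr);
  	attr.sample_period = 1;            /* signal on every hit */
  	attr.bp_addr = (unsigned long)&watched;
  	attr.bp_type = HW_BREAKPOINT_RW;
  	attr.bp_len = HW_BREAKPOINT_LEN_1;
  	attr.remove_on_exec = 1;           /* required when sigtrap == 1 */
  	attr.sigtrap = 1;                  /* deliver synchronous SIGTRAP */
  	attr.exclude_kernel = 1;           /* allow running as !root */
  	attr.exclude_hv = 1;

  	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
  	if (fd < 0) {
  		perror("perf_event_open");
  		return 1;
  	}

  	watched = 42;                      /* trips the breakpoint once */

  	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
  	printf("signals seen: %d\n", signals_seen);
  	close(fd);
  	return 0;
  }

A single write to the watched variable should report one signal; the
built-in test does the same thing 3000 times across 5 threads.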
From e9c08f722924c58041d2e0d90ea27140a4625776 Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Tue, 16 Nov 2021 09:51:48 -0300
Subject: perf test sigtrap: Print errno string when failing

Helps the user figure out why it is failing:

Before:

  $ perf test sigtrap
  73: Sigtrap                              : FAILED!
  $ perf test -v sigtrap
  73: Sigtrap                              :
  --- start ---
  test child forked, pid 3816772
  FAILED sys_perf_event_open()
  test child finished with -1
  ---- end ----
  Sigtrap: FAILED!
  $

After:

  $ perf test sigtrap
  73: Sigtrap                              : FAILED!
  $ perf test -v sigtrap
  73: Sigtrap                              :
  --- start ---
  test child forked, pid 3816772
  FAILED sys_perf_event_open(): Permission denied
  test child finished with -1
  ---- end ----
  Sigtrap: FAILED!
  $

Cc: Adrian Hunter
Cc: Alexander Shishkin
Cc: Fabian Hemmer
Cc: Ian Rogers
Cc: Ingo Molnar
Cc: Jiri Olsa
Cc: Marco Elver
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: kasan-dev@googlegroups.com
Link: http://lore.kernel.org/lkml/YZOpSVOCXe0zWeRs@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/tests/sigtrap.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c
index de409f21f952..1004bf0e7cc9 100644
--- a/tools/perf/tests/sigtrap.c
+++ b/tools/perf/tests/sigtrap.c
@@ -5,9 +5,11 @@
  * Copyright (C) 2021, Google LLC.
  */

+#include <errno.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <linux/hw_breakpoint.h>
+#include <string.h>
 #include <pthread.h>
 #include <signal.h>
 #include <sys/ioctl.h>
@@ -117,6 +119,7 @@ static int test__sigtrap(struct test_suite *test __maybe_unused, int subtest __m
 	struct sigaction oldact;
 	pthread_t threads[NUM_THREADS];
 	pthread_barrier_t barrier;
+	char sbuf[STRERR_BUFSIZE];
 	int i, fd, ret = TEST_FAIL;

 	pthread_barrier_init(&barrier, NULL, NUM_THREADS + 1);
@@ -125,19 +128,19 @@ static int test__sigtrap(struct test_suite *test __maybe_unused, int subtest __m
 	action.sa_sigaction = sigtrap_handler;
 	sigemptyset(&action.sa_mask);
 	if (sigaction(SIGTRAP, &action, &oldact)) {
-		pr_debug("FAILED sigaction()\n");
+		pr_debug("FAILED sigaction(): %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
 		goto out;
 	}

 	fd = sys_perf_event_open(&attr, 0, -1, -1, perf_event_open_cloexec_flag());
 	if (fd < 0) {
-		pr_debug("FAILED sys_perf_event_open()\n");
+		pr_debug("FAILED sys_perf_event_open(): %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
 		goto out_restore_sigaction;
 	}

 	for (i = 0; i < NUM_THREADS; i++) {
 		if (pthread_create(&threads[i], NULL, test_thread, &barrier)) {
-			pr_debug("FAILED pthread_create()");
+			pr_debug("FAILED pthread_create(): %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
 			goto out_close_perf_event;
 		}
 	}
-- cgit v1.2.3
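str_error_r() used above is perf's thread-safe errno formatter from
tools/lib/str_error_r.c. It exists because plain strerror() is not
thread-safe and strerror_r() comes in two incompatible variants: the
XSI one returns an int and always fills the caller's buffer, while the
GNU one returns a char * that may point at a static string instead of
the buffer. A small standalone sketch of the trap (illustrative, not
perf code):

  /* Why a wrapper helps: with _GNU_SOURCE, the safe pattern is to use
   * the returned pointer, never 'buf' directly, since glibc may not
   * have written into 'buf' at all. */
  #define _GNU_SOURCE
  #include <errno.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
  	char buf[128];
  	const char *msg = strerror_r(EPERM, buf, sizeof(buf));

  	printf("EPERM: %s\n", msg);
  	return 0;
  }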
From c77a78c29177f9a614915e5158a7b6bb89e0e8db Mon Sep 17 00:00:00 2001
From: John Garry
Date: Sat, 30 Oct 2021 00:30:41 +0800
Subject: tools build: Enable warnings through HOSTCFLAGS

The tools build system uses the KBUILD_HOSTCFLAGS symbol for obvious
purposes, but it is not set for anything under tools/. As such, host
tool apps are built with no compiler warnings enabled.

Declare HOSTCFLAGS for the perf tools build, and use that symbol in the
declaration of host_c_flags. HOSTCFLAGS comes from EXTRA_WARNINGS, which
is independent of the target platform/arch warning flags.
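As a rough illustration of what this buys: host tools are small C
programs, and with HOSTCFLAGS now carrying EXTRA_WARNINGS, mistakes of
the following kind (a made-up example, not code from the tree) are
reported by gcc -Wall -Wextra instead of compiling silently:

  #include <stdio.h>

  static void report(int fd, int verbose)   /* -Wunused-parameter: verbose */
  {
  	unsigned int retries = 3;

  	if (retries > fd)                  /* -Wsign-compare: int vs unsigned */
  		fprintf(stderr, "fd %d: retrying\n", fd);
  }

  int main(void)
  {
  	report(0, 1);
  	return 0;
  }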
Suggested-by: Jiri Olsa Signed-off-by: John Garry Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Laura Abbott Cc: Masahiro Yamada Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/1635525041-151876-1-git-send-email-john.garry@huawei.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/build/Build.include | 2 +- tools/perf/Makefile.config | 5 +++++ tools/perf/Makefile.perf | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/build/Build.include b/tools/build/Build.include index 2cf3b1bde86e..c2a95ab47379 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -99,7 +99,7 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX ### ## HOSTCC C flags -host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(KBUILD_HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj)) +host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj)) # output directory for tests below TMPOUT = .tmp_$$$$ diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 3df74cf5651a..94bb53b0cebd 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -17,6 +17,7 @@ detected = $(shell echo "$(1)=y" >> $(OUTPUT).config-detected) detected_var = $(shell echo "$(1)=$($(1))" >> $(OUTPUT).config-detected) CFLAGS := $(EXTRA_CFLAGS) $(filter-out -Wnested-externs,$(EXTRA_WARNINGS)) +HOSTCFLAGS := $(filter-out -Wnested-externs,$(EXTRA_WARNINGS)) include $(srctree)/tools/scripts/Makefile.arch @@ -211,6 +212,7 @@ endif ifneq ($(WERROR),0) CORE_CFLAGS += -Werror CXXFLAGS += -Werror + HOSTCFLAGS += -Werror endif ifndef DEBUG @@ -290,6 +292,9 @@ CXXFLAGS += -ggdb3 CXXFLAGS += -funwind-tables CXXFLAGS += -Wno-strict-aliasing +HOSTCFLAGS += -Wall +HOSTCFLAGS += -Wextra + # Enforce a non-executable stack, as we may regress (again) in the future by # adding assembler files missing the .GNU-stack linker note. LDFLAGS += -Wl,-z,noexecstack diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 80522bcfafe0..164a37523781 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -226,7 +226,7 @@ else endif export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK -export HOSTCC HOSTLD HOSTAR +export HOSTCC HOSTLD HOSTAR HOSTCFLAGS include $(srctree)/tools/build/Makefile.include -- cgit v1.2.3 From 9a5b2d1afa9f888335ab63e922ba5eed31383020 Mon Sep 17 00:00:00 2001 From: Shunsuke Nakamura Date: Tue, 9 Nov 2021 17:58:29 +0900 Subject: libperf: Adopt perf_counts_values__scale() from tools/perf/util Move perf_counts_values__scale() from tools/perf/util to tools/lib/perf so that it can be used with libperf. Committer notes: As noted by Jiri, use __s8 instead of s8 on the exported function. 
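The scaling rule the moved function implements is the usual
extrapolation for multiplexed counters: if the event was scheduled for
only part of the time it was enabled, val is scaled to val * ena / run.
A usage sketch of the newly exported API; the counter numbers are taken
from the libperf test log quoted later in this series, while the
program around them is illustrative:

  /* Sketch of the new libperf API; link with -lperf. */
  #include <perf/evsel.h>
  #include <stdio.h>

  int main(void)
  {
  	struct perf_counts_values count = {
  		.val = 298049842,     /* raw count */
  		.ena = 456262127,     /* time the event was enabled */
  		.run = 270269503,     /* time it was actually scheduled */
  	};
  	__s8 scaled;

  	/* val becomes val * ena / run, roughly 503160191 here;
  	 * scaled == 1 means the value was extrapolated, -1 means the
  	 * event never ran, 0 means no scaling was needed. */
  	perf_counts_values__scale(&count, true, &scaled);
  	printf("scaled val = %llu (scaled=%d)\n",
  	       (unsigned long long)count.val, scaled);
  	return 0;
  }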
Signed-off-by: Shunsuke Nakamura Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Rob Herring Link: https://lore.kernel.org/r/20211109085831.3770594-2-nakamura.shun@fujitsu.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/evsel.c | 19 +++++++++++++++++++ tools/lib/perf/include/perf/evsel.h | 4 ++++ tools/lib/perf/libperf.map | 1 + tools/perf/util/evsel.c | 19 ------------------- tools/perf/util/evsel.h | 3 --- 5 files changed, 24 insertions(+), 22 deletions(-) diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index 8441e3e1aaac..68f83d2c27c1 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -431,3 +431,22 @@ void perf_evsel__free_id(struct perf_evsel *evsel) zfree(&evsel->id); evsel->ids = 0; } + +void perf_counts_values__scale(struct perf_counts_values *count, + bool scale, __s8 *pscaled) +{ + s8 scaled = 0; + + if (scale) { + if (count->run == 0) { + scaled = -1; + count->val = 0; + } else if (count->run < count->ena) { + scaled = 1; + count->val = (u64)((double)count->val * count->ena / count->run); + } + } + + if (pscaled) + *pscaled = scaled; +} diff --git a/tools/lib/perf/include/perf/evsel.h b/tools/lib/perf/include/perf/evsel.h index 60eae25076d3..f401c7484bec 100644 --- a/tools/lib/perf/include/perf/evsel.h +++ b/tools/lib/perf/include/perf/evsel.h @@ -4,6 +4,8 @@ #include #include +#include +#include struct perf_evsel; struct perf_event_attr; @@ -39,5 +41,7 @@ LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu); LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel); LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel); LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel); +LIBPERF_API void perf_counts_values__scale(struct perf_counts_values *count, + bool scale, __s8 *pscaled); #endif /* __LIBPERF_EVSEL_H */ diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map index 71468606e8a7..5979bf92d98f 100644 --- a/tools/lib/perf/libperf.map +++ b/tools/lib/perf/libperf.map @@ -50,6 +50,7 @@ LIBPERF_0.0.1 { perf_mmap__read_init; perf_mmap__read_done; perf_mmap__read_event; + perf_counts_values__scale; local: *; }; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index ac0127be0459..656c30b988ce 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1476,25 +1476,6 @@ void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread, count->run = count->run - tmp.run; } -void perf_counts_values__scale(struct perf_counts_values *count, - bool scale, s8 *pscaled) -{ - s8 scaled = 0; - - if (scale) { - if (count->run == 0) { - scaled = -1; - count->val = 0; - } else if (count->run < count->ena) { - scaled = 1; - count->val = (u64)((double) count->val * count->ena / count->run); - } - } - - if (pscaled) - *pscaled = scaled; -} - static int evsel__read_one(struct evsel *evsel, int cpu, int thread) { struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 29d49a8c1e92..99aa3363def7 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -195,9 +195,6 @@ static inline int evsel__nr_cpus(struct evsel *evsel) return evsel__cpus(evsel)->nr; } -void perf_counts_values__scale(struct perf_counts_values *count, - bool scale, s8 *pscaled); - void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread, struct perf_counts_values 
*count); -- cgit v1.2.3 From f2c4dcf191904d28d710290eea4a623710eee57c Mon Sep 17 00:00:00 2001 From: Shunsuke Nakamura Date: Tue, 9 Nov 2021 17:58:30 +0900 Subject: libperf: Remove scaling process from perf_mmap__read_self() Remove the scaling process from perf_mmap__read_self(), and unify the counters that can be obtained from perf_evsel__read() to "no scaling". Signed-off-by: Shunsuke Nakamura Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Rob Herring Link: https://lore.kernel.org/r/20211109085831.3770594-3-nakamura.shun@fujitsu.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/mmap.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c index c89dfa5f67b3..aaa457904008 100644 --- a/tools/lib/perf/mmap.c +++ b/tools/lib/perf/mmap.c @@ -353,8 +353,6 @@ int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count count->ena += delta; if (idx) count->run += delta; - - cnt = mul_u64_u64_div64(cnt, count->ena, count->run); } count->val = cnt; -- cgit v1.2.3 From a7f3713f6bf207e6d8dd484704dba6089f7ad8db Mon Sep 17 00:00:00 2001 From: Shunsuke Nakamura Date: Tue, 9 Nov 2021 17:58:31 +0900 Subject: libperf tests: Add test_stat_multiplexing test Adds a test for a counter obtained using read() system call during multiplexing. $ sudo make tests -C ./tools/lib/perf/ V=1 make: Entering directory '/home/nakamura/build_work/build_kernel/linux_kernel/linux/tools/lib/perf' make -f /home/nakamura/build_work/build_kernel/linux_kernel/linux/tools/build/Makefile.build dir=. obj=libperf make -C /home/nakamura/build_work/build_kernel/linux_kernel/linux/tools/lib/api/ O= libapi.a make -f /home/nakamura/build_work/build_kernel/linux_kernel/linux/tools/build/Makefile.build dir=./fd obj=libapi make -f /home/nakamura/build_work/build_kernel/linux_kernel/linux/tools/build/Makefile.build dir=./fs obj=libapi make -f /home/nakamura/build_work/build_kernel/linux_kernel/linux/tools/build/Makefile.build dir=. obj=tests make -f /home/nakamura/build_work/build_kernel/linux_kernel/linux/tools/build/Makefile.build dir=./tests obj=tests running static: - running tests/test-cpumap.c...OK - running tests/test-threadmap.c...OK - running tests/test-evlist.c... 
Event 0 -- Raw count = 298049842, run = 270269503, enable = 456262127 Scaled count = 503160191 (59.24%, 270269503/456262127) Event 1 -- Raw count = 299134173, run = 271075173, enable = 456257234 Scaled count = 503484435 (59.41%, 271075173/456257234) Event 2 -- Raw count = 300461996, run = 272069283, enable = 456253417 Scaled count = 503867290 (59.63%, 272069283/456253417) Event 3 -- Raw count = 301308704, run = 273063387, enable = 456249352 Scaled count = 503443183 (59.85%, 273063387/456249352) Event 4 -- Raw count = 302531164, run = 274102932, enable = 456244712 Scaled count = 503563543 (60.08%, 274102932/456244712) Event 5 -- Raw count = 303710254, run = 275406214, enable = 456228165 Scaled count = 503115633 (60.37%, 275406214/456228165) Event 6 -- Raw count = 304531302, run = 276396076, enable = 456221130 Scaled count = 502661313 (60.58%, 276396076/456221130) Event 7 -- Raw count = 304486460, run = 276601890, enable = 456213754 Scaled count = 502205212 (60.63%, 276601890/456213754) Event 8 -- Raw count = 304116681, run = 276631326, enable = 456205562 Scaled count = 501532936 (60.64%, 276631326/456205562) Event 9 -- Raw count = 303567766, run = 276188567, enable = 456196839 Scaled count = 501420666 (60.54%, 276188567/456196839) Event 10 -- Raw count = 302238014, run = 275144001, enable = 456185300 Scaled count = 501106833 (60.31%, 275144001/456185300) Event 11 -- Raw count = 300805716, run = 273824589, enable = 456175608 Scaled count = 501124573 (60.03%, 273824589/456175608) Event 12 -- Raw count = 299959051, run = 272834556, enable = 456166593 Scaled count = 501517477 (59.81%, 272834556/456166593) Event 13 -- Raw count = 299037090, run = 271820805, enable = 456157086 Scaled count = 501830195 (59.59%, 271820805/456157086) Event 14 -- Raw count = 298327042, run = 270784311, enable = 456147546 Scaled count = 502544433 (59.36%, 270784311/456147546) Expected: 501614268 High: 503867290 Low: 298049842 Average: 502438527 Average Error = 0.16% OK - running tests/test-evsel.c... loop = 65536, count = 328182 loop = 131072, count = 660214 loop = 262144, count = 1315534 loop = 524288, count = 2635364 loop = 1048576, count = 5271971 loop = 65536, count = 491952 loop = 131072, count = 850061 loop = 262144, count = 1648608 loop = 524288, count = 3162059 loop = 1048576, count = 6353393 OK running dynamic: - running tests/test-cpumap.c...OK - running tests/test-threadmap.c...OK - running tests/test-evlist.c... 
Event 0 -- Raw count = 300218292, run = 297528154, enable = 496789343 Scaled count = 501281125 (59.89%, 297528154/496789343) Event 1 -- Raw count = 301438606, run = 298515328, enable = 496784768 Scaled count = 501649643 (60.09%, 298515328/496784768) Event 2 -- Raw count = 302342618, run = 298798983, enable = 496782015 Scaled count = 502673648 (60.15%, 298798983/496782015) Event 3 -- Raw count = 303132319, run = 299230407, enable = 496778508 Scaled count = 503256412 (60.23%, 299230407/496778508) Event 4 -- Raw count = 302758195, run = 299218047, enable = 496774243 Scaled count = 502651743 (60.23%, 299218047/496774243) Event 5 -- Raw count = 303158458, run = 299204274, enable = 496769146 Scaled count = 503334281 (60.23%, 299204274/496769146) Event 6 -- Raw count = 303471397, run = 299197479, enable = 496763124 Scaled count = 503859189 (60.23%, 299197479/496763124) Event 7 -- Raw count = 303583387, run = 299196861, enable = 496756458 Scaled count = 504039405 (60.23%, 299196861/496756458) Event 8 -- Raw count = 303096897, run = 299186924, enable = 496748667 Scaled count = 503240507 (60.23%, 299186924/496748667) Event 9 -- Raw count = 301424173, run = 297845086, enable = 496739994 Scaled count = 502709122 (59.96%, 297845086/496739994) Event 10 -- Raw count = 300876415, run = 296851339, enable = 496729034 Scaled count = 503464297 (59.76%, 296851339/496729034) Event 11 -- Raw count = 300239338, run = 296547963, enable = 496719538 Scaled count = 502902612 (59.70%, 296547963/496719538) Event 12 -- Raw count = 299751948, run = 296547195, enable = 496710036 Scaled count = 502077926 (59.70%, 296547195/496710036) Event 13 -- Raw count = 299341883, run = 296549981, enable = 496700423 Scaled count = 501376663 (59.70%, 296549981/496700423) Event 14 -- Raw count = 299145476, run = 296561684, enable = 496690949 Scaled count = 501018366 (59.71%, 296561684/496690949) Expected: 501669431 High: 504039405 Low: 300218292 Average: 502635662 Average Error = 0.19% OK - running tests/test-evsel.c... 
loop = 65536, count = 329275 loop = 131072, count = 664638 loop = 262144, count = 1315367 loop = 524288, count = 2629617 loop = 1048576, count = 5273657 loop = 65536, count = 459641 loop = 131072, count = 978402 loop = 262144, count = 1581219 loop = 524288, count = 3774908 loop = 1048576, count = 7694417 OK make: Leaving directory '/home/nakamura/build_work/build_kernel/linux_kernel/linux/tools/lib/perf' Signed-off-by: Shunsuke Nakamura Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Rob Herring Link: https://lore.kernel.org/r/20211109085831.3770594-4-nakamura.shun@fujitsu.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/tests/test-evlist.c | 157 +++++++++++++++++++++++++++++++++++++ 1 file changed, 157 insertions(+) diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c index ce91a582f0e4..520a78267743 100644 --- a/tools/lib/perf/tests/test-evlist.c +++ b/tools/lib/perf/tests/test-evlist.c @@ -21,6 +21,9 @@ #include "tests.h" #include +#define EVENT_NUM 15 +#define WAIT_COUNT 100000000UL + static int libperf_print(enum libperf_print_level level, const char *fmt, va_list ap) { @@ -413,6 +416,159 @@ static int test_mmap_cpus(void) return 0; } +static double display_error(long long average, + long long high, + long long low, + long long expected) +{ + double error; + + error = (((double)average - expected) / expected) * 100.0; + + __T_VERBOSE(" Expected: %lld\n", expected); + __T_VERBOSE(" High: %lld Low: %lld Average: %lld\n", + high, low, average); + + __T_VERBOSE(" Average Error = %.2f%%\n", error); + + return error; +} + +static int test_stat_multiplexing(void) +{ + struct perf_counts_values expected_counts = { .val = 0 }; + struct perf_counts_values counts[EVENT_NUM] = {{ .val = 0 },}; + struct perf_thread_map *threads; + struct perf_evlist *evlist; + struct perf_evsel *evsel; + struct perf_event_attr attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_INSTRUCTIONS, + .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING, + .disabled = 1, + }; + int err, i, nonzero = 0; + unsigned long count; + long long max = 0, min = 0, avg = 0; + double error = 0.0; + s8 scaled = 0; + + /* read for non-multiplexing event count */ + threads = perf_thread_map__new_dummy(); + __T("failed to create threads", threads); + + perf_thread_map__set_pid(threads, 0, 0); + + evsel = perf_evsel__new(&attr); + __T("failed to create evsel", evsel); + + err = perf_evsel__open(evsel, NULL, threads); + __T("failed to open evsel", err == 0); + + err = perf_evsel__enable(evsel); + __T("failed to enable evsel", err == 0); + + /* wait loop */ + count = WAIT_COUNT; + while (count--) + ; + + perf_evsel__read(evsel, 0, 0, &expected_counts); + __T("failed to read value for evsel", expected_counts.val != 0); + __T("failed to read non-multiplexing event count", + expected_counts.ena == expected_counts.run); + + err = perf_evsel__disable(evsel); + __T("failed to enable evsel", err == 0); + + perf_evsel__close(evsel); + perf_evsel__delete(evsel); + + perf_thread_map__put(threads); + + /* read for multiplexing event count */ + threads = perf_thread_map__new_dummy(); + __T("failed to create threads", threads); + + perf_thread_map__set_pid(threads, 0, 0); + + evlist = perf_evlist__new(); + __T("failed to create evlist", evlist); + + for (i = 0; i < EVENT_NUM; i++) { + evsel = perf_evsel__new(&attr); + __T("failed to create evsel", evsel); + + perf_evlist__add(evlist, evsel); + } + 
perf_evlist__set_maps(evlist, NULL, threads); + + err = perf_evlist__open(evlist); + __T("failed to open evsel", err == 0); + + perf_evlist__enable(evlist); + + /* wait loop */ + count = WAIT_COUNT; + while (count--) + ; + + i = 0; + perf_evlist__for_each_evsel(evlist, evsel) { + perf_evsel__read(evsel, 0, 0, &counts[i]); + __T("failed to read value for evsel", counts[i].val != 0); + i++; + } + + perf_evlist__disable(evlist); + + min = counts[0].val; + for (i = 0; i < EVENT_NUM; i++) { + __T_VERBOSE("Event %2d -- Raw count = %lu, run = %lu, enable = %lu\n", + i, counts[i].val, counts[i].run, counts[i].ena); + + perf_counts_values__scale(&counts[i], true, &scaled); + if (scaled == 1) { + __T_VERBOSE("\t Scaled count = %lu (%.2lf%%, %lu/%lu)\n", + counts[i].val, + (double)counts[i].run / (double)counts[i].ena * 100.0, + counts[i].run, counts[i].ena); + } else if (scaled == -1) { + __T_VERBOSE("\t Not Runnnig\n"); + } else { + __T_VERBOSE("\t Not Scaling\n"); + } + + if (counts[i].val > max) + max = counts[i].val; + + if (counts[i].val < min) + min = counts[i].val; + + avg += counts[i].val; + + if (counts[i].val != 0) + nonzero++; + } + + if (nonzero != 0) + avg = avg / nonzero; + else + avg = 0; + + error = display_error(avg, max, min, expected_counts.val); + + __T("Error out of range!", ((error <= 1.0) && (error >= -1.0))); + + perf_evlist__close(evlist); + perf_evlist__delete(evlist); + + perf_thread_map__put(threads); + + return 0; +} + int test_evlist(int argc, char **argv) { __T_START; @@ -424,6 +580,7 @@ int test_evlist(int argc, char **argv) test_stat_thread_enable(); test_mmap_thread(); test_mmap_cpus(); + test_stat_multiplexing(); __T_END; return tests_failed == 0 ? 0 : -1; -- cgit v1.2.3 From 4edb117e6472ca0c0730887dba8b30cba0a3705e Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 23 Nov 2021 14:16:12 +0530 Subject: perf docs: Add info on AMD raw event encoding AMD processors have events with event select codes and unit masks larger than a byte. The core PMU, for example, uses 12-bit event select codes split between bits 0-7 and 32-35 of the PERF_CTL MSRs as can be seen from /sys/bus/event_sources/devices/cpu/format/*. The Processor Programming Reference (PPR) lists the event codes as unified 12-bit hexadecimal values instead and the split between the bits is not apparent to someone who is not aware of the layout of the PERF_CTL MSRs. 8-bit event select codes continue to work as the layout matches that of the PERF_CTL MSRs i.e. bits 0-7 for event select and 8-15 for unit mask. This adds more details in the perf man pages about using /sys/bus/event_sources/devices/*/format/* for determining the correct raw event encoding scheme. E.g. the "op_cache_hit_miss.op_cache_hit" event with code 0x28f and umask 0x03 can be programmed using its symbolic name as: $ sudo perf --debug perf-event-open stat -e op_cache_hit_miss.op_cache_hit sleep 1 ------------------------------------------------------------ perf_event_attr: type 4 size 128 config 0x20000038f sample_type IDENTIFIER read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING disabled 1 inherit 1 enable_on_exec 1 exclude_guest 1 ------------------------------------------------------------ [...] 
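For reference, the packing that produces config 0x20000038f follows the
sysfs format strings: config:0-7,32-35 for the event select and
config:8-15 for the unit mask. A small sketch of the arithmetic (the
helper name is made up for illustration):

  #include <stdint.h>
  #include <stdio.h>

  /* Pack an AMD core PMU raw config per
   * /sys/bus/event_source/devices/cpu/format/{event,umask}. */
  static uint64_t amd_core_raw_config(uint16_t event, uint8_t umask)
  {
  	return  (uint64_t)(event & 0xff)         /* EventSelect[7:0]  -> config[7:0]   */
  	      | ((uint64_t)umask << 8)           /* UnitMask          -> config[15:8]  */
  	      | ((uint64_t)(event >> 8) << 32);  /* EventSelect[11:8] -> config[35:32] */
  }

  int main(void)
  {
  	/* op_cache_hit_miss.op_cache_hit: event 0x28F, umask 0x03 */
  	printf("config = %#llx\n",
  	       (unsigned long long)amd_core_raw_config(0x28f, 0x03));
  	/* prints: config = 0x20000038f */
  	return 0;
  }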
One might use a simple eventsel+umask combination based on what the current man pages say and incorrectly program the event as: $ sudo perf --debug perf-event-open stat -e r0328f sleep 1 ------------------------------------------------------------ perf_event_attr: type 4 size 128 config 0x328f sample_type IDENTIFIER read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING disabled 1 inherit 1 enable_on_exec 1 exclude_guest 1 ------------------------------------------------------------ [...] When it should have been based on the format from sysfs: $ cat /sys/bus/event_source/devices/cpu/format/event config:0-7,32-35 $ sudo perf --debug perf-event-open stat -e r20000038f sleep 1 ------------------------------------------------------------ perf_event_attr: type 4 size 128 config 0x20000038f sample_type IDENTIFIER read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING disabled 1 inherit 1 enable_on_exec 1 exclude_guest 1 ------------------------------------------------------------ [...] Reviewed-by: Kajol Jain Signed-off-by: Sandipan Das Acked-by: Jiri Olsa Cc: Ananth Narayan Cc: Kim Phillips Cc: Ravi Bangoria Cc: Robert Richter Cc: Santosh Shukla Link: https://lore.kernel.org/r/20211123084613.243792-1-sandipan.das@amd.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-list.txt | 34 +++++++++++++++++++++++++++++++- tools/perf/Documentation/perf-record.txt | 6 ++++-- tools/perf/Documentation/perf-stat.txt | 6 ++++-- tools/perf/Documentation/perf-top.txt | 7 ++++--- 4 files changed, 45 insertions(+), 8 deletions(-) diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index 4dc8d0af19df..a922a95289a9 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -94,7 +94,7 @@ RAW HARDWARE EVENT DESCRIPTOR Even when an event is not available in a symbolic form within perf right now, it can be encoded in a per processor specific way. -For instance For x86 CPUs NNN represents the raw register encoding with the +For instance on x86 CPUs, N is a hexadecimal value that represents the raw register encoding with the layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide] Figure 30-1 Layout of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344, Figure 13-7 Performance Event-Select Register (PerfEvtSeln)). @@ -126,6 +126,38 @@ It's also possible to use pmu syntax: perf record -e cpu/r1a8/ ... perf record -e cpu/r0x1a8/ ... +Some processors, like those from AMD, support event codes and unit masks +larger than a byte. In such cases, the bits corresponding to the event +configuration parameters can be seen with: + + cat /sys/bus/event_source/devices//format/ + +Example: + +If the AMD docs for an EPYC 7713 processor describe an event as: + + Event Umask Event Mask + Num. Value Mnemonic Description + + 28FH 03H op_cache_hit_miss.op_cache_hit Counts Op Cache micro-tag + hit events. + +raw encoding of 0x0328F cannot be used since the upper nibble of the +EventSelect bits have to be specified via bits 32-35 as can be seen with: + + cat /sys/bus/event_source/devices/cpu/format/event + +raw encoding of 0x20000038F should be used instead: + + perf stat -e r20000038f -a sleep 1 + perf record -e r20000038f ... + +It's also possible to use pmu syntax: + + perf record -e r20000038f -a sleep 1 + perf record -e cpu/r20000038f/ ... + perf record -e cpu/r0x20000038f/ ... 
+ You should refer to the processor specific documentation for getting these details. Some of them are referenced in the SEE ALSO section below. diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 3cf7bac67239..55df7b073a55 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -30,8 +30,10 @@ OPTIONS - a symbolic event name (use 'perf list' to list all events) - - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a - hexadecimal event descriptor. + - a raw PMU event in the form of rN where N is a hexadecimal value + that represents the raw register encoding with the layout of the + event control registers as described by entries in + /sys/bus/event_sources/devices/cpu/format/*. - a symbolic or raw PMU event followed by an optional colon and a list of event modifiers, e.g., cpu-cycles:p. See the diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 7e6fb7cbc0f4..604e6f2301ea 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -36,8 +36,10 @@ report:: - a symbolic event name (use 'perf list' to list all events) - - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a - hexadecimal event descriptor. + - a raw PMU event in the form of rN where N is a hexadecimal value + that represents the raw register encoding with the layout of the + event control registers as described by entries in + /sys/bus/event_sources/devices/cpu/format/*. - a symbolic or raw PMU event followed by an optional colon and a list of event modifiers, e.g., cpu-cycles:p. See the diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 9898a32b8d9c..cac3dfbee7d8 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -38,9 +38,10 @@ Default is to monitor all CPUS. -e :: --event=:: Select the PMU event. Selection can be a symbolic event name - (use 'perf list' to list all events) or a raw PMU - event (eventsel+umask) in the form of rNNN where NNN is a - hexadecimal event descriptor. + (use 'perf list' to list all events) or a raw PMU event in the form + of rN where N is a hexadecimal value that represents the raw register + encoding with the layout of the event control registers as described + by entries in /sys/bus/event_sources/devices/cpu/format/*. -E :: --entries=:: -- cgit v1.2.3 From 7a2e14962cd43400c353cdc05550f580a284dcb9 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 23 Nov 2021 14:16:13 +0530 Subject: perf docs: Update link to AMD documentation This updates the link to documentation on AMD processors. The new link points to a page where users can find the Processor Programming Reference (PPR) documents for the family and model codes corresponding to processors they are using. 
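Picking the right PPR requires the processor's effective family and
model, which combine the base and extended CPUID fields. A sketch of
the standard decoding (x86-only, using the compiler's <cpuid.h>; shown
here for convenience, not part of the patch):

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
  	unsigned int eax, ebx, ecx, edx, family, model;

  	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
  		return 1;

  	family = (eax >> 8) & 0xf;            /* base family */
  	model  = (eax >> 4) & 0xf;            /* base model  */
  	if (family == 0xf) {                  /* extended encoding */
  		family += (eax >> 20) & 0xff;
  		model  |= ((eax >> 16) & 0xf) << 4;
  	}
  	/* e.g. an EPYC 7713 (Zen 3) reports family 0x19, model 0x01 */
  	printf("CPUID family %#x model %#x\n", family, model);
  	return 0;
  }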
Signed-off-by: Sandipan Das Acked-by: Jiri Olsa Cc: Ananth Narayan Cc: Kajol Jain Cc: Kim Phillips Cc: Ravi Bangoria Cc: Robert Richter Cc: Santosh Shukla Link: https://lore.kernel.org/r/20211123084613.243792-2-sandipan.das@amd.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-list.txt | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index a922a95289a9..57384a97c04f 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -81,7 +81,11 @@ On AMD systems it is implemented using IBS (up to precise-level 2). The precise modifier works with event types 0x76 (cpu-cycles, CPU clocks not halted) and 0xC1 (micro-ops retired). Both events map to IBS execution sampling (IBS op) with the IBS Op Counter Control bit -(IbsOpCntCtl) set respectively (see AMD64 Architecture Programmer’s +(IbsOpCntCtl) set respectively (see the +Core Complex (CCX) -> Processor x86 Core -> Instruction Based Sampling (IBS) +section of the [AMD Processor Programming Reference (PPR)] relevant to the +family, model and stepping of the processor being used). + Manual Volume 2: System Programming, 13.3 Instruction-Based Sampling). Examples to use IBS: @@ -96,8 +100,10 @@ it can be encoded in a per processor specific way. For instance on x86 CPUs, N is a hexadecimal value that represents the raw register encoding with the layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide] Figure 30-1 Layout -of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344, -Figure 13-7 Performance Event-Select Register (PerfEvtSeln)). +of IA32_PERFEVTSELx MSRs) or AMD's PERF_CTL MSRs (see the +Core Complex (CCX) -> Processor x86 Core -> MSR Registers section of the +[AMD Processor Programming Reference (PPR)] relevant to the family, model +and stepping of the processor being used). Note: Only the following bit fields can be set in x86 counter registers: event, umask, edge, inv, cmask. Esp. guest/host only and @@ -348,4 +354,4 @@ SEE ALSO linkperf:perf-stat[1], linkperf:perf-top[1], linkperf:perf-record[1], http://www.intel.com/sdm/[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide], -http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming] +https://bugzilla.kernel.org/show_bug.cgi?id=206537[AMD Processor Programming Reference (PPR)] -- cgit v1.2.3 From b4515ad6e1c8b195e3bd02a5a15b1c74119ea367 Mon Sep 17 00:00:00 2001 From: Gang Li Date: Tue, 23 Nov 2021 15:40:17 +0800 Subject: perf trace: Enable ignore_missing_thread for trace perf already support ignore_missing_thread for -u/-p, but not yet applied to `perf trace`. This patch enables ignore_missing_thread for `perf trace`. 
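The option matters because thread enumeration is inherently racy: a tid
collected from /proc can exit before perf_event_open() is called on it,
which then fails with ESRCH. With ignore_missing_thread set, the open
path drops the dead thread and retries instead of aborting, roughly
like this simplified sketch (the real, more careful logic lives in
util/evsel.c):

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct thread_list { int nr; int tids[8]; };

  /* Drop tids[idx] so the open loop can retry without the dead thread. */
  static bool ignore_missing(struct thread_list *t, int idx, int err)
  {
  	if (err != -ESRCH || t->nr <= 1)
  		return false;
  	for (int i = idx; i < t->nr - 1; i++)
  		t->tids[i] = t->tids[i + 1];
  	t->nr--;
  	return true;
  }

  int main(void)
  {
  	struct thread_list t = { .nr = 3, .tids = { 101, 102, 103 } };

  	/* pretend perf_event_open() on tids[1] just failed with ESRCH */
  	if (ignore_missing(&t, 1, -ESRCH))
  		printf("dropped dead tid, %d threads remain\n", t.nr);
  	return 0;
  }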
Signed-off-by: Gang Li Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1481538943-21874-6-git-send-email-jolsa@kernel.org Link: http://lkml.kernel.org/r/1513148513-6974-1-git-send-email-zhangmengting@huawei.com Link: http://lore.kernel.org/lkml/20211123074018.11406-1-ligang.bdlg@bytedance.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-trace.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 0b52e08e558e..94d62a92f1a1 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -3950,6 +3950,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv) evlist__add(evlist, pgfault_min); } + /* Enable ignoring missing threads when -u/-p option is defined. */ + trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid; + if (trace->sched && evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime)) goto out_error_sched_stat_runtime; -- cgit v1.2.3 From 6b6b16b3bb612757f7bc697496b9f5d6765512a6 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 23 Nov 2021 17:52:26 -0800 Subject: perf metric: Reduce multiplexing with duration_time It is common to use the same counters with and without duration_time. The ID sharing code treats duration_time as if it were a hardware event placed in the same group. This causes unnecessary multiplexing such as in the following example where l3_cache_access isn't shared: $ perf stat -M l3 -a sleep 1 Performance counter stats for 'system wide': 3,117,007 l3_cache_miss # 199.5 MB/s l3_rd_bw # 43.6 % l3_hits # 56.4 % l3_miss (50.00%) 5,526,447 l3_cache_access (50.00%) 5,392,435 l3_cache_access # 5389191.2 access/s l3_access_rate (50.00%) 1,000,601,901 ns duration_time 1.000601901 seconds time elapsed Fix this by placing duration_time in all groups unless metric sharing has been disabled on the command line: $ perf stat -M l3 -a sleep 1 Performance counter stats for 'system wide': 3,597,972 l3_cache_miss # 230.3 MB/s l3_rd_bw # 48.0 % l3_hits # 52.0 % l3_miss 6,914,459 l3_cache_access # 6909935.9 access/s l3_access_rate 1,000,654,579 ns duration_time 1.000654579 seconds time elapsed $ perf stat --metric-no-merge -M l3 -a sleep 1 Performance counter stats for 'system wide': 3,501,834 l3_cache_miss # 53.5 % l3_miss (24.99%) 6,548,173 l3_cache_access (24.99%) 3,417,622 l3_cache_miss # 45.7 % l3_hits (25.04%) 6,294,062 l3_cache_access (25.04%) 5,923,238 l3_cache_access # 5919688.1 access/s l3_access_rate (24.99%) 1,000,599,683 ns duration_time 3,607,486 l3_cache_miss # 230.9 MB/s l3_rd_bw (49.97%) 1.000599683 seconds time elapsed v2. Doesn't count duration_time in the metric_list_cmp function that sorts larger metrics first. Without this a metric with duration_time and an event is sorted the same as a metric with two events, possibly not allowing the first metric to share with the second. 
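A toy illustration of the adjusted sort key, with made-up metric
contents: both id sets below compare equal once duration_time is
ignored, so the sorted order can let the second metric reuse the
first's events instead of forcing another multiplexed group:

  #include <stdio.h>
  #include <string.h>

  /* Count a metric's ids, excluding duration_time, mirroring the sort
   * key used by metric_list_cmp(). */
  static int metric_size(const char *const ids[], int n)
  {
  	int count = n;

  	for (int i = 0; i < n; i++)
  		if (!strcmp(ids[i], "duration_time"))
  			count--;
  	return count;
  }

  int main(void)
  {
  	const char *m1[] = { "instructions", "cycles", "duration_time" };
  	const char *m2[] = { "instructions", "cycles" };

  	/* both print 2: neither metric is forced ahead of the other */
  	printf("m1=%d m2=%d\n", metric_size(m1, 3), metric_size(m2, 2));
  	return 0;
  }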
Signed-off-by: Ian Rogers Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20211124015226.3317994-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/metricgroup.c | 42 +++++++++++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index fffe02aae3ed..51c99cb08abf 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -1115,13 +1115,27 @@ out: return ret; } +/** + * metric_list_cmp - list_sort comparator that sorts metrics with more events to + * the front. duration_time is excluded from the count. + */ static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l, const struct list_head *r) { const struct metric *left = container_of(l, struct metric, nd); const struct metric *right = container_of(r, struct metric, nd); + struct expr_id_data *data; + int left_count, right_count; + + left_count = hashmap__size(left->pctx->ids); + if (!expr__get_id(left->pctx, "duration_time", &data)) + left_count--; + + right_count = hashmap__size(right->pctx->ids); + if (!expr__get_id(right->pctx, "duration_time", &data)) + right_count--; - return hashmap__size(right->pctx->ids) - hashmap__size(left->pctx->ids); + return right_count - left_count; } /** @@ -1299,14 +1313,16 @@ err_out: /** * parse_ids - Build the event string for the ids and parse them creating an * evlist. The encoded metric_ids are decoded. + * @metric_no_merge: is metric sharing explicitly disabled. * @fake_pmu: used when testing metrics not supported by the current CPU. * @ids: the event identifiers parsed from a metric. * @modifier: any modifiers added to the events. * @has_constraint: false if events should be placed in a weak group. * @out_evlist: the created list of events. */ -static int parse_ids(struct perf_pmu *fake_pmu, struct expr_parse_ctx *ids, - const char *modifier, bool has_constraint, struct evlist **out_evlist) +static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu, + struct expr_parse_ctx *ids, const char *modifier, + bool has_constraint, struct evlist **out_evlist) { struct parse_events_error parse_error; struct evlist *parsed_evlist; @@ -1314,12 +1330,19 @@ static int parse_ids(struct perf_pmu *fake_pmu, struct expr_parse_ctx *ids, int ret; *out_evlist = NULL; - if (hashmap__size(ids->ids) == 0) { + if (!metric_no_merge || hashmap__size(ids->ids) == 0) { char *tmp; /* - * No ids/events in the expression parsing context. Events may - * have been removed because of constant evaluation, e.g.: - * event1 if #smt_on else 0 + * We may fail to share events between metrics because + * duration_time isn't present in one metric. For example, a + * ratio of cache misses doesn't need duration_time but the same + * events may be used for a misses per second. Events without + * sharing implies multiplexing, that is best avoided, so place + * duration_time in every group. + * + * Also, there may be no ids/events in the expression parsing + * context because of constant evaluation, e.g.: + * event1 if #smt_on else 0 * Add a duration_time event to avoid a parse error on an empty * string. 
*/ @@ -1387,7 +1410,8 @@ static int parse_groups(struct evlist *perf_evlist, const char *str, ret = build_combined_expr_ctx(&metric_list, &combined); if (!ret && combined && hashmap__size(combined->ids)) { - ret = parse_ids(fake_pmu, combined, /*modifier=*/NULL, + ret = parse_ids(metric_no_merge, fake_pmu, combined, + /*modifier=*/NULL, /*has_constraint=*/true, &combined_evlist); } @@ -1435,7 +1459,7 @@ static int parse_groups(struct evlist *perf_evlist, const char *str, } } if (!metric_evlist) { - ret = parse_ids(fake_pmu, m->pctx, m->modifier, + ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier, m->has_constraint, &m->evlist); if (ret) goto out; -- cgit v1.2.3 From ecdcf630d71f3b4c64097cad0add561cd5010c02 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 30 Nov 2021 09:49:44 -0800 Subject: perf evlist: Allow setting arbitrary leader The leader of a group is the first, but allow it to be an arbitrary list member so that for Intel topdown events slots may always be the group leader. Reviewed-by: Kajol Jain Signed-off-by: Ian Rogers Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Vineet Singh Link: http://lore.kernel.org/lkml/20211130174945.247604-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/evlist.c | 15 +++++++++------ tools/lib/perf/include/internal/evlist.h | 2 +- tools/perf/util/parse-events.c | 4 ++-- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c index e37dfad31383..245acbc53bd3 100644 --- a/tools/lib/perf/evlist.c +++ b/tools/lib/perf/evlist.c @@ -643,14 +643,14 @@ perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map, return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first; } -void __perf_evlist__set_leader(struct list_head *list) +void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader) { - struct perf_evsel *evsel, *leader; + struct perf_evsel *first, *last, *evsel; - leader = list_entry(list->next, struct perf_evsel, node); - evsel = list_entry(list->prev, struct perf_evsel, node); + first = list_first_entry(list, struct perf_evsel, node); + last = list_last_entry(list, struct perf_evsel, node); - leader->nr_members = evsel->idx - leader->idx + 1; + leader->nr_members = last->idx - first->idx + 1; __perf_evlist__for_each_entry(list, evsel) evsel->leader = leader; @@ -659,7 +659,10 @@ void __perf_evlist__set_leader(struct list_head *list) void perf_evlist__set_leader(struct perf_evlist *evlist) { if (evlist->nr_entries) { + struct perf_evsel *first = list_entry(evlist->entries.next, + struct perf_evsel, node); + evlist->nr_groups = evlist->nr_entries > 1 ? 
1 : 0; - __perf_evlist__set_leader(&evlist->entries); + __perf_evlist__set_leader(&evlist->entries, first); } } diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h index f366dbad6a88..6f74269a3ad4 100644 --- a/tools/lib/perf/include/internal/evlist.h +++ b/tools/lib/perf/include/internal/evlist.h @@ -127,5 +127,5 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist, void perf_evlist__reset_id_hash(struct perf_evlist *evlist); -void __perf_evlist__set_leader(struct list_head *list); +void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader); #endif /* __LIBPERF_INTERNAL_EVLIST_H */ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index ba74fdf74af9..1d68167ab611 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1837,8 +1837,8 @@ void parse_events__set_leader(char *name, struct list_head *list, if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state)) return; - __perf_evlist__set_leader(list); - leader = list_entry(list->next, struct evsel, core.node); + leader = list_first_entry(list, struct evsel, core.node); + __perf_evlist__set_leader(list, &leader->core); leader->group_name = name ? strdup(name) : NULL; } -- cgit v1.2.3 From 94dbfd6781a0e87b6faa6012810eb22e7d5b8a70 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 30 Nov 2021 09:49:45 -0800 Subject: perf parse-events: Architecture specific leader override Currently topdown events must appear after a slots event: $ perf stat -e '{slots,topdown-fe-bound}' /bin/true Performance counter stats for '/bin/true': 3,183,090 slots 986,133 topdown-fe-bound Reversing the events yields: $ perf stat -e '{topdown-fe-bound,slots}' /bin/true Error: The sys_perf_event_open() syscall returned with 22 (Invalid argument) for event (topdown-fe-bound). For metrics the order of events is determined by iterating over a hashmap, and so slots isn't guaranteed to be first which can yield this error. Change the set_leader in parse-events, called when a group is closed, so that rather than always making the first event the leader, if the slots event exists then it is made the leader. It is then moved to the head of the evlist otherwise it won't be opened in the correct order. The result is: $ perf stat -e '{topdown-fe-bound,slots}' /bin/true Performance counter stats for '/bin/true': 3,274,795 slots 1,001,702 topdown-fe-bound A problem with this approach is the slots event is identified by name, names can be overwritten like 'cpu/slots,name=foo/' and this causes the leader change to fail. The change also modifies and fixes mixed groups like, with the change: $ perf stat -e '{instructions,slots,topdown-fe-bound}' -a -- sleep 2 Performance counter stats for 'system wide': 5574985410 slots 971981616 instructions 1348461887 topdown-fe-bound 2.001263120 seconds time elapsed Without the change: $ perf stat -e '{instructions,slots,topdown-fe-bound}' -a -- sleep 2 Performance counter stats for 'system wide': instructions slots topdown-fe-bound 2.006247990 seconds time elapsed Something that may be undesirable here is that the events are reordered in the output. 
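The arch hook is wired up with a weak symbol: util/parse-events.c
carries a __weak default that returns the first event, and
arch/x86/util/evlist.c supplies the strong definition that prefers
slots. The linker-level pattern, reduced to a self-contained sketch
(names are illustrative):

  /* generic.c: weak default, runnable on its own (prints 0). */
  #include <stdio.h>

  int __attribute__((weak)) pick_leader(void)
  {
  	return 0;                 /* default: the first event leads */
  }

  int main(void)
  {
  	printf("leader index: %d\n", pick_leader());
  	return 0;
  }

  /* x86.c (optional second translation unit): linking this in silently
   * replaces the weak default, exactly how perf's arch/x86 code
   * overrides the generic helper:
   *
   *     int pick_leader(void) { return 1; }   // e.g. prefer "slots"
   */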
Reviewed-by: Kajol Jain Signed-off-by: Ian Rogers Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Vineet Singh Link: http://lore.kernel.org/lkml/20211130174945.247604-2-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/util/evlist.c | 17 +++++++++++++++++ tools/perf/util/evlist.h | 1 + tools/perf/util/parse-events.c | 8 +++++++- 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c index 0b0951030a2f..f924246eff78 100644 --- a/tools/perf/arch/x86/util/evlist.c +++ b/tools/perf/arch/x86/util/evlist.c @@ -17,3 +17,20 @@ int arch_evlist__add_default_attrs(struct evlist *evlist) else return parse_events(evlist, TOPDOWN_L1_EVENTS, NULL); } + +struct evsel *arch_evlist__leader(struct list_head *list) +{ + struct evsel *evsel, *first; + + first = list_first_entry(list, struct evsel, core.node); + + if (!pmu_have_event("cpu", "slots")) + return first; + + __evlist__for_each_entry(list, evsel) { + if (evsel->pmu_name && !strcmp(evsel->pmu_name, "cpu") && + evsel->name && strstr(evsel->name, "slots")) + return evsel; + } + return first; +} diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 97bfb8d0be4f..993437ffe429 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -110,6 +110,7 @@ int __evlist__add_default_attrs(struct evlist *evlist, __evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) int arch_evlist__add_default_attrs(struct evlist *evlist); +struct evsel *arch_evlist__leader(struct list_head *list); int evlist__add_dummy(struct evlist *evlist); diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 1d68167ab611..acf20ce98ce9 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1824,6 +1824,11 @@ out: return ret; } +__weak struct evsel *arch_evlist__leader(struct list_head *list) +{ + return list_first_entry(list, struct evsel, core.node); +} + void parse_events__set_leader(char *name, struct list_head *list, struct parse_events_state *parse_state) { @@ -1837,9 +1842,10 @@ void parse_events__set_leader(char *name, struct list_head *list, if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state)) return; - leader = list_first_entry(list, struct evsel, core.node); + leader = arch_evlist__leader(list); __perf_evlist__set_leader(list, &leader->core); leader->group_name = name ? strdup(name) : NULL; + list_move(&leader->core.node, list); } /* list_event is assumed to point to malloc'ed memory */ -- cgit v1.2.3 From ed17b1914978eddb2b01f2d34577f1c82518c650 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Fri, 3 Dec 2021 22:05:44 +0100 Subject: perf tools: Drop requirement for libstdc++.so for libopencsd check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's possible to link against libopencsd_c_api without having libstdc++.so available, only libstdc++.so.6.0.28 (or whatever version is in use) needs to be available. The same holds true for libopencsd.so. When -lstdc++ (or -lopencsd) is explicitly passed to the linker however the .so file must be available. So wrap adding the dependencies into a check for static linking that actually requires adding them all. 
The same construct is already used for some other tests in the same file to reduce dependencies in the dynamic linking case. Fixes: 573cf5c9a152 ("perf build: Add missing -lstdc++ when linking with libopencsd") Reviewed-by: James Clark Signed-off-by: Uwe Kleine-König Cc: Adrian Bunk Cc: Alexander Shishkin Cc: Branislav Rankov Cc: Diederik de Haas Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/all/20211203210544.1137935-1-uwe@kleine-koenig.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 94bb53b0cebd..96ad944ca6a8 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -144,7 +144,10 @@ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto ifdef CSINCLUDES LIBOPENCSD_CFLAGS := -I$(CSINCLUDES) endif -OPENCSDLIBS := -lopencsd_c_api -lopencsd -lstdc++ +OPENCSDLIBS := -lopencsd_c_api +ifeq ($(findstring -static,${LDFLAGS}),-static) + OPENCSDLIBS += -lopencsd -lstdc++ +endif ifdef CSLIBS LIBOPENCSD_LDFLAGS := -L$(CSLIBS) endif -- cgit v1.2.3 From e69dc84282fb474cb87097c6c945d8f90e05a4d9 Mon Sep 17 00:00:00 2001 From: Jin Yao Date: Thu, 9 Sep 2021 14:22:15 +0800 Subject: perf stat: Support --cputype option for hybrid events In previous patch, we have supported the syntax which enables the event on a specified pmu, such as: cpu_core// cpu_atom// While this syntax is not very easy for applying on a set of events or applying on a group. In following example, we have to explicitly assign the pmu prefix. # ./perf stat -e '{cpu_core/cycles/,cpu_core/instructions/}' -- sleep 1 Performance counter stats for 'sleep 1': 1,158,545 cpu_core/cycles/ 1,003,113 cpu_core/instructions/ 1.002428712 seconds time elapsed A much easier way is: # ./perf stat --cputype core -e '{cycles,instructions}' -- sleep 1 Performance counter stats for 'sleep 1': 1,101,071 cpu_core/cycles/ 939,892 cpu_core/instructions/ 1.002363142 seconds time elapsed For this example, the '--cputype' enables the events from specified pmu (cpu_core). If '--cputype' conflicts with pmu prefix, '--cputype' is ignored. # ./perf stat --cputype core -e cycles,cpu_atom/instructions/ -a -- sleep 1 Performance counter stats for 'system wide': 21,003,407 cpu_core/cycles/ 367,886 cpu_atom/instructions/ 1.002203520 seconds time elapsed Signed-off-by: Jin Yao Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jin Yao Cc: Jiri Olsa Cc: Kan Liang Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20210909062215.10278-1-yao.jin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-stat.txt | 4 ++++ tools/perf/builtin-stat.c | 24 ++++++++++++++++++++++++ tools/perf/util/evlist.h | 1 + tools/perf/util/parse-events-hybrid.c | 9 ++++++--- 4 files changed, 35 insertions(+), 3 deletions(-) diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 604e6f2301ea..c06c341e72b9 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -495,6 +495,10 @@ This option can be enabled in perf config by setting the variable $ perf config stat.no-csv-summary=true +--cputype:: +Only enable events on applying cpu with this type for hybrid platform +(e.g. 
core or atom)" + EXAMPLES -------- diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 7974933dbc77..f6ca2b054c5b 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1168,6 +1168,26 @@ static int parse_stat_cgroups(const struct option *opt, return parse_cgroups(opt, str, unset); } +static int parse_hybrid_type(const struct option *opt, + const char *str, + int unset __maybe_unused) +{ + struct evlist *evlist = *(struct evlist **)opt->value; + + if (!list_empty(&evlist->core.entries)) { + fprintf(stderr, "Must define cputype before events/metrics\n"); + return -1; + } + + evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str); + if (!evlist->hybrid_pmu_name) { + fprintf(stderr, "--cputype %s is not supported!\n", str); + return -1; + } + + return 0; +} + static struct option stat_options[] = { OPT_BOOLEAN('T', "transaction", &transaction_run, "hardware transaction statistics"), @@ -1282,6 +1302,10 @@ static struct option stat_options[] = { "don't print 'summary' for CSV summary output"), OPT_BOOLEAN(0, "quiet", &stat_config.quiet, "don't print output (useful with record)"), + OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type", + "Only enable events on applying cpu with this type " + "for hybrid platform (e.g. core or atom)", + parse_hybrid_type), #ifdef HAVE_LIBPFM OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", "libpfm4 event selector. use 'perf list' to list available events", diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 993437ffe429..27594900a052 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -64,6 +64,7 @@ struct evlist { struct evsel *selected; struct events_stats stats; struct perf_env *env; + const char *hybrid_pmu_name; void (*trace_event_sample_raw)(struct evlist *evlist, union perf_event *event, struct perf_sample *sample); diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c index 9fc86971027b..284f8eabd3b9 100644 --- a/tools/perf/util/parse-events-hybrid.c +++ b/tools/perf/util/parse-events-hybrid.c @@ -63,10 +63,13 @@ static int create_event_hybrid(__u32 config_type, int *idx, static int pmu_cmp(struct parse_events_state *parse_state, struct perf_pmu *pmu) { - if (!parse_state->hybrid_pmu_name) - return 0; + if (parse_state->evlist && parse_state->evlist->hybrid_pmu_name) + return strcmp(parse_state->evlist->hybrid_pmu_name, pmu->name); + + if (parse_state->hybrid_pmu_name) + return strcmp(parse_state->hybrid_pmu_name, pmu->name); - return strcmp(parse_state->hybrid_pmu_name, pmu->name); + return 0; } static int add_hw_hybrid(struct parse_events_state *parse_state, -- cgit v1.2.3 From 8ff4f20f3eb55dea0dbbe5e32043ab6b7427882f Mon Sep 17 00:00:00 2001 From: Andrew Kilroy Date: Fri, 3 Dec 2021 12:35:22 +0000 Subject: perf vendor events arm64: Fix JSON indentation to 4 spaces standard Correct indentation to 4 spaces, same as the other JSON files. 
Reviewed-by: John Garry Signed-off-by: Andrew Kilroy Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: http://lore.kernel.org/lkml/20211203123525.31127-2-andrew.kilroy@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- .../pmu-events/arch/arm64/armv8-recommended.json | 202 ++++++++++----------- 1 file changed, 101 insertions(+), 101 deletions(-) diff --git a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json b/tools/perf/pmu-events/arch/arm64/armv8-recommended.json index d0a19866563d..210afa856091 100644 --- a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json +++ b/tools/perf/pmu-events/arch/arm64/armv8-recommended.json @@ -148,305 +148,305 @@ "EventCode": "0x60", "EventName": "BUS_ACCESS_RD", "BriefDescription": "Bus access read" - }, - { + }, + { "PublicDescription": "Bus access write", "EventCode": "0x61", "EventName": "BUS_ACCESS_WR", "BriefDescription": "Bus access write" - }, - { + }, + { "PublicDescription": "Bus access, Normal, Cacheable, Shareable", "EventCode": "0x62", "EventName": "BUS_ACCESS_SHARED", "BriefDescription": "Bus access, Normal, Cacheable, Shareable" - }, - { + }, + { "PublicDescription": "Bus access, not Normal, Cacheable, Shareable", "EventCode": "0x63", "EventName": "BUS_ACCESS_NOT_SHARED", "BriefDescription": "Bus access, not Normal, Cacheable, Shareable" - }, - { + }, + { "PublicDescription": "Bus access, Normal", "EventCode": "0x64", "EventName": "BUS_ACCESS_NORMAL", "BriefDescription": "Bus access, Normal" - }, - { + }, + { "PublicDescription": "Bus access, peripheral", "EventCode": "0x65", "EventName": "BUS_ACCESS_PERIPH", "BriefDescription": "Bus access, peripheral" - }, - { + }, + { "PublicDescription": "Data memory access, read", "EventCode": "0x66", "EventName": "MEM_ACCESS_RD", "BriefDescription": "Data memory access, read" - }, - { + }, + { "PublicDescription": "Data memory access, write", "EventCode": "0x67", "EventName": "MEM_ACCESS_WR", "BriefDescription": "Data memory access, write" - }, - { + }, + { "PublicDescription": "Unaligned access, read", "EventCode": "0x68", "EventName": "UNALIGNED_LD_SPEC", "BriefDescription": "Unaligned access, read" - }, - { + }, + { "PublicDescription": "Unaligned access, write", "EventCode": "0x69", "EventName": "UNALIGNED_ST_SPEC", "BriefDescription": "Unaligned access, write" - }, - { + }, + { "PublicDescription": "Unaligned access", "EventCode": "0x6a", "EventName": "UNALIGNED_LDST_SPEC", "BriefDescription": "Unaligned access" - }, - { + }, + { "PublicDescription": "Exclusive operation speculatively executed, LDREX or LDX", "EventCode": "0x6c", "EventName": "LDREX_SPEC", "BriefDescription": "Exclusive operation speculatively executed, LDREX or LDX" - }, - { + }, + { "PublicDescription": "Exclusive operation speculatively executed, STREX or STX pass", "EventCode": "0x6d", "EventName": "STREX_PASS_SPEC", "BriefDescription": "Exclusive operation speculatively executed, STREX or STX pass" - }, - { + }, + { "PublicDescription": "Exclusive operation speculatively executed, STREX or STX fail", "EventCode": "0x6e", "EventName": "STREX_FAIL_SPEC", "BriefDescription": "Exclusive operation speculatively executed, STREX or STX fail" - }, - { + }, + { "PublicDescription": "Exclusive operation speculatively executed, STREX or STX", "EventCode": "0x6f", "EventName": "STREX_SPEC", "BriefDescription": "Exclusive operation speculatively executed, STREX or STX" - }, - { + }, + { "PublicDescription": 
"Operation speculatively executed, load", "EventCode": "0x70", "EventName": "LD_SPEC", "BriefDescription": "Operation speculatively executed, load" - }, - { + }, + { "PublicDescription": "Operation speculatively executed, store", "EventCode": "0x71", "EventName": "ST_SPEC", "BriefDescription": "Operation speculatively executed, store" - }, - { + }, + { "PublicDescription": "Operation speculatively executed, load or store", "EventCode": "0x72", "EventName": "LDST_SPEC", "BriefDescription": "Operation speculatively executed, load or store" - }, - { + }, + { "PublicDescription": "Operation speculatively executed, integer data processing", "EventCode": "0x73", "EventName": "DP_SPEC", "BriefDescription": "Operation speculatively executed, integer data processing" - }, - { + }, + { "PublicDescription": "Operation speculatively executed, Advanced SIMD instruction", "EventCode": "0x74", "EventName": "ASE_SPEC", "BriefDescription": "Operation speculatively executed, Advanced SIMD instruction" - }, - { + }, + { "PublicDescription": "Operation speculatively executed, floating-point instruction", "EventCode": "0x75", "EventName": "VFP_SPEC", "BriefDescription": "Operation speculatively executed, floating-point instruction" - }, - { + }, + { "PublicDescription": "Operation speculatively executed, software change of the PC", "EventCode": "0x76", "EventName": "PC_WRITE_SPEC", "BriefDescription": "Operation speculatively executed, software change of the PC" - }, - { + }, + { "PublicDescription": "Operation speculatively executed, Cryptographic instruction", "EventCode": "0x77", "EventName": "CRYPTO_SPEC", "BriefDescription": "Operation speculatively executed, Cryptographic instruction" - }, - { + }, + { "PublicDescription": "Branch speculatively executed, immediate branch", "EventCode": "0x78", "EventName": "BR_IMMED_SPEC", "BriefDescription": "Branch speculatively executed, immediate branch" - }, - { + }, + { "PublicDescription": "Branch speculatively executed, procedure return", "EventCode": "0x79", "EventName": "BR_RETURN_SPEC", "BriefDescription": "Branch speculatively executed, procedure return" - }, - { + }, + { "PublicDescription": "Branch speculatively executed, indirect branch", "EventCode": "0x7a", "EventName": "BR_INDIRECT_SPEC", "BriefDescription": "Branch speculatively executed, indirect branch" - }, - { + }, + { "PublicDescription": "Barrier speculatively executed, ISB", "EventCode": "0x7c", "EventName": "ISB_SPEC", "BriefDescription": "Barrier speculatively executed, ISB" - }, - { + }, + { "PublicDescription": "Barrier speculatively executed, DSB", "EventCode": "0x7d", "EventName": "DSB_SPEC", "BriefDescription": "Barrier speculatively executed, DSB" - }, - { + }, + { "PublicDescription": "Barrier speculatively executed, DMB", "EventCode": "0x7e", "EventName": "DMB_SPEC", "BriefDescription": "Barrier speculatively executed, DMB" - }, - { + }, + { "PublicDescription": "Exception taken, Other synchronous", "EventCode": "0x81", "EventName": "EXC_UNDEF", "BriefDescription": "Exception taken, Other synchronous" - }, - { + }, + { "PublicDescription": "Exception taken, Supervisor Call", "EventCode": "0x82", "EventName": "EXC_SVC", "BriefDescription": "Exception taken, Supervisor Call" - }, - { + }, + { "PublicDescription": "Exception taken, Instruction Abort", "EventCode": "0x83", "EventName": "EXC_PABORT", "BriefDescription": "Exception taken, Instruction Abort" - }, - { + }, + { "PublicDescription": "Exception taken, Data Abort and SError", "EventCode": "0x84", "EventName": "EXC_DABORT", 
"BriefDescription": "Exception taken, Data Abort and SError" - }, - { + }, + { "PublicDescription": "Exception taken, IRQ", "EventCode": "0x86", "EventName": "EXC_IRQ", "BriefDescription": "Exception taken, IRQ" - }, - { + }, + { "PublicDescription": "Exception taken, FIQ", "EventCode": "0x87", "EventName": "EXC_FIQ", "BriefDescription": "Exception taken, FIQ" - }, - { + }, + { "PublicDescription": "Exception taken, Secure Monitor Call", "EventCode": "0x88", "EventName": "EXC_SMC", "BriefDescription": "Exception taken, Secure Monitor Call" - }, - { + }, + { "PublicDescription": "Exception taken, Hypervisor Call", "EventCode": "0x8a", "EventName": "EXC_HVC", "BriefDescription": "Exception taken, Hypervisor Call" - }, - { + }, + { "PublicDescription": "Exception taken, Instruction Abort not taken locally", "EventCode": "0x8b", "EventName": "EXC_TRAP_PABORT", "BriefDescription": "Exception taken, Instruction Abort not taken locally" - }, - { + }, + { "PublicDescription": "Exception taken, Data Abort or SError not taken locally", "EventCode": "0x8c", "EventName": "EXC_TRAP_DABORT", "BriefDescription": "Exception taken, Data Abort or SError not taken locally" - }, - { + }, + { "PublicDescription": "Exception taken, Other traps not taken locally", "EventCode": "0x8d", "EventName": "EXC_TRAP_OTHER", "BriefDescription": "Exception taken, Other traps not taken locally" - }, - { + }, + { "PublicDescription": "Exception taken, IRQ not taken locally", "EventCode": "0x8e", "EventName": "EXC_TRAP_IRQ", "BriefDescription": "Exception taken, IRQ not taken locally" - }, - { + }, + { "PublicDescription": "Exception taken, FIQ not taken locally", "EventCode": "0x8f", "EventName": "EXC_TRAP_FIQ", "BriefDescription": "Exception taken, FIQ not taken locally" - }, - { + }, + { "PublicDescription": "Release consistency operation speculatively executed, Load-Acquire", "EventCode": "0x90", "EventName": "RC_LD_SPEC", "BriefDescription": "Release consistency operation speculatively executed, Load-Acquire" - }, - { + }, + { "PublicDescription": "Release consistency operation speculatively executed, Store-Release", "EventCode": "0x91", "EventName": "RC_ST_SPEC", "BriefDescription": "Release consistency operation speculatively executed, Store-Release" - }, - { + }, + { "PublicDescription": "Attributable Level 3 data or unified cache access, read", "EventCode": "0xa0", "EventName": "L3D_CACHE_RD", "BriefDescription": "Attributable Level 3 data or unified cache access, read" - }, - { + }, + { "PublicDescription": "Attributable Level 3 data or unified cache access, write", "EventCode": "0xa1", "EventName": "L3D_CACHE_WR", "BriefDescription": "Attributable Level 3 data or unified cache access, write" - }, - { + }, + { "PublicDescription": "Attributable Level 3 data or unified cache refill, read", "EventCode": "0xa2", "EventName": "L3D_CACHE_REFILL_RD", "BriefDescription": "Attributable Level 3 data or unified cache refill, read" - }, - { + }, + { "PublicDescription": "Attributable Level 3 data or unified cache refill, write", "EventCode": "0xa3", "EventName": "L3D_CACHE_REFILL_WR", "BriefDescription": "Attributable Level 3 data or unified cache refill, write" - }, - { + }, + { "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, victim", "EventCode": "0xa6", "EventName": "L3D_CACHE_WB_VICTIM", "BriefDescription": "Attributable Level 3 data or unified cache Write-Back, victim" - }, - { + }, + { "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean", "EventCode": 
"0xa7", "EventName": "L3D_CACHE_WB_CLEAN", "BriefDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean" - }, - { + }, + { "PublicDescription": "Attributable Level 3 data or unified cache access, invalidate", "EventCode": "0xa8", "EventName": "L3D_CACHE_INVAL", "BriefDescription": "Attributable Level 3 data or unified cache access, invalidate" - } + } ] -- cgit v1.2.3 From f0a29c9647ff8bbb424641f79bc1894e83dec218 Mon Sep 17 00:00:00 2001 From: Sohaib Mohamed Date: Fri, 19 Nov 2021 08:14:08 +0200 Subject: perf bench: Use unbuffered output when pipe/tee'ing to a file The output of 'perf bench' gets buffered when I pipe it to a file or to tee, in such a way that I can see it only at the end. E.g. $ perf bench internals synthesize -t < output comes out fine after each test run > $ perf bench internals synthesize -t | tee file.txt < output comes out only at the end of all tests > This patch resolves this issue for 'bench' and 'test' subcommands. See, also: $ perf bench mem all | tee file.txt $ perf bench sched all | tee file.txt $ perf bench internals all -t | tee file.txt $ perf bench internals all | tee file.txt Committer testing: It really gets staggered, i.e. outputs in bursts, when the buffer fills up and has to be drained to make up space for more output. Suggested-by: Riccardo Mancini Signed-off-by: Sohaib Mohamed Tested-by: Arnaldo Carvalho de Melo Acked-by: Jiri Olsa Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Fabian Hemmer Cc: Ian Rogers Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20211119061409.78004-1-sohaib.amhmd@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-bench.c | 5 +++-- tools/perf/tests/builtin-test.c | 3 +++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index d0895162c2ba..d291f3a8af5f 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -226,7 +226,6 @@ static void run_collection(struct collection *coll) if (!bench->fn) break; printf("# Running %s/%s benchmark...\n", coll->name, bench->name); - fflush(stdout); argv[1] = bench->name; run_bench(coll->name, bench->name, bench->fn, 1, argv); @@ -247,6 +246,9 @@ int cmd_bench(int argc, const char **argv) struct collection *coll; int ret = 0; + /* Unbuffered output */ + setvbuf(stdout, NULL, _IONBF, 0); + if (argc < 2) { /* No collection specified. 
*/ print_usage(); @@ -300,7 +302,6 @@ int cmd_bench(int argc, const char **argv) if (bench_format == BENCH_FORMAT_DEFAULT) printf("# Running '%s/%s' benchmark:\n", coll->name, bench->name); - fflush(stdout); ret = run_bench(coll->name, bench->name, bench->fn, argc-1, argv+1); goto end; } diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index f1e6d2a3a578..1fb9f2a11d63 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -607,6 +607,9 @@ int cmd_test(int argc, const char **argv) if (ret < 0) return ret; + /* Unbuffered output */ + setvbuf(stdout, NULL, _IONBF, 0); + argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0); if (argc >= 1 && !strcmp(argv[0], "list")) return perf_test__list(argc - 1, argv + 1); -- cgit v1.2.3 From b0fde9c6e291e528878ea3713997777713fe44c6 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 1 Dec 2021 14:08:55 -0800 Subject: perf arm-spe: Add SPE total latency as PERF_SAMPLE_WEIGHT Use total latency info in the SPE counter packet as sample weight so that we can see it in local_weight and (global) weight sort keys. Maybe we can use PERF_SAMPLE_WEIGHT_STRUCT to support ins_lat as well but I'm not sure which latency it matches. So just adding total latency first. Reviewed-by: Leo Yan Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: German Gomez Cc: Ian Rogers Cc: James Clark Cc: Jiri Olsa Cc: Mark Rutland Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20211201220855.1260688-1-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/arm-spe-decoder/arm-spe-decoder.c | 2 ++ tools/perf/util/arm-spe-decoder/arm-spe-decoder.h | 1 + tools/perf/util/arm-spe.c | 5 ++++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c index 3fc528c9270c..5e390a1a79ab 100644 --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c @@ -179,6 +179,8 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder) decoder->record.phys_addr = ip; break; case ARM_SPE_COUNTER: + if (idx == SPE_CNT_PKT_HDR_INDEX_TOTAL_LAT) + decoder->record.latency = payload; break; case ARM_SPE_CONTEXT: decoder->record.context_id = payload; diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h index 46a8556a9e95..69b31084d6be 100644 --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h @@ -33,6 +33,7 @@ struct arm_spe_record { enum arm_spe_sample_type type; int err; u32 op; + u32 latency; u64 from_ip; u64 to_ip; u64 timestamp; diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index fccac06b573a..8a3828f86901 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -330,6 +330,7 @@ static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq, sample.addr = record->virt_addr; sample.phys_addr = record->phys_addr; sample.data_src = data_src; + sample.weight = record->latency; return arm_spe_deliver_synth_event(spe, speq, event, &sample); } @@ -347,6 +348,7 @@ static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq, sample.id = spe_events_id; sample.stream_id = spe_events_id; sample.addr = record->to_ip; + sample.weight = record->latency; return arm_spe_deliver_synth_event(spe, speq, event, &sample); } @@ -993,7 +995,8 @@ 
arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session) attr.type = PERF_TYPE_HARDWARE; attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | - PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC; + PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC | + PERF_SAMPLE_WEIGHT; if (spe->timeless_decoding) attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; else -- cgit v1.2.3 From 888569dbcd80a0bb87371e9fb0fa3802c7aa9b14 Mon Sep 17 00:00:00 2001 From: Salvatore Bonaccorso Date: Tue, 23 Nov 2021 22:18:21 +0100 Subject: perf dlfilter: Drop unused variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Compiling tools/perf/dlfilters/dlfilter-test-api-v0.c result in: checking for stdlib.h... dlfilters/dlfilter-test-api-v0.c: In function ‘filter_event’: dlfilters/dlfilter-test-api-v0.c:311:29: warning: unused variable ‘d’ [-Wunused-variable] 311 | struct filter_data *d = data; | So remove the variable now. Reviewed-by: German Gomez Signed-off-by: Salvatore Bonaccorso Acked-by: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20211123211821.132924-1-carnil@debian.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/dlfilters/dlfilter-test-api-v0.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/perf/dlfilters/dlfilter-test-api-v0.c b/tools/perf/dlfilters/dlfilter-test-api-v0.c index 7565a1852c74..b17eb52a0694 100644 --- a/tools/perf/dlfilters/dlfilter-test-api-v0.c +++ b/tools/perf/dlfilters/dlfilter-test-api-v0.c @@ -308,8 +308,6 @@ int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, vo int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx) { - struct filter_data *d = data; - pr_debug("%s API\n", __func__); return do_checks(data, sample, ctx, false); -- cgit v1.2.3 From 3987d65f45ed51a4650e911baa68f9b6ed4623cb Mon Sep 17 00:00:00 2001 From: Andrew Kilroy Date: Fri, 10 Dec 2021 12:37:04 +0000 Subject: perf vendor events: For the Arm Neoverse N2 Updates the common and microarch json file to add counters available in the Arm Neoverse N2 chip, but should also apply to other ArmV8 and ArmV9 cpus. Specified in ArmV8 architecture reference manual https://developer.arm.com/documentation/ddi0487/gb/?lang=en Some of the counters added to armv8-common-and-microarch.json are specified in the ArmV9 architecture reference manual supplement (issue A.a): https://developer.arm.com/documentation/ddi0608/aa The additional ArmV9 counters are TRB_WRAP TRCEXTOUT0 TRCEXTOUT1 TRCEXTOUT2 TRCEXTOUT3 CTI_TRIGOUT4 CTI_TRIGOUT5 CTI_TRIGOUT6 CTI_TRIGOUT7 This patch also adds files in pmu-events/arch/arm64/arm/neoverse-n2 for perf list to output the counter names in categories. 
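Each entry in the per-CPU files only names an architecture standard event via "ArchStdEvent"; the event code and descriptions are resolved at build time from armv8-common-and-microarch.json. With the new files in place, the counters show up in the usual way, e.g. (output abbreviated and illustrative):

  $ perf list
    ...
    sample_pop
      [Sample Population]
    trb_wrap
      [Trace buffer current write pointer wrapped]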
Counters on the Neoverse N2 are stated in its reference manual: https://developer.arm.com/documentation/102099/0000 Reviewed-by: John Garry Signed-off-by: Andrew Kilroy Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211210123706.7490-2-andrew.kilroy@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- .../arch/arm64/arm/neoverse-n2/branch.json | 8 + .../pmu-events/arch/arm64/arm/neoverse-n2/bus.json | 20 +++ .../arch/arm64/arm/neoverse-n2/cache.json | 155 ++++++++++++++++ .../arch/arm64/arm/neoverse-n2/exception.json | 47 +++++ .../arch/arm64/arm/neoverse-n2/instruction.json | 143 +++++++++++++++ .../arch/arm64/arm/neoverse-n2/memory.json | 38 ++++ .../arch/arm64/arm/neoverse-n2/other.json | 5 + .../arch/arm64/arm/neoverse-n2/pipeline.json | 23 +++ .../pmu-events/arch/arm64/arm/neoverse-n2/spe.json | 14 ++ .../arch/arm64/arm/neoverse-n2/trace.json | 29 +++ .../arch/arm64/armv8-common-and-microarch.json | 198 +++++++++++++++++++++ tools/perf/pmu-events/arch/arm64/mapfile.csv | 1 + 12 files changed, 681 insertions(+) create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/branch.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/bus.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/cache.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/exception.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/instruction.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/memory.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/other.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/pipeline.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/spe.json create mode 100644 tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/trace.json diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/branch.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/branch.json new file mode 100644 index 000000000000..79f2016c53b0 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/branch.json @@ -0,0 +1,8 @@ +[ + { + "ArchStdEvent": "BR_MIS_PRED" + }, + { + "ArchStdEvent": "BR_PRED" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/bus.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/bus.json new file mode 100644 index 000000000000..579c1c993d17 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/bus.json @@ -0,0 +1,20 @@ +[ + { + "ArchStdEvent": "CPU_CYCLES" + }, + { + "ArchStdEvent": "BUS_ACCESS" + }, + { + "ArchStdEvent": "BUS_CYCLES" + }, + { + "ArchStdEvent": "BUS_ACCESS_RD" + }, + { + "ArchStdEvent": "BUS_ACCESS_WR" + }, + { + "ArchStdEvent": "CNT_CYCLES" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/cache.json new file mode 100644 index 000000000000..0141f749bff3 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/cache.json @@ -0,0 +1,155 @@ +[ + { + "ArchStdEvent": "L1I_CACHE_REFILL" + }, + { + "ArchStdEvent": "L1I_TLB_REFILL" + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL" + }, + { + "ArchStdEvent": "L1D_CACHE" + }, + { + "ArchStdEvent": "L1D_TLB_REFILL" + }, + { + "ArchStdEvent": "L1I_CACHE" + }, + { + "ArchStdEvent": "L1D_CACHE_WB" + }, + { + "ArchStdEvent": "L2D_CACHE" + }, + { + "ArchStdEvent": "L2D_CACHE_REFILL" + }, 
+ { + "ArchStdEvent": "L2D_CACHE_WB" + }, + { + "ArchStdEvent": "L2D_CACHE_ALLOCATE" + }, + { + "ArchStdEvent": "L1D_TLB" + }, + { + "ArchStdEvent": "L1I_TLB" + }, + { + "ArchStdEvent": "L3D_CACHE_ALLOCATE" + }, + { + "ArchStdEvent": "L3D_CACHE_REFILL" + }, + { + "ArchStdEvent": "L3D_CACHE" + }, + { + "ArchStdEvent": "L2D_TLB_REFILL" + }, + { + "ArchStdEvent": "L2D_TLB" + }, + { + "ArchStdEvent": "DTLB_WALK" + }, + { + "ArchStdEvent": "ITLB_WALK" + }, + { + "ArchStdEvent": "LL_CACHE_RD" + }, + { + "ArchStdEvent": "LL_CACHE_MISS_RD" + }, + { + "ArchStdEvent": "L1D_CACHE_LMISS_RD" + }, + { + "ArchStdEvent": "L1D_CACHE_RD" + }, + { + "ArchStdEvent": "L1D_CACHE_WR" + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL_RD" + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL_WR" + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL_INNER" + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL_OUTER" + }, + { + "ArchStdEvent": "L1D_CACHE_WB_VICTIM" + }, + { + "ArchStdEvent": "L1D_CACHE_WB_CLEAN" + }, + { + "ArchStdEvent": "L1D_CACHE_INVAL" + }, + { + "ArchStdEvent": "L1D_TLB_REFILL_RD" + }, + { + "ArchStdEvent": "L1D_TLB_REFILL_WR" + }, + { + "ArchStdEvent": "L1D_TLB_RD" + }, + { + "ArchStdEvent": "L1D_TLB_WR" + }, + { + "ArchStdEvent": "L2D_CACHE_RD" + }, + { + "ArchStdEvent": "L2D_CACHE_WR" + }, + { + "ArchStdEvent": "L2D_CACHE_REFILL_RD" + }, + { + "ArchStdEvent": "L2D_CACHE_REFILL_WR" + }, + { + "ArchStdEvent": "L2D_CACHE_WB_VICTIM" + }, + { + "ArchStdEvent": "L2D_CACHE_WB_CLEAN" + }, + { + "ArchStdEvent": "L2D_CACHE_INVAL" + }, + { + "ArchStdEvent": "L2D_TLB_REFILL_RD" + }, + { + "ArchStdEvent": "L2D_TLB_REFILL_WR" + }, + { + "ArchStdEvent": "L2D_TLB_RD" + }, + { + "ArchStdEvent": "L2D_TLB_WR" + }, + { + "ArchStdEvent": "L3D_CACHE_RD" + }, + { + "ArchStdEvent": "L1I_CACHE_LMISS" + }, + { + "ArchStdEvent": "L2D_CACHE_LMISS_RD" + }, + { + "ArchStdEvent": "L3D_CACHE_LMISS_RD" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/exception.json new file mode 100644 index 000000000000..344a2d552ad5 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/exception.json @@ -0,0 +1,47 @@ +[ + { + "ArchStdEvent": "EXC_TAKEN" + }, + { + "ArchStdEvent": "MEMORY_ERROR" + }, + { + "ArchStdEvent": "EXC_UNDEF" + }, + { + "ArchStdEvent": "EXC_SVC" + }, + { + "ArchStdEvent": "EXC_PABORT" + }, + { + "ArchStdEvent": "EXC_DABORT" + }, + { + "ArchStdEvent": "EXC_IRQ" + }, + { + "ArchStdEvent": "EXC_FIQ" + }, + { + "ArchStdEvent": "EXC_SMC" + }, + { + "ArchStdEvent": "EXC_HVC" + }, + { + "ArchStdEvent": "EXC_TRAP_PABORT" + }, + { + "ArchStdEvent": "EXC_TRAP_DABORT" + }, + { + "ArchStdEvent": "EXC_TRAP_OTHER" + }, + { + "ArchStdEvent": "EXC_TRAP_IRQ" + }, + { + "ArchStdEvent": "EXC_TRAP_FIQ" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/instruction.json new file mode 100644 index 000000000000..e57cd55937c6 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/instruction.json @@ -0,0 +1,143 @@ +[ + { + "ArchStdEvent": "SW_INCR" + }, + { + "ArchStdEvent": "INST_RETIRED" + }, + { + "ArchStdEvent": "EXC_RETURN" + }, + { + "ArchStdEvent": "CID_WRITE_RETIRED" + }, + { + "ArchStdEvent": "INST_SPEC" + }, + { + "ArchStdEvent": "TTBR_WRITE_RETIRED" + }, + { + "ArchStdEvent": "BR_RETIRED" + }, + { + "ArchStdEvent": "BR_MIS_PRED_RETIRED" + }, + { + "ArchStdEvent": "OP_RETIRED" + }, + { + "ArchStdEvent": "OP_SPEC" + }, + { + "ArchStdEvent": "LDREX_SPEC" + }, + { + 
"ArchStdEvent": "STREX_PASS_SPEC" + }, + { + "ArchStdEvent": "STREX_FAIL_SPEC" + }, + { + "ArchStdEvent": "STREX_SPEC" + }, + { + "ArchStdEvent": "LD_SPEC" + }, + { + "ArchStdEvent": "ST_SPEC" + }, + { + "ArchStdEvent": "DP_SPEC" + }, + { + "ArchStdEvent": "ASE_SPEC" + }, + { + "ArchStdEvent": "VFP_SPEC" + }, + { + "ArchStdEvent": "PC_WRITE_SPEC" + }, + { + "ArchStdEvent": "CRYPTO_SPEC" + }, + { + "ArchStdEvent": "BR_IMMED_SPEC" + }, + { + "ArchStdEvent": "BR_RETURN_SPEC" + }, + { + "ArchStdEvent": "BR_INDIRECT_SPEC" + }, + { + "ArchStdEvent": "ISB_SPEC" + }, + { + "ArchStdEvent": "DSB_SPEC" + }, + { + "ArchStdEvent": "DMB_SPEC" + }, + { + "ArchStdEvent": "RC_LD_SPEC" + }, + { + "ArchStdEvent": "RC_ST_SPEC" + }, + { + "ArchStdEvent": "ASE_INST_SPEC" + }, + { + "ArchStdEvent": "SVE_INST_SPEC" + }, + { + "ArchStdEvent": "FP_HP_SPEC" + }, + { + "ArchStdEvent": "FP_SP_SPEC" + }, + { + "ArchStdEvent": "FP_DP_SPEC" + }, + { + "ArchStdEvent": "SVE_PRED_SPEC" + }, + { + "ArchStdEvent": "SVE_PRED_EMPTY_SPEC" + }, + { + "ArchStdEvent": "SVE_PRED_FULL_SPEC" + }, + { + "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC" + }, + { + "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC" + }, + { + "ArchStdEvent": "SVE_LDFF_SPEC" + }, + { + "ArchStdEvent": "SVE_LDFF_FAULT_SPEC" + }, + { + "ArchStdEvent": "FP_SCALE_OPS_SPEC" + }, + { + "ArchStdEvent": "FP_FIXED_OPS_SPEC" + }, + { + "ArchStdEvent": "ASE_SVE_INT8_SPEC" + }, + { + "ArchStdEvent": "ASE_SVE_INT16_SPEC" + }, + { + "ArchStdEvent": "ASE_SVE_INT32_SPEC" + }, + { + "ArchStdEvent": "ASE_SVE_INT64_SPEC" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/memory.json new file mode 100644 index 000000000000..e522113aeb96 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/memory.json @@ -0,0 +1,38 @@ +[ + { + "ArchStdEvent": "MEM_ACCESS" + }, + { + "ArchStdEvent": "MEM_ACCESS_RD" + }, + { + "ArchStdEvent": "MEM_ACCESS_WR" + }, + { + "ArchStdEvent": "UNALIGNED_LD_SPEC" + }, + { + "ArchStdEvent": "UNALIGNED_ST_SPEC" + }, + { + "ArchStdEvent": "UNALIGNED_LDST_SPEC" + }, + { + "ArchStdEvent": "LDST_ALIGN_LAT" + }, + { + "ArchStdEvent": "LD_ALIGN_LAT" + }, + { + "ArchStdEvent": "ST_ALIGN_LAT" + }, + { + "ArchStdEvent": "MEM_ACCESS_CHECKED" + }, + { + "ArchStdEvent": "MEM_ACCESS_CHECKED_RD" + }, + { + "ArchStdEvent": "MEM_ACCESS_CHECKED_WR" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/other.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/other.json new file mode 100644 index 000000000000..20d8365756c5 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/other.json @@ -0,0 +1,5 @@ +[ + { + "ArchStdEvent": "REMOTE_ACCESS" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/pipeline.json new file mode 100644 index 000000000000..f9fae15f7555 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/pipeline.json @@ -0,0 +1,23 @@ +[ + { + "ArchStdEvent": "STALL_FRONTEND" + }, + { + "ArchStdEvent": "STALL_BACKEND" + }, + { + "ArchStdEvent": "STALL" + }, + { + "ArchStdEvent": "STALL_SLOT_BACKEND" + }, + { + "ArchStdEvent": "STALL_SLOT_FRONTEND" + }, + { + "ArchStdEvent": "STALL_SLOT" + }, + { + "ArchStdEvent": "STALL_BACKEND_MEM" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/spe.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/spe.json new file mode 100644 index 000000000000..20f2165c85fe --- /dev/null +++ 
b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/spe.json @@ -0,0 +1,14 @@ +[ + { + "ArchStdEvent": "SAMPLE_POP" + }, + { + "ArchStdEvent": "SAMPLE_FEED" + }, + { + "ArchStdEvent": "SAMPLE_FILTRATE" + }, + { + "ArchStdEvent": "SAMPLE_COLLISION" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/trace.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/trace.json new file mode 100644 index 000000000000..3116135c59e2 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/trace.json @@ -0,0 +1,29 @@ +[ + { + "ArchStdEvent": "TRB_WRAP" + }, + { + "ArchStdEvent": "TRCEXTOUT0" + }, + { + "ArchStdEvent": "TRCEXTOUT1" + }, + { + "ArchStdEvent": "TRCEXTOUT2" + }, + { + "ArchStdEvent": "TRCEXTOUT3" + }, + { + "ArchStdEvent": "CTI_TRIGOUT4" + }, + { + "ArchStdEvent": "CTI_TRIGOUT5" + }, + { + "ArchStdEvent": "CTI_TRIGOUT6" + }, + { + "ArchStdEvent": "CTI_TRIGOUT7" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json index 423767510aff..80d7a70829a0 100644 --- a/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json +++ b/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json @@ -299,6 +299,30 @@ "EventName": "STALL_SLOT", "BriefDescription": "No operation sent for execution on a slot" }, + { + "PublicDescription": "Sample Population", + "EventCode": "0x4000", + "EventName": "SAMPLE_POP", + "BriefDescription": "Sample Population" + }, + { + "PublicDescription": "Sample Taken", + "EventCode": "0x4001", + "EventName": "SAMPLE_FEED", + "BriefDescription": "Sample Taken" + }, + { + "PublicDescription": "Sample Taken and not removed by filtering", + "EventCode": "0x4002", + "EventName": "SAMPLE_FILTRATE", + "BriefDescription": "Sample Taken and not removed by filtering" + }, + { + "PublicDescription": "Sample collided with previous sample", + "EventCode": "0x4003", + "EventName": "SAMPLE_COLLISION", + "BriefDescription": "Sample collided with previous sample" + }, { "PublicDescription": "Constant frequency cycles. 
The counter increments at a constant frequency equal to the rate of increment of the system counter, CNTPCT_EL0.", "EventCode": "0x4004", @@ -329,6 +353,96 @@ "EventName": "L3D_CACHE_LMISS_RD", "BriefDescription": "Level 3 data cache long-latency read miss" }, + { + "PublicDescription": "Trace buffer current write pointer wrapped", + "EventCode": "0x400C", + "EventName": "TRB_WRAP", + "BriefDescription": "Trace buffer current write pointer wrapped" + }, + { + "PublicDescription": "PE Trace Unit external output 0", + "EventCode": "0x4010", + "EventName": "TRCEXTOUT0", + "BriefDescription": "PE Trace Unit external output 0" + }, + { + "PublicDescription": "PE Trace Unit external output 1", + "EventCode": "0x4011", + "EventName": "TRCEXTOUT1", + "BriefDescription": "PE Trace Unit external output 1" + }, + { + "PublicDescription": "PE Trace Unit external output 2", + "EventCode": "0x4012", + "EventName": "TRCEXTOUT2", + "BriefDescription": "PE Trace Unit external output 2" + }, + { + "PublicDescription": "PE Trace Unit external output 3", + "EventCode": "0x4013", + "EventName": "TRCEXTOUT3", + "BriefDescription": "PE Trace Unit external output 3" + }, + { + "PublicDescription": "Cross-trigger Interface output trigger 4", + "EventCode": "0x4018", + "EventName": "CTI_TRIGOUT4", + "BriefDescription": "Cross-trigger Interface output trigger 4" + }, + { + "PublicDescription": "Cross-trigger Interface output trigger 5 ", + "EventCode": "0x4019", + "EventName": "CTI_TRIGOUT5", + "BriefDescription": "Cross-trigger Interface output trigger 5 " + }, + { + "PublicDescription": "Cross-trigger Interface output trigger 6", + "EventCode": "0x401A", + "EventName": "CTI_TRIGOUT6", + "BriefDescription": "Cross-trigger Interface output trigger 6" + }, + { + "PublicDescription": "Cross-trigger Interface output trigger 7", + "EventCode": "0x401B", + "EventName": "CTI_TRIGOUT7", + "BriefDescription": "Cross-trigger Interface output trigger 7" + }, + { + "PublicDescription": "Access with additional latency from alignment", + "EventCode": "0x4020", + "EventName": "LDST_ALIGN_LAT", + "BriefDescription": "Access with additional latency from alignment" + }, + { + "PublicDescription": "Load with additional latency from alignment", + "EventCode": "0x4021", + "EventName": "LD_ALIGN_LAT", + "BriefDescription": "Load with additional latency from alignment" + }, + { + "PublicDescription": "Store with additional latency from alignment", + "EventCode": "0x4022", + "EventName": "ST_ALIGN_LAT", + "BriefDescription": "Store with additional latency from alignment" + }, + { + "PublicDescription": "Checked data memory access", + "EventCode": "0x4024", + "EventName": "MEM_ACCESS_CHECKED", + "BriefDescription": "Checked data memory access" + }, + { + "PublicDescription": "Checked data memory access, read", + "EventCode": "0x4025", + "EventName": "MEM_ACCESS_CHECKED_RD", + "BriefDescription": "Checked data memory access, read" + }, + { + "PublicDescription": "Checked data memory access, write", + "EventCode": "0x4026", + "EventName": "MEM_ACCESS_CHECKED_WR", + "BriefDescription": "Checked data memory access, write" + }, { "PublicDescription": "SIMD Instruction architecturally executed.", "EventCode": "0x8000", @@ -341,6 +455,18 @@ "EventName": "SVE_INST_RETIRED", "BriefDescription": "Instruction architecturally executed, SVE." 
}, + { + "PublicDescription": "ASE operations speculatively executed", + "EventCode": "0x8005", + "EventName": "ASE_INST_SPEC", + "BriefDescription": "ASE operations speculatively executed" + }, + { + "PublicDescription": "SVE operations speculatively executed", + "EventCode": "0x8006", + "EventName": "SVE_INST_SPEC", + "BriefDescription": "SVE operations speculatively executed" + }, { "PublicDescription": "Microarchitectural operation, Operations speculatively executed.", "EventCode": "0x8008", @@ -359,6 +485,24 @@ "EventName": "FP_SPEC", "BriefDescription": "Floating-point Operations speculatively executed." }, + { + "PublicDescription": "Floating-point half-precision operations speculatively executed", + "EventCode": "0x8014", + "EventName": "FP_HP_SPEC", + "BriefDescription": "Floating-point half-precision operations speculatively executed" + }, + { + "PublicDescription": "Floating-point single-precision operations speculatively executed", + "EventCode": "0x8018", + "EventName": "FP_SP_SPEC", + "BriefDescription": "Floating-point single-precision operations speculatively executed" + }, + { + "PublicDescription": "Floating-point double-precision operations speculatively executed", + "EventCode": "0x801C", + "EventName": "FP_DP_SPEC", + "BriefDescription": "Floating-point double-precision operations speculatively executed" + }, { "PublicDescription": "Floating-point FMA Operations speculatively executed.", "EventCode": "0x8028", @@ -389,6 +533,30 @@ "EventName": "SVE_PRED_SPEC", "BriefDescription": "SVE predicated Operations speculatively executed." }, + { + "PublicDescription": "SVE predicated operations with no active predicates speculatively executed", + "EventCode": "0x8075", + "EventName": "SVE_PRED_EMPTY_SPEC", + "BriefDescription": "SVE predicated operations with no active predicates speculatively executed" + }, + { + "PublicDescription": "SVE predicated operations speculatively executed with all active predicates", + "EventCode": "0x8076", + "EventName": "SVE_PRED_FULL_SPEC", + "BriefDescription": "SVE predicated operations speculatively executed with all active predicates" + }, + { + "PublicDescription": "SVE predicated operations speculatively executed with partially active predicates", + "EventCode": "0x8077", + "EventName": "SVE_PRED_PARTIAL_SPEC", + "BriefDescription": "SVE predicated operations speculatively executed with partially active predicates" + }, + { + "PublicDescription": "SVE predicated operations with empty or partially active predicates", + "EventCode": "0x8079", + "EventName": "SVE_PRED_NOT_FULL_SPEC", + "BriefDescription": "SVE predicated operations with empty or partially active predicates" + }, { "PublicDescription": "SVE MOVPRFX Operations speculatively executed.", "EventCode": "0x807C", @@ -497,6 +665,12 @@ "EventName": "SVE_LDFF_SPEC", "BriefDescription": "SVE First-fault load Operations speculatively executed." }, + { + "PublicDescription": "SVE first-fault load operations speculatively executed which set FFR bit to 0", + "EventCode": "0x80BD", + "EventName": "SVE_LDFF_FAULT_SPEC", + "BriefDescription": "SVE first-fault load operations speculatively executed which set FFR bit to 0" + }, { "PublicDescription": "Scalable floating-point element Operations speculatively executed.", "EventCode": "0x80C0", @@ -544,5 +718,29 @@ "EventCode": "0x80C7", "EventName": "FP_DP_FIXED_OPS_SPEC", "BriefDescription": "Non-scalable double-precision floating-point element Operations speculatively executed." 
+ }, + { + "PublicDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed", + "EventCode": "0x80E3", + "EventName": "ASE_SVE_INT8_SPEC", + "BriefDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed" + }, + { + "PublicDescription": "Advanced SIMD and SVE 16-bit integer operations speculatively executed", + "EventCode": "0x80E7", + "EventName": "ASE_SVE_INT16_SPEC", + "BriefDescription": "Advanced SIMD and SVE 16-bit integer operations speculatively executed" + }, + { + "PublicDescription": "Advanced SIMD and SVE 32-bit integer operations speculatively executed", + "EventCode": "0x80EB", + "EventName": "ASE_SVE_INT32_SPEC", + "BriefDescription": "Advanced SIMD and SVE 32-bit integer operations speculatively executed" + }, + { + "PublicDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed", + "EventCode": "0x80EF", + "EventName": "ASE_SVE_INT64_SPEC", + "BriefDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed" } ] diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv index 31d8b57ca9bb..b899db48c12a 100644 --- a/tools/perf/pmu-events/arch/arm64/mapfile.csv +++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv @@ -19,6 +19,7 @@ 0x00000000410fd0b0,v1,arm/cortex-a76-n1,core 0x00000000410fd0c0,v1,arm/cortex-a76-n1,core 0x00000000410fd400,v1,arm/neoverse-v1,core +0x00000000410fd490,v1,arm/neoverse-n2,core 0x00000000420f5160,v1,cavium/thunderx2,core 0x00000000430f0af0,v1,cavium/thunderx2,core 0x00000000460f0010,v1,fujitsu/a64fx,core -- cgit v1.2.3 From 6732f10b11c63112ce34a064b247c00e090b9514 Mon Sep 17 00:00:00 2001 From: Andrew Kilroy Date: Fri, 10 Dec 2021 12:37:05 +0000 Subject: perf vendor events: Rename arm64 arch std event files A previous commit adds pmu events into the files armv8-common-and-microarch.json armv8-recommended.json that are actually specified in an armv9 reference supplement, not armv8. As such, naming the files with the armv8 prefix seems artificial. This patch renames the files to reflect that these two files are for arch std events regardless of whether they are defined in armv8 or armv9. 
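The content itself is unchanged; the rename amounts to (illustrative commands, shown in the diff below as a full delete plus create):

  $ cd tools/perf/pmu-events/arch/arm64
  $ git mv armv8-common-and-microarch.json common-and-microarch.json
  $ git mv armv8-recommended.json recommended.json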
Reviewed-by: John Garry Signed-off-by: Andrew Kilroy Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211210123706.7490-3-andrew.kilroy@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- .../arch/arm64/armv8-common-and-microarch.json | 746 --------------------- .../pmu-events/arch/arm64/armv8-recommended.json | 452 ------------- .../arch/arm64/common-and-microarch.json | 746 +++++++++++++++++++++ tools/perf/pmu-events/arch/arm64/recommended.json | 452 +++++++++++++ 4 files changed, 1198 insertions(+), 1198 deletions(-) delete mode 100644 tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json delete mode 100644 tools/perf/pmu-events/arch/arm64/armv8-recommended.json create mode 100644 tools/perf/pmu-events/arch/arm64/common-and-microarch.json create mode 100644 tools/perf/pmu-events/arch/arm64/recommended.json diff --git a/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json deleted file mode 100644 index 80d7a70829a0..000000000000 --- a/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json +++ /dev/null @@ -1,746 +0,0 @@ -[ - { - "PublicDescription": "Instruction architecturally executed, Condition code check pass, software increment", - "EventCode": "0x00", - "EventName": "SW_INCR", - "BriefDescription": "Instruction architecturally executed, Condition code check pass, software increment" - }, - { - "PublicDescription": "Level 1 instruction cache refill", - "EventCode": "0x01", - "EventName": "L1I_CACHE_REFILL", - "BriefDescription": "Level 1 instruction cache refill" - }, - { - "PublicDescription": "Attributable Level 1 instruction TLB refill", - "EventCode": "0x02", - "EventName": "L1I_TLB_REFILL", - "BriefDescription": "Attributable Level 1 instruction TLB refill" - }, - { - "PublicDescription": "Level 1 data cache refill", - "EventCode": "0x03", - "EventName": "L1D_CACHE_REFILL", - "BriefDescription": "Level 1 data cache refill" - }, - { - "PublicDescription": "Level 1 data cache access", - "EventCode": "0x04", - "EventName": "L1D_CACHE", - "BriefDescription": "Level 1 data cache access" - }, - { - "PublicDescription": "Attributable Level 1 data TLB refill", - "EventCode": "0x05", - "EventName": "L1D_TLB_REFILL", - "BriefDescription": "Attributable Level 1 data TLB refill" - }, - { - "PublicDescription": "Instruction architecturally executed", - "EventCode": "0x08", - "EventName": "INST_RETIRED", - "BriefDescription": "Instruction architecturally executed" - }, - { - "PublicDescription": "Exception taken", - "EventCode": "0x09", - "EventName": "EXC_TAKEN", - "BriefDescription": "Exception taken" - }, - { - "PublicDescription": "Instruction architecturally executed, condition check pass, exception return", - "EventCode": "0x0a", - "EventName": "EXC_RETURN", - "BriefDescription": "Instruction architecturally executed, condition check pass, exception return" - }, - { - "PublicDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR", - "EventCode": "0x0b", - "EventName": "CID_WRITE_RETIRED", - "BriefDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR" - }, - { - "PublicDescription": "Mispredicted or not predicted branch speculatively executed", - "EventCode": "0x10", - "EventName": "BR_MIS_PRED", - "BriefDescription": "Mispredicted or not predicted branch 
speculatively executed" - }, - { - "PublicDescription": "Cycle", - "EventCode": "0x11", - "EventName": "CPU_CYCLES", - "BriefDescription": "Cycle" - }, - { - "PublicDescription": "Predictable branch speculatively executed", - "EventCode": "0x12", - "EventName": "BR_PRED", - "BriefDescription": "Predictable branch speculatively executed" - }, - { - "PublicDescription": "Data memory access", - "EventCode": "0x13", - "EventName": "MEM_ACCESS", - "BriefDescription": "Data memory access" - }, - { - "PublicDescription": "Attributable Level 1 instruction cache access", - "EventCode": "0x14", - "EventName": "L1I_CACHE", - "BriefDescription": "Attributable Level 1 instruction cache access" - }, - { - "PublicDescription": "Attributable Level 1 data cache write-back", - "EventCode": "0x15", - "EventName": "L1D_CACHE_WB", - "BriefDescription": "Attributable Level 1 data cache write-back" - }, - { - "PublicDescription": "Level 2 data cache access", - "EventCode": "0x16", - "EventName": "L2D_CACHE", - "BriefDescription": "Level 2 data cache access" - }, - { - "PublicDescription": "Level 2 data refill", - "EventCode": "0x17", - "EventName": "L2D_CACHE_REFILL", - "BriefDescription": "Level 2 data refill" - }, - { - "PublicDescription": "Attributable Level 2 data cache write-back", - "EventCode": "0x18", - "EventName": "L2D_CACHE_WB", - "BriefDescription": "Attributable Level 2 data cache write-back" - }, - { - "PublicDescription": "Attributable Bus access", - "EventCode": "0x19", - "EventName": "BUS_ACCESS", - "BriefDescription": "Attributable Bus access" - }, - { - "PublicDescription": "Local memory error", - "EventCode": "0x1a", - "EventName": "MEMORY_ERROR", - "BriefDescription": "Local memory error" - }, - { - "PublicDescription": "Operation speculatively executed", - "EventCode": "0x1b", - "EventName": "INST_SPEC", - "BriefDescription": "Operation speculatively executed" - }, - { - "PublicDescription": "Instruction architecturally executed, Condition code check pass, write to TTBR", - "EventCode": "0x1c", - "EventName": "TTBR_WRITE_RETIRED", - "BriefDescription": "Instruction architecturally executed, Condition code check pass, write to TTBR" - }, - { - "PublicDescription": "Bus cycle", - "EventCode": "0x1D", - "EventName": "BUS_CYCLES", - "BriefDescription": "Bus cycle" - }, - { - "PublicDescription": "Attributable Level 2 data cache allocation without refill", - "EventCode": "0x20", - "EventName": "L2D_CACHE_ALLOCATE", - "BriefDescription": "Attributable Level 2 data cache allocation without refill" - }, - { - "PublicDescription": "Instruction architecturally executed, branch", - "EventCode": "0x21", - "EventName": "BR_RETIRED", - "BriefDescription": "Instruction architecturally executed, branch" - }, - { - "PublicDescription": "Instruction architecturally executed, mispredicted branch", - "EventCode": "0x22", - "EventName": "BR_MIS_PRED_RETIRED", - "BriefDescription": "Instruction architecturally executed, mispredicted branch" - }, - { - "PublicDescription": "No operation issued because of the frontend", - "EventCode": "0x23", - "EventName": "STALL_FRONTEND", - "BriefDescription": "No operation issued because of the frontend" - }, - { - "PublicDescription": "No operation issued due to the backend", - "EventCode": "0x24", - "EventName": "STALL_BACKEND", - "BriefDescription": "No operation issued due to the backend" - }, - { - "PublicDescription": "Attributable Level 1 data or unified TLB access", - "EventCode": "0x25", - "EventName": "L1D_TLB", - "BriefDescription": "Attributable Level 1 data or 
unified TLB access" - }, - { - "PublicDescription": "Attributable Level 1 instruction TLB access", - "EventCode": "0x26", - "EventName": "L1I_TLB", - "BriefDescription": "Attributable Level 1 instruction TLB access" - }, - { - "PublicDescription": "Attributable Level 3 data cache allocation without refill", - "EventCode": "0x29", - "EventName": "L3D_CACHE_ALLOCATE", - "BriefDescription": "Attributable Level 3 data cache allocation without refill" - }, - { - "PublicDescription": "Attributable Level 3 data cache refill", - "EventCode": "0x2A", - "EventName": "L3D_CACHE_REFILL", - "BriefDescription": "Attributable Level 3 data cache refill" - }, - { - "PublicDescription": "Attributable Level 3 data cache access", - "EventCode": "0x2B", - "EventName": "L3D_CACHE", - "BriefDescription": "Attributable Level 3 data cache access" - }, - { - "PublicDescription": "Attributable Level 2 data TLB refill", - "EventCode": "0x2D", - "EventName": "L2D_TLB_REFILL", - "BriefDescription": "Attributable Level 2 data TLB refill" - }, - { - "PublicDescription": "Attributable Level 2 instruction TLB refill.", - "EventCode": "0x2E", - "EventName": "L2I_TLB_REFILL", - "BriefDescription": "Attributable Level 2 instruction TLB refill." - }, - { - "PublicDescription": "Attributable Level 2 data or unified TLB access", - "EventCode": "0x2F", - "EventName": "L2D_TLB", - "BriefDescription": "Attributable Level 2 data or unified TLB access" - }, - { - "PublicDescription": "Attributable Level 2 instruction TLB access.", - "EventCode": "0x30", - "EventName": "L2I_TLB", - "BriefDescription": "Attributable Level 2 instruction TLB access." - }, - { - "PublicDescription": "Access to another socket in a multi-socket system", - "EventCode": "0x31", - "EventName": "REMOTE_ACCESS", - "BriefDescription": "Access to another socket in a multi-socket system" - }, - { - "PublicDescription": "Access to data TLB causes a translation table walk", - "EventCode": "0x34", - "EventName": "DTLB_WALK", - "BriefDescription": "Access to data TLB causes a translation table walk" - }, - { - "PublicDescription": "Access to instruction TLB that causes a translation table walk", - "EventCode": "0x35", - "EventName": "ITLB_WALK", - "BriefDescription": "Access to instruction TLB that causes a translation table walk" - }, - { - "PublicDescription": "Attributable Last level cache memory read", - "EventCode": "0x36", - "EventName": "LL_CACHE_RD", - "BriefDescription": "Attributable Last level cache memory read" - }, - { - "PublicDescription": "Last level cache miss, read", - "EventCode": "0x37", - "EventName": "LL_CACHE_MISS_RD", - "BriefDescription": "Last level cache miss, read" - }, - { - "PublicDescription": "Level 1 data cache long-latency read miss. The counter counts each memory read access counted by L1D_CACHE that incurs additional latency because it returns data from outside the Level 1 data or unified cache of this processing element.", - "EventCode": "0x39", - "EventName": "L1D_CACHE_LMISS_RD", - "BriefDescription": "Level 1 data cache long-latency read miss" - }, - { - "PublicDescription": "Micro-operation architecturally executed. The counter counts each operation counted by OP_SPEC that would be executed in a simple sequential execution of the program.", - "EventCode": "0x3A", - "EventName": "OP_RETIRED", - "BriefDescription": "Micro-operation architecturally executed" - }, - { - "PublicDescription": "Micro-operation speculatively executed. 
The counter counts the number of operations executed by the processing element, including those that are executed speculatively and would not be executed in a simple sequential execution of the program.", - "EventCode": "0x3B", - "EventName": "OP_SPEC", - "BriefDescription": "Micro-operation speculatively executed" - }, - { - "PublicDescription": "No operation sent for execution. The counter counts every attributable cycle on which no attributable instruction or operation was sent for execution on this processing element.", - "EventCode": "0x3C", - "EventName": "STALL", - "BriefDescription": "No operation sent for execution" - }, - { - "PublicDescription": "No operation sent for execution on a slot due to the backend. Counts each slot counted by STALL_SLOT where no attributable instruction or operation was sent for execution because the backend is unable to accept it.", - "EventCode": "0x3D", - "EventName": "STALL_SLOT_BACKEND", - "BriefDescription": "No operation sent for execution on a slot due to the backend" - }, - { - "PublicDescription": "No operation sent for execution on a slot due to the frontend. Counts each slot counted by STALL_SLOT where no attributable instruction or operation was sent for execution because there was no attributable instruction or operation available to issue from the processing element from the frontend for the slot.", - "EventCode": "0x3E", - "EventName": "STALL_SLOT_FRONTEND", - "BriefDescription": "No operation sent for execution on a slot due to the frontend" - }, - { - "PublicDescription": "No operation sent for execution on a slot. The counter counts on each attributable cycle the number of instruction or operation slots that were not occupied by an instruction or operation attributable to the processing element.", - "EventCode": "0x3F", - "EventName": "STALL_SLOT", - "BriefDescription": "No operation sent for execution on a slot" - }, - { - "PublicDescription": "Sample Population", - "EventCode": "0x4000", - "EventName": "SAMPLE_POP", - "BriefDescription": "Sample Population" - }, - { - "PublicDescription": "Sample Taken", - "EventCode": "0x4001", - "EventName": "SAMPLE_FEED", - "BriefDescription": "Sample Taken" - }, - { - "PublicDescription": "Sample Taken and not removed by filtering", - "EventCode": "0x4002", - "EventName": "SAMPLE_FILTRATE", - "BriefDescription": "Sample Taken and not removed by filtering" - }, - { - "PublicDescription": "Sample collided with previous sample", - "EventCode": "0x4003", - "EventName": "SAMPLE_COLLISION", - "BriefDescription": "Sample collided with previous sample" - }, - { - "PublicDescription": "Constant frequency cycles. The counter increments at a constant frequency equal to the rate of increment of the system counter, CNTPCT_EL0.", - "EventCode": "0x4004", - "EventName": "CNT_CYCLES", - "BriefDescription": "Constant frequency cycles" - }, - { - "PublicDescription": "Memory stall cycles. The counter counts each cycle counted by STALL_BACKEND where there is a cache miss in the last level of cache within the processing element clock domain", - "EventCode": "0x4005", - "EventName": "STALL_BACKEND_MEM", - "BriefDescription": "Memory stall cycles" - }, - { - "PublicDescription": "Level 1 instruction cache long-latency read miss. If the L1I_CACHE_RD event is implemented, the counter counts each access counted by L1I_CACHE_RD that incurs additional latency because it returns instructions from outside of the Level 1 instruction cache of this PE. 
If the L1I_CACHE_RD event is not implemented, the counter counts each access counted by L1I_CACHE that incurs additional latency because it returns instructions from outside the Level 1 instruction cache of this PE. The event indicates to software that the access missed in the Level 1 instruction cache and might have a significant performance impact due to the additional latency, compared to the latency of an access that hits in the Level 1 instruction cache.", - "EventCode": "0x4006", - "EventName": "L1I_CACHE_LMISS", - "BriefDescription": "Level 1 instruction cache long-latency read miss" - }, - { - "PublicDescription": "Level 2 data cache long-latency read miss. The counter counts each memory read access counted by L2D_CACHE that incurs additional latency because it returns data from outside the Level 2 data or unified cache of this processing element. The event indicates to software that the access missed in the Level 2 data or unified cache and might have a significant performance impact compared to the latency of an access that hits in the Level 2 data or unified cache.", - "EventCode": "0x4009", - "EventName": "L2D_CACHE_LMISS_RD", - "BriefDescription": "Level 2 data cache long-latency read miss" - }, - { - "PublicDescription": "Level 3 data cache long-latency read miss. The counter counts each memory read access counted by L3D_CACHE that incurs additional latency because it returns data from outside the Level 3 data or unified cache of this processing element. The event indicates to software that the access missed in the Level 3 data or unified cache and might have a significant performance impact compared to the latency of an access that hits in the Level 3 data or unified cache.", - "EventCode": "0x400B", - "EventName": "L3D_CACHE_LMISS_RD", - "BriefDescription": "Level 3 data cache long-latency read miss" - }, - { - "PublicDescription": "Trace buffer current write pointer wrapped", - "EventCode": "0x400C", - "EventName": "TRB_WRAP", - "BriefDescription": "Trace buffer current write pointer wrapped" - }, - { - "PublicDescription": "PE Trace Unit external output 0", - "EventCode": "0x4010", - "EventName": "TRCEXTOUT0", - "BriefDescription": "PE Trace Unit external output 0" - }, - { - "PublicDescription": "PE Trace Unit external output 1", - "EventCode": "0x4011", - "EventName": "TRCEXTOUT1", - "BriefDescription": "PE Trace Unit external output 1" - }, - { - "PublicDescription": "PE Trace Unit external output 2", - "EventCode": "0x4012", - "EventName": "TRCEXTOUT2", - "BriefDescription": "PE Trace Unit external output 2" - }, - { - "PublicDescription": "PE Trace Unit external output 3", - "EventCode": "0x4013", - "EventName": "TRCEXTOUT3", - "BriefDescription": "PE Trace Unit external output 3" - }, - { - "PublicDescription": "Cross-trigger Interface output trigger 4", - "EventCode": "0x4018", - "EventName": "CTI_TRIGOUT4", - "BriefDescription": "Cross-trigger Interface output trigger 4" - }, - { - "PublicDescription": "Cross-trigger Interface output trigger 5 ", - "EventCode": "0x4019", - "EventName": "CTI_TRIGOUT5", - "BriefDescription": "Cross-trigger Interface output trigger 5 " - }, - { - "PublicDescription": "Cross-trigger Interface output trigger 6", - "EventCode": "0x401A", - "EventName": "CTI_TRIGOUT6", - "BriefDescription": "Cross-trigger Interface output trigger 6" - }, - { - "PublicDescription": "Cross-trigger Interface output trigger 7", - "EventCode": "0x401B", - "EventName": "CTI_TRIGOUT7", - "BriefDescription": "Cross-trigger Interface output trigger 7" - }, - { - 
"PublicDescription": "Access with additional latency from alignment", - "EventCode": "0x4020", - "EventName": "LDST_ALIGN_LAT", - "BriefDescription": "Access with additional latency from alignment" - }, - { - "PublicDescription": "Load with additional latency from alignment", - "EventCode": "0x4021", - "EventName": "LD_ALIGN_LAT", - "BriefDescription": "Load with additional latency from alignment" - }, - { - "PublicDescription": "Store with additional latency from alignment", - "EventCode": "0x4022", - "EventName": "ST_ALIGN_LAT", - "BriefDescription": "Store with additional latency from alignment" - }, - { - "PublicDescription": "Checked data memory access", - "EventCode": "0x4024", - "EventName": "MEM_ACCESS_CHECKED", - "BriefDescription": "Checked data memory access" - }, - { - "PublicDescription": "Checked data memory access, read", - "EventCode": "0x4025", - "EventName": "MEM_ACCESS_CHECKED_RD", - "BriefDescription": "Checked data memory access, read" - }, - { - "PublicDescription": "Checked data memory access, write", - "EventCode": "0x4026", - "EventName": "MEM_ACCESS_CHECKED_WR", - "BriefDescription": "Checked data memory access, write" - }, - { - "PublicDescription": "SIMD Instruction architecturally executed.", - "EventCode": "0x8000", - "EventName": "SIMD_INST_RETIRED", - "BriefDescription": "SIMD Instruction architecturally executed." - }, - { - "PublicDescription": "Instruction architecturally executed, SVE.", - "EventCode": "0x8002", - "EventName": "SVE_INST_RETIRED", - "BriefDescription": "Instruction architecturally executed, SVE." - }, - { - "PublicDescription": "ASE operations speculatively executed", - "EventCode": "0x8005", - "EventName": "ASE_INST_SPEC", - "BriefDescription": "ASE operations speculatively executed" - }, - { - "PublicDescription": "SVE operations speculatively executed", - "EventCode": "0x8006", - "EventName": "SVE_INST_SPEC", - "BriefDescription": "SVE operations speculatively executed" - }, - { - "PublicDescription": "Microarchitectural operation, Operations speculatively executed.", - "EventCode": "0x8008", - "EventName": "UOP_SPEC", - "BriefDescription": "Microarchitectural operation, Operations speculatively executed." - }, - { - "PublicDescription": "SVE Math accelerator Operations speculatively executed.", - "EventCode": "0x800E", - "EventName": "SVE_MATH_SPEC", - "BriefDescription": "SVE Math accelerator Operations speculatively executed." - }, - { - "PublicDescription": "Floating-point Operations speculatively executed.", - "EventCode": "0x8010", - "EventName": "FP_SPEC", - "BriefDescription": "Floating-point Operations speculatively executed." 
- }, - { - "PublicDescription": "Floating-point half-precision operations speculatively executed", - "EventCode": "0x8014", - "EventName": "FP_HP_SPEC", - "BriefDescription": "Floating-point half-precision operations speculatively executed" - }, - { - "PublicDescription": "Floating-point single-precision operations speculatively executed", - "EventCode": "0x8018", - "EventName": "FP_SP_SPEC", - "BriefDescription": "Floating-point single-precision operations speculatively executed" - }, - { - "PublicDescription": "Floating-point double-precision operations speculatively executed", - "EventCode": "0x801C", - "EventName": "FP_DP_SPEC", - "BriefDescription": "Floating-point double-precision operations speculatively executed" - }, - { - "PublicDescription": "Floating-point FMA Operations speculatively executed.", - "EventCode": "0x8028", - "EventName": "FP_FMA_SPEC", - "BriefDescription": "Floating-point FMA Operations speculatively executed." - }, - { - "PublicDescription": "Floating-point reciprocal estimate Operations speculatively executed.", - "EventCode": "0x8034", - "EventName": "FP_RECPE_SPEC", - "BriefDescription": "Floating-point reciprocal estimate Operations speculatively executed." - }, - { - "PublicDescription": "floating-point convert Operations speculatively executed.", - "EventCode": "0x8038", - "EventName": "FP_CVT_SPEC", - "BriefDescription": "floating-point convert Operations speculatively executed." - }, - { - "PublicDescription": "Advanced SIMD and SVE integer Operations speculatively executed.", - "EventCode": "0x8043", - "EventName": "ASE_SVE_INT_SPEC", - "BriefDescription": "Advanced SIMD and SVE integer Operations speculatively executed." - }, - { - "PublicDescription": "SVE predicated Operations speculatively executed.", - "EventCode": "0x8074", - "EventName": "SVE_PRED_SPEC", - "BriefDescription": "SVE predicated Operations speculatively executed." - }, - { - "PublicDescription": "SVE predicated operations with no active predicates speculatively executed", - "EventCode": "0x8075", - "EventName": "SVE_PRED_EMPTY_SPEC", - "BriefDescription": "SVE predicated operations with no active predicates speculatively executed" - }, - { - "PublicDescription": "SVE predicated operations speculatively executed with all active predicates", - "EventCode": "0x8076", - "EventName": "SVE_PRED_FULL_SPEC", - "BriefDescription": "SVE predicated operations speculatively executed with all active predicates" - }, - { - "PublicDescription": "SVE predicated operations speculatively executed with partially active predicates", - "EventCode": "0x8077", - "EventName": "SVE_PRED_PARTIAL_SPEC", - "BriefDescription": "SVE predicated operations speculatively executed with partially active predicates" - }, - { - "PublicDescription": "SVE predicated operations with empty or partially active predicates", - "EventCode": "0x8079", - "EventName": "SVE_PRED_NOT_FULL_SPEC", - "BriefDescription": "SVE predicated operations with empty or partially active predicates" - }, - { - "PublicDescription": "SVE MOVPRFX Operations speculatively executed.", - "EventCode": "0x807C", - "EventName": "SVE_MOVPRFX_SPEC", - "BriefDescription": "SVE MOVPRFX Operations speculatively executed." - }, - { - "PublicDescription": "SVE MOVPRFX unfused Operations speculatively executed.", - "EventCode": "0x807F", - "EventName": "SVE_MOVPRFX_U_SPEC", - "BriefDescription": "SVE MOVPRFX unfused Operations speculatively executed." 
- }, - { - "PublicDescription": "Advanced SIMD and SVE load Operations speculatively executed.", - "EventCode": "0x8085", - "EventName": "ASE_SVE_LD_SPEC", - "BriefDescription": "Advanced SIMD and SVE load Operations speculatively executed." - }, - { - "PublicDescription": "Advanced SIMD and SVE store Operations speculatively executed.", - "EventCode": "0x8086", - "EventName": "ASE_SVE_ST_SPEC", - "BriefDescription": "Advanced SIMD and SVE store Operations speculatively executed." - }, - { - "PublicDescription": "Prefetch Operations speculatively executed.", - "EventCode": "0x8087", - "EventName": "PRF_SPEC", - "BriefDescription": "Prefetch Operations speculatively executed." - }, - { - "PublicDescription": "General-purpose register load Operations speculatively executed.", - "EventCode": "0x8089", - "EventName": "BASE_LD_REG_SPEC", - "BriefDescription": "General-purpose register load Operations speculatively executed." - }, - { - "PublicDescription": "General-purpose register store Operations speculatively executed.", - "EventCode": "0x808A", - "EventName": "BASE_ST_REG_SPEC", - "BriefDescription": "General-purpose register store Operations speculatively executed." - }, - { - "PublicDescription": "SVE unpredicated load register Operations speculatively executed.", - "EventCode": "0x8091", - "EventName": "SVE_LDR_REG_SPEC", - "BriefDescription": "SVE unpredicated load register Operations speculatively executed." - }, - { - "PublicDescription": "SVE unpredicated store register Operations speculatively executed.", - "EventCode": "0x8092", - "EventName": "SVE_STR_REG_SPEC", - "BriefDescription": "SVE unpredicated store register Operations speculatively executed." - }, - { - "PublicDescription": "SVE load predicate register Operations speculatively executed.", - "EventCode": "0x8095", - "EventName": "SVE_LDR_PREG_SPEC", - "BriefDescription": "SVE load predicate register Operations speculatively executed." - }, - { - "PublicDescription": "SVE store predicate register Operations speculatively executed.", - "EventCode": "0x8096", - "EventName": "SVE_STR_PREG_SPEC", - "BriefDescription": "SVE store predicate register Operations speculatively executed." - }, - { - "PublicDescription": "SVE contiguous prefetch element Operations speculatively executed.", - "EventCode": "0x809F", - "EventName": "SVE_PRF_CONTIG_SPEC", - "BriefDescription": "SVE contiguous prefetch element Operations speculatively executed." - }, - { - "PublicDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed.", - "EventCode": "0x80A5", - "EventName": "ASE_SVE_LD_MULTI_SPEC", - "BriefDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed." - }, - { - "PublicDescription": "Advanced SIMD and SVE contiguous store multiple vector Operations speculatively executed.", - "EventCode": "0x80A6", - "EventName": "ASE_SVE_ST_MULTI_SPEC", - "BriefDescription": "Advanced SIMD and SVE contiguous store multiple vector Operations speculatively executed." - }, - { - "PublicDescription": "SVE gather-load Operations speculatively executed.", - "EventCode": "0x80AD", - "EventName": "SVE_LD_GATHER_SPEC", - "BriefDescription": "SVE gather-load Operations speculatively executed." - }, - { - "PublicDescription": "SVE scatter-store Operations speculatively executed.", - "EventCode": "0x80AE", - "EventName": "SVE_ST_SCATTER_SPEC", - "BriefDescription": "SVE scatter-store Operations speculatively executed." 
- }, - { - "PublicDescription": "SVE gather-prefetch Operations speculatively executed.", - "EventCode": "0x80AF", - "EventName": "SVE_PRF_GATHER_SPEC", - "BriefDescription": "SVE gather-prefetch Operations speculatively executed." - }, - { - "PublicDescription": "SVE First-fault load Operations speculatively executed.", - "EventCode": "0x80BC", - "EventName": "SVE_LDFF_SPEC", - "BriefDescription": "SVE First-fault load Operations speculatively executed." - }, - { - "PublicDescription": "SVE first-fault load operations speculatively executed which set FFR bit to 0", - "EventCode": "0x80BD", - "EventName": "SVE_LDFF_FAULT_SPEC", - "BriefDescription": "SVE first-fault load operations speculatively executed which set FFR bit to 0" - }, - { - "PublicDescription": "Scalable floating-point element Operations speculatively executed.", - "EventCode": "0x80C0", - "EventName": "FP_SCALE_OPS_SPEC", - "BriefDescription": "Scalable floating-point element Operations speculatively executed." - }, - { - "PublicDescription": "Non-scalable floating-point element Operations speculatively executed.", - "EventCode": "0x80C1", - "EventName": "FP_FIXED_OPS_SPEC", - "BriefDescription": "Non-scalable floating-point element Operations speculatively executed." - }, - { - "PublicDescription": "Scalable half-precision floating-point element Operations speculatively executed.", - "EventCode": "0x80C2", - "EventName": "FP_HP_SCALE_OPS_SPEC", - "BriefDescription": "Scalable half-precision floating-point element Operations speculatively executed." - }, - { - "PublicDescription": "Non-scalable half-precision floating-point element Operations speculatively executed.", - "EventCode": "0x80C3", - "EventName": "FP_HP_FIXED_OPS_SPEC", - "BriefDescription": "Non-scalable half-precision floating-point element Operations speculatively executed." - }, - { - "PublicDescription": "Scalable single-precision floating-point element Operations speculatively executed.", - "EventCode": "0x80C4", - "EventName": "FP_SP_SCALE_OPS_SPEC", - "BriefDescription": "Scalable single-precision floating-point element Operations speculatively executed." - }, - { - "PublicDescription": "Non-scalable single-precision floating-point element Operations speculatively executed.", - "EventCode": "0x80C5", - "EventName": "FP_SP_FIXED_OPS_SPEC", - "BriefDescription": "Non-scalable single-precision floating-point element Operations speculatively executed." - }, - { - "PublicDescription": "Scalable double-precision floating-point element Operations speculatively executed.", - "EventCode": "0x80C6", - "EventName": "FP_DP_SCALE_OPS_SPEC", - "BriefDescription": "Scalable double-precision floating-point element Operations speculatively executed." - }, - { - "PublicDescription": "Non-scalable double-precision floating-point element Operations speculatively executed.", - "EventCode": "0x80C7", - "EventName": "FP_DP_FIXED_OPS_SPEC", - "BriefDescription": "Non-scalable double-precision floating-point element Operations speculatively executed." 
- }, - { - "PublicDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed", - "EventCode": "0x80E3", - "EventName": "ASE_SVE_INT8_SPEC", - "BriefDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed" - }, - { - "PublicDescription": "Advanced SIMD and SVE 16-bit integer operations speculatively executed", - "EventCode": "0x80E7", - "EventName": "ASE_SVE_INT16_SPEC", - "BriefDescription": "Advanced SIMD and SVE 16-bit integer operations speculatively executed" - }, - { - "PublicDescription": "Advanced SIMD and SVE 32-bit integer operations speculatively executed", - "EventCode": "0x80EB", - "EventName": "ASE_SVE_INT32_SPEC", - "BriefDescription": "Advanced SIMD and SVE 32-bit integer operations speculatively executed" - }, - { - "PublicDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed", - "EventCode": "0x80EF", - "EventName": "ASE_SVE_INT64_SPEC", - "BriefDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed" - } -] diff --git a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json b/tools/perf/pmu-events/arch/arm64/armv8-recommended.json deleted file mode 100644 index 210afa856091..000000000000 --- a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json +++ /dev/null @@ -1,452 +0,0 @@ -[ - { - "PublicDescription": "Attributable Level 1 data cache access, read", - "EventCode": "0x40", - "EventName": "L1D_CACHE_RD", - "BriefDescription": "L1D cache access, read" - }, - { - "PublicDescription": "Attributable Level 1 data cache access, write", - "EventCode": "0x41", - "EventName": "L1D_CACHE_WR", - "BriefDescription": "L1D cache access, write" - }, - { - "PublicDescription": "Attributable Level 1 data cache refill, read", - "EventCode": "0x42", - "EventName": "L1D_CACHE_REFILL_RD", - "BriefDescription": "L1D cache refill, read" - }, - { - "PublicDescription": "Attributable Level 1 data cache refill, write", - "EventCode": "0x43", - "EventName": "L1D_CACHE_REFILL_WR", - "BriefDescription": "L1D cache refill, write" - }, - { - "PublicDescription": "Attributable Level 1 data cache refill, inner", - "EventCode": "0x44", - "EventName": "L1D_CACHE_REFILL_INNER", - "BriefDescription": "L1D cache refill, inner" - }, - { - "PublicDescription": "Attributable Level 1 data cache refill, outer", - "EventCode": "0x45", - "EventName": "L1D_CACHE_REFILL_OUTER", - "BriefDescription": "L1D cache refill, outer" - }, - { - "PublicDescription": "Attributable Level 1 data cache Write-Back, victim", - "EventCode": "0x46", - "EventName": "L1D_CACHE_WB_VICTIM", - "BriefDescription": "L1D cache Write-Back, victim" - }, - { - "PublicDescription": "Level 1 data cache Write-Back, cleaning and coherency", - "EventCode": "0x47", - "EventName": "L1D_CACHE_WB_CLEAN", - "BriefDescription": "L1D cache Write-Back, cleaning and coherency" - }, - { - "PublicDescription": "Attributable Level 1 data cache invalidate", - "EventCode": "0x48", - "EventName": "L1D_CACHE_INVAL", - "BriefDescription": "L1D cache invalidate" - }, - { - "PublicDescription": "Attributable Level 1 data TLB refill, read", - "EventCode": "0x4C", - "EventName": "L1D_TLB_REFILL_RD", - "BriefDescription": "L1D tlb refill, read" - }, - { - "PublicDescription": "Attributable Level 1 data TLB refill, write", - "EventCode": "0x4D", - "EventName": "L1D_TLB_REFILL_WR", - "BriefDescription": "L1D tlb refill, write" - }, - { - "PublicDescription": "Attributable Level 1 data or unified TLB access, read", - "EventCode": "0x4E", - "EventName": 
"L1D_TLB_RD", - "BriefDescription": "L1D tlb access, read" - }, - { - "PublicDescription": "Attributable Level 1 data or unified TLB access, write", - "EventCode": "0x4F", - "EventName": "L1D_TLB_WR", - "BriefDescription": "L1D tlb access, write" - }, - { - "PublicDescription": "Attributable Level 2 data cache access, read", - "EventCode": "0x50", - "EventName": "L2D_CACHE_RD", - "BriefDescription": "L2D cache access, read" - }, - { - "PublicDescription": "Attributable Level 2 data cache access, write", - "EventCode": "0x51", - "EventName": "L2D_CACHE_WR", - "BriefDescription": "L2D cache access, write" - }, - { - "PublicDescription": "Attributable Level 2 data cache refill, read", - "EventCode": "0x52", - "EventName": "L2D_CACHE_REFILL_RD", - "BriefDescription": "L2D cache refill, read" - }, - { - "PublicDescription": "Attributable Level 2 data cache refill, write", - "EventCode": "0x53", - "EventName": "L2D_CACHE_REFILL_WR", - "BriefDescription": "L2D cache refill, write" - }, - { - "PublicDescription": "Attributable Level 2 data cache Write-Back, victim", - "EventCode": "0x56", - "EventName": "L2D_CACHE_WB_VICTIM", - "BriefDescription": "L2D cache Write-Back, victim" - }, - { - "PublicDescription": "Level 2 data cache Write-Back, cleaning and coherency", - "EventCode": "0x57", - "EventName": "L2D_CACHE_WB_CLEAN", - "BriefDescription": "L2D cache Write-Back, cleaning and coherency" - }, - { - "PublicDescription": "Attributable Level 2 data cache invalidate", - "EventCode": "0x58", - "EventName": "L2D_CACHE_INVAL", - "BriefDescription": "L2D cache invalidate" - }, - { - "PublicDescription": "Attributable Level 2 data or unified TLB refill, read", - "EventCode": "0x5c", - "EventName": "L2D_TLB_REFILL_RD", - "BriefDescription": "L2D cache refill, read" - }, - { - "PublicDescription": "Attributable Level 2 data or unified TLB refill, write", - "EventCode": "0x5d", - "EventName": "L2D_TLB_REFILL_WR", - "BriefDescription": "L2D cache refill, write" - }, - { - "PublicDescription": "Attributable Level 2 data or unified TLB access, read", - "EventCode": "0x5e", - "EventName": "L2D_TLB_RD", - "BriefDescription": "L2D cache access, read" - }, - { - "PublicDescription": "Attributable Level 2 data or unified TLB access, write", - "EventCode": "0x5f", - "EventName": "L2D_TLB_WR", - "BriefDescription": "L2D cache access, write" - }, - { - "PublicDescription": "Bus access read", - "EventCode": "0x60", - "EventName": "BUS_ACCESS_RD", - "BriefDescription": "Bus access read" - }, - { - "PublicDescription": "Bus access write", - "EventCode": "0x61", - "EventName": "BUS_ACCESS_WR", - "BriefDescription": "Bus access write" - }, - { - "PublicDescription": "Bus access, Normal, Cacheable, Shareable", - "EventCode": "0x62", - "EventName": "BUS_ACCESS_SHARED", - "BriefDescription": "Bus access, Normal, Cacheable, Shareable" - }, - { - "PublicDescription": "Bus access, not Normal, Cacheable, Shareable", - "EventCode": "0x63", - "EventName": "BUS_ACCESS_NOT_SHARED", - "BriefDescription": "Bus access, not Normal, Cacheable, Shareable" - }, - { - "PublicDescription": "Bus access, Normal", - "EventCode": "0x64", - "EventName": "BUS_ACCESS_NORMAL", - "BriefDescription": "Bus access, Normal" - }, - { - "PublicDescription": "Bus access, peripheral", - "EventCode": "0x65", - "EventName": "BUS_ACCESS_PERIPH", - "BriefDescription": "Bus access, peripheral" - }, - { - "PublicDescription": "Data memory access, read", - "EventCode": "0x66", - "EventName": "MEM_ACCESS_RD", - "BriefDescription": "Data memory access, read" - }, - 
{ - "PublicDescription": "Data memory access, write", - "EventCode": "0x67", - "EventName": "MEM_ACCESS_WR", - "BriefDescription": "Data memory access, write" - }, - { - "PublicDescription": "Unaligned access, read", - "EventCode": "0x68", - "EventName": "UNALIGNED_LD_SPEC", - "BriefDescription": "Unaligned access, read" - }, - { - "PublicDescription": "Unaligned access, write", - "EventCode": "0x69", - "EventName": "UNALIGNED_ST_SPEC", - "BriefDescription": "Unaligned access, write" - }, - { - "PublicDescription": "Unaligned access", - "EventCode": "0x6a", - "EventName": "UNALIGNED_LDST_SPEC", - "BriefDescription": "Unaligned access" - }, - { - "PublicDescription": "Exclusive operation speculatively executed, LDREX or LDX", - "EventCode": "0x6c", - "EventName": "LDREX_SPEC", - "BriefDescription": "Exclusive operation speculatively executed, LDREX or LDX" - }, - { - "PublicDescription": "Exclusive operation speculatively executed, STREX or STX pass", - "EventCode": "0x6d", - "EventName": "STREX_PASS_SPEC", - "BriefDescription": "Exclusive operation speculatively executed, STREX or STX pass" - }, - { - "PublicDescription": "Exclusive operation speculatively executed, STREX or STX fail", - "EventCode": "0x6e", - "EventName": "STREX_FAIL_SPEC", - "BriefDescription": "Exclusive operation speculatively executed, STREX or STX fail" - }, - { - "PublicDescription": "Exclusive operation speculatively executed, STREX or STX", - "EventCode": "0x6f", - "EventName": "STREX_SPEC", - "BriefDescription": "Exclusive operation speculatively executed, STREX or STX" - }, - { - "PublicDescription": "Operation speculatively executed, load", - "EventCode": "0x70", - "EventName": "LD_SPEC", - "BriefDescription": "Operation speculatively executed, load" - }, - { - "PublicDescription": "Operation speculatively executed, store", - "EventCode": "0x71", - "EventName": "ST_SPEC", - "BriefDescription": "Operation speculatively executed, store" - }, - { - "PublicDescription": "Operation speculatively executed, load or store", - "EventCode": "0x72", - "EventName": "LDST_SPEC", - "BriefDescription": "Operation speculatively executed, load or store" - }, - { - "PublicDescription": "Operation speculatively executed, integer data processing", - "EventCode": "0x73", - "EventName": "DP_SPEC", - "BriefDescription": "Operation speculatively executed, integer data processing" - }, - { - "PublicDescription": "Operation speculatively executed, Advanced SIMD instruction", - "EventCode": "0x74", - "EventName": "ASE_SPEC", - "BriefDescription": "Operation speculatively executed, Advanced SIMD instruction" - }, - { - "PublicDescription": "Operation speculatively executed, floating-point instruction", - "EventCode": "0x75", - "EventName": "VFP_SPEC", - "BriefDescription": "Operation speculatively executed, floating-point instruction" - }, - { - "PublicDescription": "Operation speculatively executed, software change of the PC", - "EventCode": "0x76", - "EventName": "PC_WRITE_SPEC", - "BriefDescription": "Operation speculatively executed, software change of the PC" - }, - { - "PublicDescription": "Operation speculatively executed, Cryptographic instruction", - "EventCode": "0x77", - "EventName": "CRYPTO_SPEC", - "BriefDescription": "Operation speculatively executed, Cryptographic instruction" - }, - { - "PublicDescription": "Branch speculatively executed, immediate branch", - "EventCode": "0x78", - "EventName": "BR_IMMED_SPEC", - "BriefDescription": "Branch speculatively executed, immediate branch" - }, - { - "PublicDescription": "Branch 
speculatively executed, procedure return", - "EventCode": "0x79", - "EventName": "BR_RETURN_SPEC", - "BriefDescription": "Branch speculatively executed, procedure return" - }, - { - "PublicDescription": "Branch speculatively executed, indirect branch", - "EventCode": "0x7a", - "EventName": "BR_INDIRECT_SPEC", - "BriefDescription": "Branch speculatively executed, indirect branch" - }, - { - "PublicDescription": "Barrier speculatively executed, ISB", - "EventCode": "0x7c", - "EventName": "ISB_SPEC", - "BriefDescription": "Barrier speculatively executed, ISB" - }, - { - "PublicDescription": "Barrier speculatively executed, DSB", - "EventCode": "0x7d", - "EventName": "DSB_SPEC", - "BriefDescription": "Barrier speculatively executed, DSB" - }, - { - "PublicDescription": "Barrier speculatively executed, DMB", - "EventCode": "0x7e", - "EventName": "DMB_SPEC", - "BriefDescription": "Barrier speculatively executed, DMB" - }, - { - "PublicDescription": "Exception taken, Other synchronous", - "EventCode": "0x81", - "EventName": "EXC_UNDEF", - "BriefDescription": "Exception taken, Other synchronous" - }, - { - "PublicDescription": "Exception taken, Supervisor Call", - "EventCode": "0x82", - "EventName": "EXC_SVC", - "BriefDescription": "Exception taken, Supervisor Call" - }, - { - "PublicDescription": "Exception taken, Instruction Abort", - "EventCode": "0x83", - "EventName": "EXC_PABORT", - "BriefDescription": "Exception taken, Instruction Abort" - }, - { - "PublicDescription": "Exception taken, Data Abort and SError", - "EventCode": "0x84", - "EventName": "EXC_DABORT", - "BriefDescription": "Exception taken, Data Abort and SError" - }, - { - "PublicDescription": "Exception taken, IRQ", - "EventCode": "0x86", - "EventName": "EXC_IRQ", - "BriefDescription": "Exception taken, IRQ" - }, - { - "PublicDescription": "Exception taken, FIQ", - "EventCode": "0x87", - "EventName": "EXC_FIQ", - "BriefDescription": "Exception taken, FIQ" - }, - { - "PublicDescription": "Exception taken, Secure Monitor Call", - "EventCode": "0x88", - "EventName": "EXC_SMC", - "BriefDescription": "Exception taken, Secure Monitor Call" - }, - { - "PublicDescription": "Exception taken, Hypervisor Call", - "EventCode": "0x8a", - "EventName": "EXC_HVC", - "BriefDescription": "Exception taken, Hypervisor Call" - }, - { - "PublicDescription": "Exception taken, Instruction Abort not taken locally", - "EventCode": "0x8b", - "EventName": "EXC_TRAP_PABORT", - "BriefDescription": "Exception taken, Instruction Abort not taken locally" - }, - { - "PublicDescription": "Exception taken, Data Abort or SError not taken locally", - "EventCode": "0x8c", - "EventName": "EXC_TRAP_DABORT", - "BriefDescription": "Exception taken, Data Abort or SError not taken locally" - }, - { - "PublicDescription": "Exception taken, Other traps not taken locally", - "EventCode": "0x8d", - "EventName": "EXC_TRAP_OTHER", - "BriefDescription": "Exception taken, Other traps not taken locally" - }, - { - "PublicDescription": "Exception taken, IRQ not taken locally", - "EventCode": "0x8e", - "EventName": "EXC_TRAP_IRQ", - "BriefDescription": "Exception taken, IRQ not taken locally" - }, - { - "PublicDescription": "Exception taken, FIQ not taken locally", - "EventCode": "0x8f", - "EventName": "EXC_TRAP_FIQ", - "BriefDescription": "Exception taken, FIQ not taken locally" - }, - { - "PublicDescription": "Release consistency operation speculatively executed, Load-Acquire", - "EventCode": "0x90", - "EventName": "RC_LD_SPEC", - "BriefDescription": "Release consistency operation 
speculatively executed, Load-Acquire" - }, - { - "PublicDescription": "Release consistency operation speculatively executed, Store-Release", - "EventCode": "0x91", - "EventName": "RC_ST_SPEC", - "BriefDescription": "Release consistency operation speculatively executed, Store-Release" - }, - { - "PublicDescription": "Attributable Level 3 data or unified cache access, read", - "EventCode": "0xa0", - "EventName": "L3D_CACHE_RD", - "BriefDescription": "Attributable Level 3 data or unified cache access, read" - }, - { - "PublicDescription": "Attributable Level 3 data or unified cache access, write", - "EventCode": "0xa1", - "EventName": "L3D_CACHE_WR", - "BriefDescription": "Attributable Level 3 data or unified cache access, write" - }, - { - "PublicDescription": "Attributable Level 3 data or unified cache refill, read", - "EventCode": "0xa2", - "EventName": "L3D_CACHE_REFILL_RD", - "BriefDescription": "Attributable Level 3 data or unified cache refill, read" - }, - { - "PublicDescription": "Attributable Level 3 data or unified cache refill, write", - "EventCode": "0xa3", - "EventName": "L3D_CACHE_REFILL_WR", - "BriefDescription": "Attributable Level 3 data or unified cache refill, write" - }, - { - "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, victim", - "EventCode": "0xa6", - "EventName": "L3D_CACHE_WB_VICTIM", - "BriefDescription": "Attributable Level 3 data or unified cache Write-Back, victim" - }, - { - "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean", - "EventCode": "0xa7", - "EventName": "L3D_CACHE_WB_CLEAN", - "BriefDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean" - }, - { - "PublicDescription": "Attributable Level 3 data or unified cache access, invalidate", - "EventCode": "0xa8", - "EventName": "L3D_CACHE_INVAL", - "BriefDescription": "Attributable Level 3 data or unified cache access, invalidate" - } -] diff --git a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json new file mode 100644 index 000000000000..80d7a70829a0 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json @@ -0,0 +1,746 @@ +[ + { + "PublicDescription": "Instruction architecturally executed, Condition code check pass, software increment", + "EventCode": "0x00", + "EventName": "SW_INCR", + "BriefDescription": "Instruction architecturally executed, Condition code check pass, software increment" + }, + { + "PublicDescription": "Level 1 instruction cache refill", + "EventCode": "0x01", + "EventName": "L1I_CACHE_REFILL", + "BriefDescription": "Level 1 instruction cache refill" + }, + { + "PublicDescription": "Attributable Level 1 instruction TLB refill", + "EventCode": "0x02", + "EventName": "L1I_TLB_REFILL", + "BriefDescription": "Attributable Level 1 instruction TLB refill" + }, + { + "PublicDescription": "Level 1 data cache refill", + "EventCode": "0x03", + "EventName": "L1D_CACHE_REFILL", + "BriefDescription": "Level 1 data cache refill" + }, + { + "PublicDescription": "Level 1 data cache access", + "EventCode": "0x04", + "EventName": "L1D_CACHE", + "BriefDescription": "Level 1 data cache access" + }, + { + "PublicDescription": "Attributable Level 1 data TLB refill", + "EventCode": "0x05", + "EventName": "L1D_TLB_REFILL", + "BriefDescription": "Attributable Level 1 data TLB refill" + }, + { + "PublicDescription": "Instruction architecturally executed", + "EventCode": "0x08", + "EventName": "INST_RETIRED", + 
"BriefDescription": "Instruction architecturally executed" + }, + { + "PublicDescription": "Exception taken", + "EventCode": "0x09", + "EventName": "EXC_TAKEN", + "BriefDescription": "Exception taken" + }, + { + "PublicDescription": "Instruction architecturally executed, condition check pass, exception return", + "EventCode": "0x0a", + "EventName": "EXC_RETURN", + "BriefDescription": "Instruction architecturally executed, condition check pass, exception return" + }, + { + "PublicDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR", + "EventCode": "0x0b", + "EventName": "CID_WRITE_RETIRED", + "BriefDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR" + }, + { + "PublicDescription": "Mispredicted or not predicted branch speculatively executed", + "EventCode": "0x10", + "EventName": "BR_MIS_PRED", + "BriefDescription": "Mispredicted or not predicted branch speculatively executed" + }, + { + "PublicDescription": "Cycle", + "EventCode": "0x11", + "EventName": "CPU_CYCLES", + "BriefDescription": "Cycle" + }, + { + "PublicDescription": "Predictable branch speculatively executed", + "EventCode": "0x12", + "EventName": "BR_PRED", + "BriefDescription": "Predictable branch speculatively executed" + }, + { + "PublicDescription": "Data memory access", + "EventCode": "0x13", + "EventName": "MEM_ACCESS", + "BriefDescription": "Data memory access" + }, + { + "PublicDescription": "Attributable Level 1 instruction cache access", + "EventCode": "0x14", + "EventName": "L1I_CACHE", + "BriefDescription": "Attributable Level 1 instruction cache access" + }, + { + "PublicDescription": "Attributable Level 1 data cache write-back", + "EventCode": "0x15", + "EventName": "L1D_CACHE_WB", + "BriefDescription": "Attributable Level 1 data cache write-back" + }, + { + "PublicDescription": "Level 2 data cache access", + "EventCode": "0x16", + "EventName": "L2D_CACHE", + "BriefDescription": "Level 2 data cache access" + }, + { + "PublicDescription": "Level 2 data refill", + "EventCode": "0x17", + "EventName": "L2D_CACHE_REFILL", + "BriefDescription": "Level 2 data refill" + }, + { + "PublicDescription": "Attributable Level 2 data cache write-back", + "EventCode": "0x18", + "EventName": "L2D_CACHE_WB", + "BriefDescription": "Attributable Level 2 data cache write-back" + }, + { + "PublicDescription": "Attributable Bus access", + "EventCode": "0x19", + "EventName": "BUS_ACCESS", + "BriefDescription": "Attributable Bus access" + }, + { + "PublicDescription": "Local memory error", + "EventCode": "0x1a", + "EventName": "MEMORY_ERROR", + "BriefDescription": "Local memory error" + }, + { + "PublicDescription": "Operation speculatively executed", + "EventCode": "0x1b", + "EventName": "INST_SPEC", + "BriefDescription": "Operation speculatively executed" + }, + { + "PublicDescription": "Instruction architecturally executed, Condition code check pass, write to TTBR", + "EventCode": "0x1c", + "EventName": "TTBR_WRITE_RETIRED", + "BriefDescription": "Instruction architecturally executed, Condition code check pass, write to TTBR" + }, + { + "PublicDescription": "Bus cycle", + "EventCode": "0x1D", + "EventName": "BUS_CYCLES", + "BriefDescription": "Bus cycle" + }, + { + "PublicDescription": "Attributable Level 2 data cache allocation without refill", + "EventCode": "0x20", + "EventName": "L2D_CACHE_ALLOCATE", + "BriefDescription": "Attributable Level 2 data cache allocation without refill" + }, + { + "PublicDescription": "Instruction 
architecturally executed, branch", + "EventCode": "0x21", + "EventName": "BR_RETIRED", + "BriefDescription": "Instruction architecturally executed, branch" + }, + { + "PublicDescription": "Instruction architecturally executed, mispredicted branch", + "EventCode": "0x22", + "EventName": "BR_MIS_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, mispredicted branch" + }, + { + "PublicDescription": "No operation issued because of the frontend", + "EventCode": "0x23", + "EventName": "STALL_FRONTEND", + "BriefDescription": "No operation issued because of the frontend" + }, + { + "PublicDescription": "No operation issued due to the backend", + "EventCode": "0x24", + "EventName": "STALL_BACKEND", + "BriefDescription": "No operation issued due to the backend" + }, + { + "PublicDescription": "Attributable Level 1 data or unified TLB access", + "EventCode": "0x25", + "EventName": "L1D_TLB", + "BriefDescription": "Attributable Level 1 data or unified TLB access" + }, + { + "PublicDescription": "Attributable Level 1 instruction TLB access", + "EventCode": "0x26", + "EventName": "L1I_TLB", + "BriefDescription": "Attributable Level 1 instruction TLB access" + }, + { + "PublicDescription": "Attributable Level 3 data cache allocation without refill", + "EventCode": "0x29", + "EventName": "L3D_CACHE_ALLOCATE", + "BriefDescription": "Attributable Level 3 data cache allocation without refill" + }, + { + "PublicDescription": "Attributable Level 3 data cache refill", + "EventCode": "0x2A", + "EventName": "L3D_CACHE_REFILL", + "BriefDescription": "Attributable Level 3 data cache refill" + }, + { + "PublicDescription": "Attributable Level 3 data cache access", + "EventCode": "0x2B", + "EventName": "L3D_CACHE", + "BriefDescription": "Attributable Level 3 data cache access" + }, + { + "PublicDescription": "Attributable Level 2 data TLB refill", + "EventCode": "0x2D", + "EventName": "L2D_TLB_REFILL", + "BriefDescription": "Attributable Level 2 data TLB refill" + }, + { + "PublicDescription": "Attributable Level 2 instruction TLB refill.", + "EventCode": "0x2E", + "EventName": "L2I_TLB_REFILL", + "BriefDescription": "Attributable Level 2 instruction TLB refill." + }, + { + "PublicDescription": "Attributable Level 2 data or unified TLB access", + "EventCode": "0x2F", + "EventName": "L2D_TLB", + "BriefDescription": "Attributable Level 2 data or unified TLB access" + }, + { + "PublicDescription": "Attributable Level 2 instruction TLB access.", + "EventCode": "0x30", + "EventName": "L2I_TLB", + "BriefDescription": "Attributable Level 2 instruction TLB access." 
+ }, + { + "PublicDescription": "Access to another socket in a multi-socket system", + "EventCode": "0x31", + "EventName": "REMOTE_ACCESS", + "BriefDescription": "Access to another socket in a multi-socket system" + }, + { + "PublicDescription": "Access to data TLB causes a translation table walk", + "EventCode": "0x34", + "EventName": "DTLB_WALK", + "BriefDescription": "Access to data TLB causes a translation table walk" + }, + { + "PublicDescription": "Access to instruction TLB that causes a translation table walk", + "EventCode": "0x35", + "EventName": "ITLB_WALK", + "BriefDescription": "Access to instruction TLB that causes a translation table walk" + }, + { + "PublicDescription": "Attributable Last level cache memory read", + "EventCode": "0x36", + "EventName": "LL_CACHE_RD", + "BriefDescription": "Attributable Last level cache memory read" + }, + { + "PublicDescription": "Last level cache miss, read", + "EventCode": "0x37", + "EventName": "LL_CACHE_MISS_RD", + "BriefDescription": "Last level cache miss, read" + }, + { + "PublicDescription": "Level 1 data cache long-latency read miss. The counter counts each memory read access counted by L1D_CACHE that incurs additional latency because it returns data from outside the Level 1 data or unified cache of this processing element.", + "EventCode": "0x39", + "EventName": "L1D_CACHE_LMISS_RD", + "BriefDescription": "Level 1 data cache long-latency read miss" + }, + { + "PublicDescription": "Micro-operation architecturally executed. The counter counts each operation counted by OP_SPEC that would be executed in a simple sequential execution of the program.", + "EventCode": "0x3A", + "EventName": "OP_RETIRED", + "BriefDescription": "Micro-operation architecturally executed" + }, + { + "PublicDescription": "Micro-operation speculatively executed. The counter counts the number of operations executed by the processing element, including those that are executed speculatively and would not be executed in a simple sequential execution of the program.", + "EventCode": "0x3B", + "EventName": "OP_SPEC", + "BriefDescription": "Micro-operation speculatively executed" + }, + { + "PublicDescription": "No operation sent for execution. The counter counts every attributable cycle on which no attributable instruction or operation was sent for execution on this processing element.", + "EventCode": "0x3C", + "EventName": "STALL", + "BriefDescription": "No operation sent for execution" + }, + { + "PublicDescription": "No operation sent for execution on a slot due to the backend. Counts each slot counted by STALL_SLOT where no attributable instruction or operation was sent for execution because the backend is unable to accept it.", + "EventCode": "0x3D", + "EventName": "STALL_SLOT_BACKEND", + "BriefDescription": "No operation sent for execution on a slot due to the backend" + }, + { + "PublicDescription": "No operation sent for execution on a slot due to the frontend. Counts each slot counted by STALL_SLOT where no attributable instruction or operation was sent for execution because there was no attributable instruction or operation available to issue from the processing element from the frontend for the slot.", + "EventCode": "0x3E", + "EventName": "STALL_SLOT_FRONTEND", + "BriefDescription": "No operation sent for execution on a slot due to the frontend" + }, + { + "PublicDescription": "No operation sent for execution on a slot. 
The counter counts on each attributable cycle the number of instruction or operation slots that were not occupied by an instruction or operation attributable to the processing element.", + "EventCode": "0x3F", + "EventName": "STALL_SLOT", + "BriefDescription": "No operation sent for execution on a slot" + }, + { + "PublicDescription": "Sample Population", + "EventCode": "0x4000", + "EventName": "SAMPLE_POP", + "BriefDescription": "Sample Population" + }, + { + "PublicDescription": "Sample Taken", + "EventCode": "0x4001", + "EventName": "SAMPLE_FEED", + "BriefDescription": "Sample Taken" + }, + { + "PublicDescription": "Sample Taken and not removed by filtering", + "EventCode": "0x4002", + "EventName": "SAMPLE_FILTRATE", + "BriefDescription": "Sample Taken and not removed by filtering" + }, + { + "PublicDescription": "Sample collided with previous sample", + "EventCode": "0x4003", + "EventName": "SAMPLE_COLLISION", + "BriefDescription": "Sample collided with previous sample" + }, + { + "PublicDescription": "Constant frequency cycles. The counter increments at a constant frequency equal to the rate of increment of the system counter, CNTPCT_EL0.", + "EventCode": "0x4004", + "EventName": "CNT_CYCLES", + "BriefDescription": "Constant frequency cycles" + }, + { + "PublicDescription": "Memory stall cycles. The counter counts each cycle counted by STALL_BACKEND where there is a cache miss in the last level of cache within the processing element clock domain", + "EventCode": "0x4005", + "EventName": "STALL_BACKEND_MEM", + "BriefDescription": "Memory stall cycles" + }, + { + "PublicDescription": "Level 1 instruction cache long-latency read miss. If the L1I_CACHE_RD event is implemented, the counter counts each access counted by L1I_CACHE_RD that incurs additional latency because it returns instructions from outside of the Level 1 instruction cache of this PE. If the L1I_CACHE_RD event is not implemented, the counter counts each access counted by L1I_CACHE that incurs additional latency because it returns instructions from outside the Level 1 instruction cache of this PE. The event indicates to software that the access missed in the Level 1 instruction cache and might have a significant performance impact due to the additional latency, compared to the latency of an access that hits in the Level 1 instruction cache.", + "EventCode": "0x4006", + "EventName": "L1I_CACHE_LMISS", + "BriefDescription": "Level 1 instruction cache long-latency read miss" + }, + { + "PublicDescription": "Level 2 data cache long-latency read miss. The counter counts each memory read access counted by L2D_CACHE that incurs additional latency because it returns data from outside the Level 2 data or unified cache of this processing element. The event indicates to software that the access missed in the Level 2 data or unified cache and might have a significant performance impact compared to the latency of an access that hits in the Level 2 data or unified cache.", + "EventCode": "0x4009", + "EventName": "L2D_CACHE_LMISS_RD", + "BriefDescription": "Level 2 data cache long-latency read miss" + }, + { + "PublicDescription": "Level 3 data cache long-latency read miss. The counter counts each memory read access counted by L3D_CACHE that incurs additional latency because it returns data from outside the Level 3 data or unified cache of this processing element. 
The event indicates to software that the access missed in the Level 3 data or unified cache and might have a significant performance impact compared to the latency of an access that hits in the Level 3 data or unified cache.",
+        "EventCode": "0x400B",
+        "EventName": "L3D_CACHE_LMISS_RD",
+        "BriefDescription": "Level 3 data cache long-latency read miss"
+    },
+    {
+        "PublicDescription": "Trace buffer current write pointer wrapped",
+        "EventCode": "0x400C",
+        "EventName": "TRB_WRAP",
+        "BriefDescription": "Trace buffer current write pointer wrapped"
+    },
+    {
+        "PublicDescription": "PE Trace Unit external output 0",
+        "EventCode": "0x4010",
+        "EventName": "TRCEXTOUT0",
+        "BriefDescription": "PE Trace Unit external output 0"
+    },
+    {
+        "PublicDescription": "PE Trace Unit external output 1",
+        "EventCode": "0x4011",
+        "EventName": "TRCEXTOUT1",
+        "BriefDescription": "PE Trace Unit external output 1"
+    },
+    {
+        "PublicDescription": "PE Trace Unit external output 2",
+        "EventCode": "0x4012",
+        "EventName": "TRCEXTOUT2",
+        "BriefDescription": "PE Trace Unit external output 2"
+    },
+    {
+        "PublicDescription": "PE Trace Unit external output 3",
+        "EventCode": "0x4013",
+        "EventName": "TRCEXTOUT3",
+        "BriefDescription": "PE Trace Unit external output 3"
+    },
+    {
+        "PublicDescription": "Cross-trigger Interface output trigger 4",
+        "EventCode": "0x4018",
+        "EventName": "CTI_TRIGOUT4",
+        "BriefDescription": "Cross-trigger Interface output trigger 4"
+    },
+    {
+        "PublicDescription": "Cross-trigger Interface output trigger 5",
+        "EventCode": "0x4019",
+        "EventName": "CTI_TRIGOUT5",
+        "BriefDescription": "Cross-trigger Interface output trigger 5"
+    },
+    {
+        "PublicDescription": "Cross-trigger Interface output trigger 6",
+        "EventCode": "0x401A",
+        "EventName": "CTI_TRIGOUT6",
+        "BriefDescription": "Cross-trigger Interface output trigger 6"
+    },
+    {
+        "PublicDescription": "Cross-trigger Interface output trigger 7",
+        "EventCode": "0x401B",
+        "EventName": "CTI_TRIGOUT7",
+        "BriefDescription": "Cross-trigger Interface output trigger 7"
+    },
+    {
+        "PublicDescription": "Access with additional latency from alignment",
+        "EventCode": "0x4020",
+        "EventName": "LDST_ALIGN_LAT",
+        "BriefDescription": "Access with additional latency from alignment"
+    },
+    {
+        "PublicDescription": "Load with additional latency from alignment",
+        "EventCode": "0x4021",
+        "EventName": "LD_ALIGN_LAT",
+        "BriefDescription": "Load with additional latency from alignment"
+    },
+    {
+        "PublicDescription": "Store with additional latency from alignment",
+        "EventCode": "0x4022",
+        "EventName": "ST_ALIGN_LAT",
+        "BriefDescription": "Store with additional latency from alignment"
+    },
+    {
+        "PublicDescription": "Checked data memory access",
+        "EventCode": "0x4024",
+        "EventName": "MEM_ACCESS_CHECKED",
+        "BriefDescription": "Checked data memory access"
+    },
+    {
+        "PublicDescription": "Checked data memory access, read",
+        "EventCode": "0x4025",
+        "EventName": "MEM_ACCESS_CHECKED_RD",
+        "BriefDescription": "Checked data memory access, read"
+    },
+    {
+        "PublicDescription": "Checked data memory access, write",
+        "EventCode": "0x4026",
+        "EventName": "MEM_ACCESS_CHECKED_WR",
+        "BriefDescription": "Checked data memory access, write"
+    },
+    {
+        "PublicDescription": "SIMD Instruction architecturally executed.",
+        "EventCode": "0x8000",
+        "EventName": "SIMD_INST_RETIRED",
+        "BriefDescription": "SIMD Instruction architecturally executed."
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, SVE.",
+        "EventCode": "0x8002",
+        "EventName": "SVE_INST_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, SVE."
+    },
+    {
+        "PublicDescription": "ASE operations speculatively executed",
+        "EventCode": "0x8005",
+        "EventName": "ASE_INST_SPEC",
+        "BriefDescription": "ASE operations speculatively executed"
+    },
+    {
+        "PublicDescription": "SVE operations speculatively executed",
+        "EventCode": "0x8006",
+        "EventName": "SVE_INST_SPEC",
+        "BriefDescription": "SVE operations speculatively executed"
+    },
+    {
+        "PublicDescription": "Microarchitectural operation, Operations speculatively executed.",
+        "EventCode": "0x8008",
+        "EventName": "UOP_SPEC",
+        "BriefDescription": "Microarchitectural operation, Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE Math accelerator Operations speculatively executed.",
+        "EventCode": "0x800E",
+        "EventName": "SVE_MATH_SPEC",
+        "BriefDescription": "SVE Math accelerator Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Floating-point Operations speculatively executed.",
+        "EventCode": "0x8010",
+        "EventName": "FP_SPEC",
+        "BriefDescription": "Floating-point Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Floating-point half-precision operations speculatively executed",
+        "EventCode": "0x8014",
+        "EventName": "FP_HP_SPEC",
+        "BriefDescription": "Floating-point half-precision operations speculatively executed"
+    },
+    {
+        "PublicDescription": "Floating-point single-precision operations speculatively executed",
+        "EventCode": "0x8018",
+        "EventName": "FP_SP_SPEC",
+        "BriefDescription": "Floating-point single-precision operations speculatively executed"
+    },
+    {
+        "PublicDescription": "Floating-point double-precision operations speculatively executed",
+        "EventCode": "0x801C",
+        "EventName": "FP_DP_SPEC",
+        "BriefDescription": "Floating-point double-precision operations speculatively executed"
+    },
+    {
+        "PublicDescription": "Floating-point FMA Operations speculatively executed.",
+        "EventCode": "0x8028",
+        "EventName": "FP_FMA_SPEC",
+        "BriefDescription": "Floating-point FMA Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Floating-point reciprocal estimate Operations speculatively executed.",
+        "EventCode": "0x8034",
+        "EventName": "FP_RECPE_SPEC",
+        "BriefDescription": "Floating-point reciprocal estimate Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Floating-point convert Operations speculatively executed.",
+        "EventCode": "0x8038",
+        "EventName": "FP_CVT_SPEC",
+        "BriefDescription": "Floating-point convert Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Advanced SIMD and SVE integer Operations speculatively executed.",
+        "EventCode": "0x8043",
+        "EventName": "ASE_SVE_INT_SPEC",
+        "BriefDescription": "Advanced SIMD and SVE integer Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE predicated Operations speculatively executed.",
+        "EventCode": "0x8074",
+        "EventName": "SVE_PRED_SPEC",
+        "BriefDescription": "SVE predicated Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE predicated operations with no active predicates speculatively executed",
+        "EventCode": "0x8075",
+        "EventName": "SVE_PRED_EMPTY_SPEC",
+        "BriefDescription": "SVE predicated operations with no active predicates speculatively executed"
+    },
+    {
+        "PublicDescription": "SVE predicated operations speculatively executed with all active predicates",
+        "EventCode": "0x8076",
+        "EventName": "SVE_PRED_FULL_SPEC",
+        "BriefDescription": "SVE predicated operations speculatively executed with all active predicates"
+    },
+    {
+        "PublicDescription": "SVE predicated operations speculatively executed with partially active predicates",
+        "EventCode": "0x8077",
+        "EventName": "SVE_PRED_PARTIAL_SPEC",
+        "BriefDescription": "SVE predicated operations speculatively executed with partially active predicates"
+    },
+    {
+        "PublicDescription": "SVE predicated operations with empty or partially active predicates",
+        "EventCode": "0x8079",
+        "EventName": "SVE_PRED_NOT_FULL_SPEC",
+        "BriefDescription": "SVE predicated operations with empty or partially active predicates"
+    },
+    {
+        "PublicDescription": "SVE MOVPRFX Operations speculatively executed.",
+        "EventCode": "0x807C",
+        "EventName": "SVE_MOVPRFX_SPEC",
+        "BriefDescription": "SVE MOVPRFX Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE MOVPRFX unfused Operations speculatively executed.",
+        "EventCode": "0x807F",
+        "EventName": "SVE_MOVPRFX_U_SPEC",
+        "BriefDescription": "SVE MOVPRFX unfused Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Advanced SIMD and SVE load Operations speculatively executed.",
+        "EventCode": "0x8085",
+        "EventName": "ASE_SVE_LD_SPEC",
+        "BriefDescription": "Advanced SIMD and SVE load Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Advanced SIMD and SVE store Operations speculatively executed.",
+        "EventCode": "0x8086",
+        "EventName": "ASE_SVE_ST_SPEC",
+        "BriefDescription": "Advanced SIMD and SVE store Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Prefetch Operations speculatively executed.",
+        "EventCode": "0x8087",
+        "EventName": "PRF_SPEC",
+        "BriefDescription": "Prefetch Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "General-purpose register load Operations speculatively executed.",
+        "EventCode": "0x8089",
+        "EventName": "BASE_LD_REG_SPEC",
+        "BriefDescription": "General-purpose register load Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "General-purpose register store Operations speculatively executed.",
+        "EventCode": "0x808A",
+        "EventName": "BASE_ST_REG_SPEC",
+        "BriefDescription": "General-purpose register store Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE unpredicated load register Operations speculatively executed.",
+        "EventCode": "0x8091",
+        "EventName": "SVE_LDR_REG_SPEC",
+        "BriefDescription": "SVE unpredicated load register Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE unpredicated store register Operations speculatively executed.",
+        "EventCode": "0x8092",
+        "EventName": "SVE_STR_REG_SPEC",
+        "BriefDescription": "SVE unpredicated store register Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE load predicate register Operations speculatively executed.",
+        "EventCode": "0x8095",
+        "EventName": "SVE_LDR_PREG_SPEC",
+        "BriefDescription": "SVE load predicate register Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE store predicate register Operations speculatively executed.",
+        "EventCode": "0x8096",
+        "EventName": "SVE_STR_PREG_SPEC",
+        "BriefDescription": "SVE store predicate register Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE contiguous prefetch element Operations speculatively executed.",
+        "EventCode": "0x809F",
+        "EventName": "SVE_PRF_CONTIG_SPEC",
+        "BriefDescription": "SVE contiguous prefetch element Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed.",
+        "EventCode": "0x80A5",
+        "EventName": "ASE_SVE_LD_MULTI_SPEC",
+        "BriefDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Advanced SIMD and SVE contiguous store multiple vector Operations speculatively executed.",
+        "EventCode": "0x80A6",
+        "EventName": "ASE_SVE_ST_MULTI_SPEC",
+        "BriefDescription": "Advanced SIMD and SVE contiguous store multiple vector Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE gather-load Operations speculatively executed.",
+        "EventCode": "0x80AD",
+        "EventName": "SVE_LD_GATHER_SPEC",
+        "BriefDescription": "SVE gather-load Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE scatter-store Operations speculatively executed.",
+        "EventCode": "0x80AE",
+        "EventName": "SVE_ST_SCATTER_SPEC",
+        "BriefDescription": "SVE scatter-store Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE gather-prefetch Operations speculatively executed.",
+        "EventCode": "0x80AF",
+        "EventName": "SVE_PRF_GATHER_SPEC",
+        "BriefDescription": "SVE gather-prefetch Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE First-fault load Operations speculatively executed.",
+        "EventCode": "0x80BC",
+        "EventName": "SVE_LDFF_SPEC",
+        "BriefDescription": "SVE First-fault load Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "SVE first-fault load operations speculatively executed which set FFR bit to 0",
+        "EventCode": "0x80BD",
+        "EventName": "SVE_LDFF_FAULT_SPEC",
+        "BriefDescription": "SVE first-fault load operations speculatively executed which set FFR bit to 0"
+    },
+    {
+        "PublicDescription": "Scalable floating-point element Operations speculatively executed.",
+        "EventCode": "0x80C0",
+        "EventName": "FP_SCALE_OPS_SPEC",
+        "BriefDescription": "Scalable floating-point element Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Non-scalable floating-point element Operations speculatively executed.",
+        "EventCode": "0x80C1",
+        "EventName": "FP_FIXED_OPS_SPEC",
+        "BriefDescription": "Non-scalable floating-point element Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Scalable half-precision floating-point element Operations speculatively executed.",
+        "EventCode": "0x80C2",
+        "EventName": "FP_HP_SCALE_OPS_SPEC",
+        "BriefDescription": "Scalable half-precision floating-point element Operations speculatively executed."
+    },
+    {
+        "PublicDescription": "Non-scalable half-precision floating-point element Operations speculatively executed.",
+        "EventCode": "0x80C3",
+        "EventName": "FP_HP_FIXED_OPS_SPEC",
+        "BriefDescription": "Non-scalable half-precision floating-point element Operations speculatively executed."
+ }, + { + "PublicDescription": "Scalable single-precision floating-point element Operations speculatively executed.", + "EventCode": "0x80C4", + "EventName": "FP_SP_SCALE_OPS_SPEC", + "BriefDescription": "Scalable single-precision floating-point element Operations speculatively executed." + }, + { + "PublicDescription": "Non-scalable single-precision floating-point element Operations speculatively executed.", + "EventCode": "0x80C5", + "EventName": "FP_SP_FIXED_OPS_SPEC", + "BriefDescription": "Non-scalable single-precision floating-point element Operations speculatively executed." + }, + { + "PublicDescription": "Scalable double-precision floating-point element Operations speculatively executed.", + "EventCode": "0x80C6", + "EventName": "FP_DP_SCALE_OPS_SPEC", + "BriefDescription": "Scalable double-precision floating-point element Operations speculatively executed." + }, + { + "PublicDescription": "Non-scalable double-precision floating-point element Operations speculatively executed.", + "EventCode": "0x80C7", + "EventName": "FP_DP_FIXED_OPS_SPEC", + "BriefDescription": "Non-scalable double-precision floating-point element Operations speculatively executed." + }, + { + "PublicDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed", + "EventCode": "0x80E3", + "EventName": "ASE_SVE_INT8_SPEC", + "BriefDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed" + }, + { + "PublicDescription": "Advanced SIMD and SVE 16-bit integer operations speculatively executed", + "EventCode": "0x80E7", + "EventName": "ASE_SVE_INT16_SPEC", + "BriefDescription": "Advanced SIMD and SVE 16-bit integer operations speculatively executed" + }, + { + "PublicDescription": "Advanced SIMD and SVE 32-bit integer operations speculatively executed", + "EventCode": "0x80EB", + "EventName": "ASE_SVE_INT32_SPEC", + "BriefDescription": "Advanced SIMD and SVE 32-bit integer operations speculatively executed" + }, + { + "PublicDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed", + "EventCode": "0x80EF", + "EventName": "ASE_SVE_INT64_SPEC", + "BriefDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/recommended.json b/tools/perf/pmu-events/arch/arm64/recommended.json new file mode 100644 index 000000000000..210afa856091 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/recommended.json @@ -0,0 +1,452 @@ +[ + { + "PublicDescription": "Attributable Level 1 data cache access, read", + "EventCode": "0x40", + "EventName": "L1D_CACHE_RD", + "BriefDescription": "L1D cache access, read" + }, + { + "PublicDescription": "Attributable Level 1 data cache access, write", + "EventCode": "0x41", + "EventName": "L1D_CACHE_WR", + "BriefDescription": "L1D cache access, write" + }, + { + "PublicDescription": "Attributable Level 1 data cache refill, read", + "EventCode": "0x42", + "EventName": "L1D_CACHE_REFILL_RD", + "BriefDescription": "L1D cache refill, read" + }, + { + "PublicDescription": "Attributable Level 1 data cache refill, write", + "EventCode": "0x43", + "EventName": "L1D_CACHE_REFILL_WR", + "BriefDescription": "L1D cache refill, write" + }, + { + "PublicDescription": "Attributable Level 1 data cache refill, inner", + "EventCode": "0x44", + "EventName": "L1D_CACHE_REFILL_INNER", + "BriefDescription": "L1D cache refill, inner" + }, + { + "PublicDescription": "Attributable Level 1 data cache refill, outer", + "EventCode": "0x45", + "EventName": 
"L1D_CACHE_REFILL_OUTER", + "BriefDescription": "L1D cache refill, outer" + }, + { + "PublicDescription": "Attributable Level 1 data cache Write-Back, victim", + "EventCode": "0x46", + "EventName": "L1D_CACHE_WB_VICTIM", + "BriefDescription": "L1D cache Write-Back, victim" + }, + { + "PublicDescription": "Level 1 data cache Write-Back, cleaning and coherency", + "EventCode": "0x47", + "EventName": "L1D_CACHE_WB_CLEAN", + "BriefDescription": "L1D cache Write-Back, cleaning and coherency" + }, + { + "PublicDescription": "Attributable Level 1 data cache invalidate", + "EventCode": "0x48", + "EventName": "L1D_CACHE_INVAL", + "BriefDescription": "L1D cache invalidate" + }, + { + "PublicDescription": "Attributable Level 1 data TLB refill, read", + "EventCode": "0x4C", + "EventName": "L1D_TLB_REFILL_RD", + "BriefDescription": "L1D tlb refill, read" + }, + { + "PublicDescription": "Attributable Level 1 data TLB refill, write", + "EventCode": "0x4D", + "EventName": "L1D_TLB_REFILL_WR", + "BriefDescription": "L1D tlb refill, write" + }, + { + "PublicDescription": "Attributable Level 1 data or unified TLB access, read", + "EventCode": "0x4E", + "EventName": "L1D_TLB_RD", + "BriefDescription": "L1D tlb access, read" + }, + { + "PublicDescription": "Attributable Level 1 data or unified TLB access, write", + "EventCode": "0x4F", + "EventName": "L1D_TLB_WR", + "BriefDescription": "L1D tlb access, write" + }, + { + "PublicDescription": "Attributable Level 2 data cache access, read", + "EventCode": "0x50", + "EventName": "L2D_CACHE_RD", + "BriefDescription": "L2D cache access, read" + }, + { + "PublicDescription": "Attributable Level 2 data cache access, write", + "EventCode": "0x51", + "EventName": "L2D_CACHE_WR", + "BriefDescription": "L2D cache access, write" + }, + { + "PublicDescription": "Attributable Level 2 data cache refill, read", + "EventCode": "0x52", + "EventName": "L2D_CACHE_REFILL_RD", + "BriefDescription": "L2D cache refill, read" + }, + { + "PublicDescription": "Attributable Level 2 data cache refill, write", + "EventCode": "0x53", + "EventName": "L2D_CACHE_REFILL_WR", + "BriefDescription": "L2D cache refill, write" + }, + { + "PublicDescription": "Attributable Level 2 data cache Write-Back, victim", + "EventCode": "0x56", + "EventName": "L2D_CACHE_WB_VICTIM", + "BriefDescription": "L2D cache Write-Back, victim" + }, + { + "PublicDescription": "Level 2 data cache Write-Back, cleaning and coherency", + "EventCode": "0x57", + "EventName": "L2D_CACHE_WB_CLEAN", + "BriefDescription": "L2D cache Write-Back, cleaning and coherency" + }, + { + "PublicDescription": "Attributable Level 2 data cache invalidate", + "EventCode": "0x58", + "EventName": "L2D_CACHE_INVAL", + "BriefDescription": "L2D cache invalidate" + }, + { + "PublicDescription": "Attributable Level 2 data or unified TLB refill, read", + "EventCode": "0x5c", + "EventName": "L2D_TLB_REFILL_RD", + "BriefDescription": "L2D cache refill, read" + }, + { + "PublicDescription": "Attributable Level 2 data or unified TLB refill, write", + "EventCode": "0x5d", + "EventName": "L2D_TLB_REFILL_WR", + "BriefDescription": "L2D cache refill, write" + }, + { + "PublicDescription": "Attributable Level 2 data or unified TLB access, read", + "EventCode": "0x5e", + "EventName": "L2D_TLB_RD", + "BriefDescription": "L2D cache access, read" + }, + { + "PublicDescription": "Attributable Level 2 data or unified TLB access, write", + "EventCode": "0x5f", + "EventName": "L2D_TLB_WR", + "BriefDescription": "L2D cache access, write" + }, + { + "PublicDescription": 
"Bus access read", + "EventCode": "0x60", + "EventName": "BUS_ACCESS_RD", + "BriefDescription": "Bus access read" + }, + { + "PublicDescription": "Bus access write", + "EventCode": "0x61", + "EventName": "BUS_ACCESS_WR", + "BriefDescription": "Bus access write" + }, + { + "PublicDescription": "Bus access, Normal, Cacheable, Shareable", + "EventCode": "0x62", + "EventName": "BUS_ACCESS_SHARED", + "BriefDescription": "Bus access, Normal, Cacheable, Shareable" + }, + { + "PublicDescription": "Bus access, not Normal, Cacheable, Shareable", + "EventCode": "0x63", + "EventName": "BUS_ACCESS_NOT_SHARED", + "BriefDescription": "Bus access, not Normal, Cacheable, Shareable" + }, + { + "PublicDescription": "Bus access, Normal", + "EventCode": "0x64", + "EventName": "BUS_ACCESS_NORMAL", + "BriefDescription": "Bus access, Normal" + }, + { + "PublicDescription": "Bus access, peripheral", + "EventCode": "0x65", + "EventName": "BUS_ACCESS_PERIPH", + "BriefDescription": "Bus access, peripheral" + }, + { + "PublicDescription": "Data memory access, read", + "EventCode": "0x66", + "EventName": "MEM_ACCESS_RD", + "BriefDescription": "Data memory access, read" + }, + { + "PublicDescription": "Data memory access, write", + "EventCode": "0x67", + "EventName": "MEM_ACCESS_WR", + "BriefDescription": "Data memory access, write" + }, + { + "PublicDescription": "Unaligned access, read", + "EventCode": "0x68", + "EventName": "UNALIGNED_LD_SPEC", + "BriefDescription": "Unaligned access, read" + }, + { + "PublicDescription": "Unaligned access, write", + "EventCode": "0x69", + "EventName": "UNALIGNED_ST_SPEC", + "BriefDescription": "Unaligned access, write" + }, + { + "PublicDescription": "Unaligned access", + "EventCode": "0x6a", + "EventName": "UNALIGNED_LDST_SPEC", + "BriefDescription": "Unaligned access" + }, + { + "PublicDescription": "Exclusive operation speculatively executed, LDREX or LDX", + "EventCode": "0x6c", + "EventName": "LDREX_SPEC", + "BriefDescription": "Exclusive operation speculatively executed, LDREX or LDX" + }, + { + "PublicDescription": "Exclusive operation speculatively executed, STREX or STX pass", + "EventCode": "0x6d", + "EventName": "STREX_PASS_SPEC", + "BriefDescription": "Exclusive operation speculatively executed, STREX or STX pass" + }, + { + "PublicDescription": "Exclusive operation speculatively executed, STREX or STX fail", + "EventCode": "0x6e", + "EventName": "STREX_FAIL_SPEC", + "BriefDescription": "Exclusive operation speculatively executed, STREX or STX fail" + }, + { + "PublicDescription": "Exclusive operation speculatively executed, STREX or STX", + "EventCode": "0x6f", + "EventName": "STREX_SPEC", + "BriefDescription": "Exclusive operation speculatively executed, STREX or STX" + }, + { + "PublicDescription": "Operation speculatively executed, load", + "EventCode": "0x70", + "EventName": "LD_SPEC", + "BriefDescription": "Operation speculatively executed, load" + }, + { + "PublicDescription": "Operation speculatively executed, store", + "EventCode": "0x71", + "EventName": "ST_SPEC", + "BriefDescription": "Operation speculatively executed, store" + }, + { + "PublicDescription": "Operation speculatively executed, load or store", + "EventCode": "0x72", + "EventName": "LDST_SPEC", + "BriefDescription": "Operation speculatively executed, load or store" + }, + { + "PublicDescription": "Operation speculatively executed, integer data processing", + "EventCode": "0x73", + "EventName": "DP_SPEC", + "BriefDescription": "Operation speculatively executed, integer data processing" + }, + { + 
"PublicDescription": "Operation speculatively executed, Advanced SIMD instruction", + "EventCode": "0x74", + "EventName": "ASE_SPEC", + "BriefDescription": "Operation speculatively executed, Advanced SIMD instruction" + }, + { + "PublicDescription": "Operation speculatively executed, floating-point instruction", + "EventCode": "0x75", + "EventName": "VFP_SPEC", + "BriefDescription": "Operation speculatively executed, floating-point instruction" + }, + { + "PublicDescription": "Operation speculatively executed, software change of the PC", + "EventCode": "0x76", + "EventName": "PC_WRITE_SPEC", + "BriefDescription": "Operation speculatively executed, software change of the PC" + }, + { + "PublicDescription": "Operation speculatively executed, Cryptographic instruction", + "EventCode": "0x77", + "EventName": "CRYPTO_SPEC", + "BriefDescription": "Operation speculatively executed, Cryptographic instruction" + }, + { + "PublicDescription": "Branch speculatively executed, immediate branch", + "EventCode": "0x78", + "EventName": "BR_IMMED_SPEC", + "BriefDescription": "Branch speculatively executed, immediate branch" + }, + { + "PublicDescription": "Branch speculatively executed, procedure return", + "EventCode": "0x79", + "EventName": "BR_RETURN_SPEC", + "BriefDescription": "Branch speculatively executed, procedure return" + }, + { + "PublicDescription": "Branch speculatively executed, indirect branch", + "EventCode": "0x7a", + "EventName": "BR_INDIRECT_SPEC", + "BriefDescription": "Branch speculatively executed, indirect branch" + }, + { + "PublicDescription": "Barrier speculatively executed, ISB", + "EventCode": "0x7c", + "EventName": "ISB_SPEC", + "BriefDescription": "Barrier speculatively executed, ISB" + }, + { + "PublicDescription": "Barrier speculatively executed, DSB", + "EventCode": "0x7d", + "EventName": "DSB_SPEC", + "BriefDescription": "Barrier speculatively executed, DSB" + }, + { + "PublicDescription": "Barrier speculatively executed, DMB", + "EventCode": "0x7e", + "EventName": "DMB_SPEC", + "BriefDescription": "Barrier speculatively executed, DMB" + }, + { + "PublicDescription": "Exception taken, Other synchronous", + "EventCode": "0x81", + "EventName": "EXC_UNDEF", + "BriefDescription": "Exception taken, Other synchronous" + }, + { + "PublicDescription": "Exception taken, Supervisor Call", + "EventCode": "0x82", + "EventName": "EXC_SVC", + "BriefDescription": "Exception taken, Supervisor Call" + }, + { + "PublicDescription": "Exception taken, Instruction Abort", + "EventCode": "0x83", + "EventName": "EXC_PABORT", + "BriefDescription": "Exception taken, Instruction Abort" + }, + { + "PublicDescription": "Exception taken, Data Abort and SError", + "EventCode": "0x84", + "EventName": "EXC_DABORT", + "BriefDescription": "Exception taken, Data Abort and SError" + }, + { + "PublicDescription": "Exception taken, IRQ", + "EventCode": "0x86", + "EventName": "EXC_IRQ", + "BriefDescription": "Exception taken, IRQ" + }, + { + "PublicDescription": "Exception taken, FIQ", + "EventCode": "0x87", + "EventName": "EXC_FIQ", + "BriefDescription": "Exception taken, FIQ" + }, + { + "PublicDescription": "Exception taken, Secure Monitor Call", + "EventCode": "0x88", + "EventName": "EXC_SMC", + "BriefDescription": "Exception taken, Secure Monitor Call" + }, + { + "PublicDescription": "Exception taken, Hypervisor Call", + "EventCode": "0x8a", + "EventName": "EXC_HVC", + "BriefDescription": "Exception taken, Hypervisor Call" + }, + { + "PublicDescription": "Exception taken, Instruction Abort not taken 
locally", + "EventCode": "0x8b", + "EventName": "EXC_TRAP_PABORT", + "BriefDescription": "Exception taken, Instruction Abort not taken locally" + }, + { + "PublicDescription": "Exception taken, Data Abort or SError not taken locally", + "EventCode": "0x8c", + "EventName": "EXC_TRAP_DABORT", + "BriefDescription": "Exception taken, Data Abort or SError not taken locally" + }, + { + "PublicDescription": "Exception taken, Other traps not taken locally", + "EventCode": "0x8d", + "EventName": "EXC_TRAP_OTHER", + "BriefDescription": "Exception taken, Other traps not taken locally" + }, + { + "PublicDescription": "Exception taken, IRQ not taken locally", + "EventCode": "0x8e", + "EventName": "EXC_TRAP_IRQ", + "BriefDescription": "Exception taken, IRQ not taken locally" + }, + { + "PublicDescription": "Exception taken, FIQ not taken locally", + "EventCode": "0x8f", + "EventName": "EXC_TRAP_FIQ", + "BriefDescription": "Exception taken, FIQ not taken locally" + }, + { + "PublicDescription": "Release consistency operation speculatively executed, Load-Acquire", + "EventCode": "0x90", + "EventName": "RC_LD_SPEC", + "BriefDescription": "Release consistency operation speculatively executed, Load-Acquire" + }, + { + "PublicDescription": "Release consistency operation speculatively executed, Store-Release", + "EventCode": "0x91", + "EventName": "RC_ST_SPEC", + "BriefDescription": "Release consistency operation speculatively executed, Store-Release" + }, + { + "PublicDescription": "Attributable Level 3 data or unified cache access, read", + "EventCode": "0xa0", + "EventName": "L3D_CACHE_RD", + "BriefDescription": "Attributable Level 3 data or unified cache access, read" + }, + { + "PublicDescription": "Attributable Level 3 data or unified cache access, write", + "EventCode": "0xa1", + "EventName": "L3D_CACHE_WR", + "BriefDescription": "Attributable Level 3 data or unified cache access, write" + }, + { + "PublicDescription": "Attributable Level 3 data or unified cache refill, read", + "EventCode": "0xa2", + "EventName": "L3D_CACHE_REFILL_RD", + "BriefDescription": "Attributable Level 3 data or unified cache refill, read" + }, + { + "PublicDescription": "Attributable Level 3 data or unified cache refill, write", + "EventCode": "0xa3", + "EventName": "L3D_CACHE_REFILL_WR", + "BriefDescription": "Attributable Level 3 data or unified cache refill, write" + }, + { + "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, victim", + "EventCode": "0xa6", + "EventName": "L3D_CACHE_WB_VICTIM", + "BriefDescription": "Attributable Level 3 data or unified cache Write-Back, victim" + }, + { + "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean", + "EventCode": "0xa7", + "EventName": "L3D_CACHE_WB_CLEAN", + "BriefDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean" + }, + { + "PublicDescription": "Attributable Level 3 data or unified cache access, invalidate", + "EventCode": "0xa8", + "EventName": "L3D_CACHE_INVAL", + "BriefDescription": "Attributable Level 3 data or unified cache access, invalidate" + } +] -- cgit v1.2.3 From 7cc9680c4be7c1da7a3570711f01273b781b936b Mon Sep 17 00:00:00 2001 From: James Clark Date: Wed, 8 Dec 2021 11:54:35 +0000 Subject: perf cs-etm: Remove duplicate and incorrect aux size checks There are two checks, one is for size when running without admin, but this one is covered by the driver and reported on in more detail here (builtin-record.c): pr_err("Permission error mapping pages.\n" "Consider increasing " 
"/proc/sys/kernel/perf_event_mlock_kb,\n" "or try again with a smaller value of -m/--mmap_pages.\n" "(current value: %u,%u)\n", This had the effect of artificially limiting the aux buffer size to a value smaller than what was allowed because perf_event_mlock_kb wasn't taken into account. The second is to check for a power of two, but this is covered here (evlist.c): pr_info("rounding mmap pages size to %s (%lu pages)\n", buf, pages); Reviewed-by: Leo Yan Signed-off-by: James Clark Cc: Alexander Shishkin Cc: Jiri Olsa Cc: John Garry Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Suzuki Poulouse Cc: Will Deacon Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211208115435.610101-1-james.clark@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm/util/cs-etm.c | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index 293a23bf8be3..8a3d54a86c9c 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -407,25 +407,6 @@ static int cs_etm_recording_options(struct auxtrace_record *itr, } - /* Validate auxtrace_mmap_pages provided by user */ - if (opts->auxtrace_mmap_pages) { - unsigned int max_page = (KiB(128) / page_size); - size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size; - - if (!privileged && - opts->auxtrace_mmap_pages > max_page) { - opts->auxtrace_mmap_pages = max_page; - pr_err("auxtrace too big, truncating to %d\n", - max_page); - } - - if (!is_power_of_2(sz)) { - pr_err("Invalid mmap size for %s: must be a power of 2\n", - CORESIGHT_ETM_PMU_NAME); - return -EINVAL; - } - } - if (opts->auxtrace_snapshot_mode) pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME, opts->auxtrace_snapshot_size); -- cgit v1.2.3 From 8acf3793eae4d809658b1ebeed68d818d6d38142 Mon Sep 17 00:00:00 2001 From: Miaoqian Lin Date: Sun, 12 Dec 2021 13:56:09 +0000 Subject: perf bpf-loader: Use IS_ERR_OR_NULL() to clean code and fix check Use IS_ERR_OR_NULL() to make the code cleaner. Also if the priv is NULL, it's improper to call PTR_ERR(priv). 
Signed-off-by: Miaoqian Lin Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Daniel Borkmann Cc: Jiri Olsa Cc: John Fastabend Cc: KP Singh Cc: Mark Rutland Cc: Martin KaFai Lau Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Song Liu Cc: Yonghong Song Cc: bpf@vger.kernel.org Cc: netdev@vger.kernel.org Cc: unlisted-recipients Link: http://lore.kernel.org/lkml/20211212135613.20000-1-linmq006@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/bpf-loader.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index fbb3c4057c30..22662fc85cc9 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -421,7 +421,7 @@ preproc_gen_prologue(struct bpf_program *prog, int n, size_t prologue_cnt = 0; int i, err; - if (IS_ERR(priv) || !priv || priv->is_tp) + if (IS_ERR_OR_NULL(priv) || priv->is_tp) goto errout; pev = &priv->pev; @@ -570,7 +570,7 @@ static int hook_load_preprocessor(struct bpf_program *prog) bool need_prologue = false; int err, i; - if (IS_ERR(priv) || !priv) { + if (IS_ERR_OR_NULL(priv)) { pr_debug("Internal error when hook preprocessor\n"); return -BPF_LOADER_ERRNO__INTERNAL; } @@ -642,8 +642,11 @@ int bpf__probe(struct bpf_object *obj) goto out; priv = bpf_program__priv(prog); - if (IS_ERR(priv) || !priv) { - err = PTR_ERR(priv); + if (IS_ERR_OR_NULL(priv)) { + if (!priv) + err = -BPF_LOADER_ERRNO__INTERNAL; + else + err = PTR_ERR(priv); goto out; } @@ -693,7 +696,7 @@ int bpf__unprobe(struct bpf_object *obj) struct bpf_prog_priv *priv = bpf_program__priv(prog); int i; - if (IS_ERR(priv) || !priv || priv->is_tp) + if (IS_ERR_OR_NULL(priv) || priv->is_tp) continue; for (i = 0; i < priv->pev.ntevs; i++) { @@ -751,7 +754,7 @@ int bpf__foreach_event(struct bpf_object *obj, struct perf_probe_event *pev; int i, fd; - if (IS_ERR(priv) || !priv) { + if (IS_ERR_OR_NULL(priv)) { pr_debug("bpf: failed to get private field\n"); return -BPF_LOADER_ERRNO__INTERNAL; } -- cgit v1.2.3 From 017f7d1fac1c40eba6d97490a75bc1999914ef75 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 12 Dec 2021 22:21:22 +0000 Subject: libperf tests: Fix a spelling mistake "Runnnig" -> "Running" There is a spelling mistake in a __T_VERBOSE message. Fix it. Signed-off-by: Colin Ian King Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: kernel-janitors@vger.kernel.org Link: http://lore.kernel.org/lkml/20211212222122.478537-1-colin.i.king@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/tests/test-evlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c index 520a78267743..e7afff12c35a 100644 --- a/tools/lib/perf/tests/test-evlist.c +++ b/tools/lib/perf/tests/test-evlist.c @@ -535,7 +535,7 @@ static int test_stat_multiplexing(void) (double)counts[i].run / (double)counts[i].ena * 100.0, counts[i].run, counts[i].ena); } else if (scaled == -1) { - __T_VERBOSE("\t Not Runnnig\n"); + __T_VERBOSE("\t Not Running\n"); } else { __T_VERBOSE("\t Not Scaling\n"); } -- cgit v1.2.3 From 5d28a17c1c0e9dbd299015f91800aff3466ebedf Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Sun, 12 Dec 2021 21:47:20 +0800 Subject: perf namespaces: Add helper nsinfo__is_in_root_namespace() Refactor the code for gathering PID info: create the function nsinfo__get_nspid() to parse a process's 'status' node under '/proc'.
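For reference, the two fields nsinfo__get_nspid() parses appear as follows in /proc/<pid>/status for a process running one level deep inside a child PID namespace (values illustrative):

    Tgid:	4026
    NStgid:	4026	1

NStgid lists the thread group id once per PID namespace level, outermost first, so the helper takes the last (innermost) entry and records that the process is in a non-root PID namespace whenever that entry is not also the first one.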
Based on this refactoring, introduce a new helper, nsinfo__is_in_root_namespace(), which returns true when the caller runs in the root PID namespace. Signed-off-by: Leo Yan Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Daniel Borkmann Cc: Jin Yao Cc: Jiri Olsa Cc: John Fastabend Cc: John Garry Cc: KP Singh Cc: Mark Rutland Cc: Martin KaFai Lau Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Song Liu Cc: Yonatan Goldschmidt Cc: Yonghong Song Cc: bpf@vger.kernel.org Cc: netdev@vger.kernel.org Link: http://lore.kernel.org/lkml/20211212134721.1721245-2-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/namespaces.c | 76 +++++++++++++++++++++++++++----------------- tools/perf/util/namespaces.h | 2 ++ 2 files changed, 48 insertions(+), 30 deletions(-) diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c index 608b20c72a5c..48aa3217300b 100644 --- a/tools/perf/util/namespaces.c +++ b/tools/perf/util/namespaces.c @@ -60,17 +60,49 @@ void namespaces__free(struct namespaces *namespaces) free(namespaces); } +static int nsinfo__get_nspid(struct nsinfo *nsi, const char *path) +{ + FILE *f = NULL; + char *statln = NULL; + size_t linesz = 0; + char *nspid; + + f = fopen(path, "r"); + if (f == NULL) + return -1; + + while (getline(&statln, &linesz, f) != -1) { + /* Use tgid if CONFIG_PID_NS is not defined. */ + if (strstr(statln, "Tgid:") != NULL) { + nsi->tgid = (pid_t)strtol(strrchr(statln, '\t'), + NULL, 10); + nsi->nstgid = nsi->tgid; + } + + if (strstr(statln, "NStgid:") != NULL) { + nspid = strrchr(statln, '\t'); + nsi->nstgid = (pid_t)strtol(nspid, NULL, 10); + /* + * If innermost tgid is not the first, process is in a different + * PID namespace. + */ + nsi->in_pidns = (statln + sizeof("NStgid:") - 1) != nspid; + break; + } + } + + fclose(f); + free(statln); + return 0; +} + int nsinfo__init(struct nsinfo *nsi) { char oldns[PATH_MAX]; char spath[PATH_MAX]; char *newns = NULL; - char *statln = NULL; - char *nspid; struct stat old_stat; struct stat new_stat; - FILE *f = NULL; - size_t linesz = 0; int rv = -1; if (snprintf(oldns, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX) goto out; @@ -100,34 +132,9 @@ int nsinfo__init(struct nsinfo *nsi) if (snprintf(spath, PATH_MAX, "/proc/%d/status", nsi->pid) >= PATH_MAX) goto out; - f = fopen(spath, "r"); - if (f == NULL) - goto out; - - while (getline(&statln, &linesz, f) != -1) { - /* Use tgid if CONFIG_PID_NS is not defined. */ - if (strstr(statln, "Tgid:") != NULL) { - nsi->tgid = (pid_t)strtol(strrchr(statln, '\t'), - NULL, 10); - nsi->nstgid = nsi->tgid; - } - - if (strstr(statln, "NStgid:") != NULL) { - nspid = strrchr(statln, '\t'); - nsi->nstgid = (pid_t)strtol(nspid, NULL, 10); - /* If innermost tgid is not the first, process is in a different - * PID namespace. 
- */ - nsi->in_pidns = (statln + sizeof("NStgid:") - 1) != nspid; - break; - } - } - rv = 0; + rv = nsinfo__get_nspid(nsi, spath); out: - if (f != NULL) - (void) fclose(f); - free(statln); free(newns); return rv; } @@ -299,3 +306,12 @@ int nsinfo__stat(const char *filename, struct stat *st, struct nsinfo *nsi) return ret; } + +bool nsinfo__is_in_root_namespace(void) +{ + struct nsinfo nsi; + + memset(&nsi, 0x0, sizeof(nsi)); + nsinfo__get_nspid(&nsi, "/proc/self/status"); + return !nsi.in_pidns; +} diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h index ad9775db7b9c..9ceea9643507 100644 --- a/tools/perf/util/namespaces.h +++ b/tools/perf/util/namespaces.h @@ -59,6 +59,8 @@ void nsinfo__mountns_exit(struct nscookie *nc); char *nsinfo__realpath(const char *path, struct nsinfo *nsi); int nsinfo__stat(const char *filename, struct stat *st, struct nsinfo *nsi); +bool nsinfo__is_in_root_namespace(void); + static inline void __nsinfo__zput(struct nsinfo **nsip) { if (nsip) { -- cgit v1.2.3 From d3b58af9a8276c24a2aa80a059d87d99f5216d3b Mon Sep 17 00:00:00 2001 From: German Gomez Date: Tue, 7 Dec 2021 18:06:51 +0000 Subject: perf arm64: Rename perf_event_arm_regs for ARM64 registers The registers for ARM and ARM64 are enumerated using two enums that have the same name. In order to be able to import both headers, the name of one can be replaced using the C preprocessor like so: #define perf_event_arm_regs perf_event_arm64_regs #include #undef perf_event_arm_regs This patch updates all imports of ARM64's perf_regs.h in order to prevent the naming collision. Reviewed-by: Athira Jajeev Signed-off-by: German Gomez Tested-by: Athira Jajeev Cc: Alexander Shishkin Cc: Jiri Olsa Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linux-csky@vger.kernel.org Cc: linux-riscv@lists.infradead.org Link: https://lore.kernel.org/r/20211207180653.1147374-3-german.gomez@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm64/include/perf_regs.h | 2 ++ tools/perf/util/libunwind/arm64.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tools/perf/arch/arm64/include/perf_regs.h b/tools/perf/arch/arm64/include/perf_regs.h index fa3e07459f76..1f0d78b9f23b 100644 --- a/tools/perf/arch/arm64/include/perf_regs.h +++ b/tools/perf/arch/arm64/include/perf_regs.h @@ -4,7 +4,9 @@ #include #include +#define perf_event_arm_regs perf_event_arm64_regs #include +#undef perf_event_arm_regs void perf_regs_load(u64 *regs); diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c index c397be0c2e32..15f60fd09424 100644 --- a/tools/perf/util/libunwind/arm64.c +++ b/tools/perf/util/libunwind/arm64.c @@ -23,7 +23,9 @@ #include "unwind.h" #include "libunwind-aarch64.h" +#define perf_event_arm_regs perf_event_arm64_regs #include <../../../../arch/arm64/include/uapi/asm/perf_regs.h> +#undef perf_event_arm_regs #include "../../arch/arm64/util/unwind-libunwind.c" /* NO_LIBUNWIND_DEBUG_FRAME is a feature flag for local libunwind, -- cgit v1.2.3 From 83869019c74cc2d01c96a3be2463a4eebe362224 Mon Sep 17 00:00:00 2001 From: German Gomez Date: Tue, 7 Dec 2021 18:06:52 +0000 Subject: perf arch: Support register names from all archs When reading a perf.data file with register values, there is a mismatch between the names and the values of the registers because the tool is built using only the register names from the local architecture. 
Reading a perf.data file that was recorded on ARM64, gives the following erroneous output on an X86 machine: # perf report -i perf_arm64.data -D [...] 24661932634451 0x698 [0x21d0]: PERF_RECORD_SAMPLE(IP, 0x1): 43239/43239: 0xffffc5be8f100f98 period: 1 addr: 0 ... user regs: mask 0x1ffffffff ABI 64-bit .... AX 0x0000ffffd1515817 .... BX 0x0000ffffd1515480 .... CX 0x0000aaaadabf6c80 .... DX 0x000000000000002e .... SI 0x0000000040100401 .... DI 0x0040600200000080 .... BP 0x0000ffffd1510e10 .... SP 0x0000000000000000 .... IP 0x00000000000000dd .... FLAGS 0x0000ffffd1510cd0 .... CS 0x0000000000000000 .... SS 0x0000000000000030 .... DS 0x0000ffffa569a208 .... ES 0x0000000000000000 .... FS 0x0000000000000000 .... GS 0x0000000000000000 .... R8 0x0000aaaad3de9650 .... R9 0x0000ffffa57397f0 .... R10 0x0000000000000001 .... R11 0x0000ffffa57fd000 .... R12 0x0000ffffd1515817 .... R13 0x0000ffffd1515480 .... R14 0x0000aaaadabf6c80 .... R15 0x0000000000000000 .... unknown 0x0000000000000001 .... unknown 0x0000000000000000 .... unknown 0x0000000000000000 .... unknown 0x0000000000000000 .... unknown 0x0000000000000000 .... unknown 0x0000ffffd1510d90 .... unknown 0x0000ffffa5739b90 .... unknown 0x0000ffffd1510d80 .... XMM0 0x0000ffffa57392c8 ... thread: perf-exec:43239 ...... dso: [kernel.kallsyms] As can be seen, the register names correspond to X86 registers, even though the perf.data file was recorded on an ARM64 system. After this patch, the output of the command displays the correct register names: # perf report -i perf_arm64.data -D [...] 24661932634451 0x698 [0x21d0]: PERF_RECORD_SAMPLE(IP, 0x1): 43239/43239: 0xffffc5be8f100f98 period: 1 addr: 0 ... user regs: mask 0x1ffffffff ABI 64-bit .... x0 0x0000ffffd1515817 .... x1 0x0000ffffd1515480 .... x2 0x0000aaaadabf6c80 .... x3 0x000000000000002e .... x4 0x0000000040100401 .... x5 0x0040600200000080 .... x6 0x0000ffffd1510e10 .... x7 0x0000000000000000 .... x8 0x00000000000000dd .... x9 0x0000ffffd1510cd0 .... x10 0x0000000000000000 .... x11 0x0000000000000030 .... x12 0x0000ffffa569a208 .... x13 0x0000000000000000 .... x14 0x0000000000000000 .... x15 0x0000000000000000 .... x16 0x0000aaaad3de9650 .... x17 0x0000ffffa57397f0 .... x18 0x0000000000000001 .... x19 0x0000ffffa57fd000 .... x20 0x0000ffffd1515817 .... x21 0x0000ffffd1515480 .... x22 0x0000aaaadabf6c80 .... x23 0x0000000000000000 .... x24 0x0000000000000001 .... x25 0x0000000000000000 .... x26 0x0000000000000000 .... x27 0x0000000000000000 .... x28 0x0000000000000000 .... x29 0x0000ffffd1510d90 .... lr 0x0000ffffa5739b90 .... sp 0x0000ffffd1510d80 .... pc 0x0000ffffa57392c8 ... thread: perf-exec:43239 ...... dso: [kernel.kallsyms] Tester comments: Athira reports: "Looks good to me. Tested this patchset in powerpc by capturing regs in powerpc and doing perf report to read the data from x86." 
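The fix moves the per-arch name tables into tools/perf/util/perf_regs.c and selects one at run time from the architecture string recorded in the perf.data header (obtained via perf_env__arch(), as seen in the builtin-script.c hunk below). A condensed sketch of the resulting lookup, not the verbatim implementation:

    #include <string.h>

    /* Per-arch tables added below, one per supported architecture. */
    const char *__perf_reg_name_arm(int id);
    const char *__perf_reg_name_arm64(int id);
    const char *__perf_reg_name_x86(int id);

    const char *perf_reg_name(int id, const char *arch)
    {
        const char *reg_name = NULL;

        /* Dispatch on the recorded arch, not the build arch. */
        if (!strcmp(arch, "arm"))
            reg_name = __perf_reg_name_arm(id);
        else if (!strcmp(arch, "arm64"))
            reg_name = __perf_reg_name_arm64(id);
        else if (!strcmp(arch, "x86"))
            reg_name = __perf_reg_name_x86(id);
        /* ... csky, mips, powerpc, riscv and s390 likewise ... */

        return reg_name ?: "unknown";
    }

With that, a perf.data file recorded on arm64 prints arm64 register names no matter which host runs perf report.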
Reported-by: Alexandre Truong Reviewed-by: Athira Jajeev Signed-off-by: German Gomez Tested-by: Athira Jajeev Cc: Alexander Shishkin Cc: Jiri Olsa Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linux-csky@vger.kernel.org Cc: linux-riscv@lists.infradead.org Link: https://lore.kernel.org/r/20211207180653.1147374-4-german.gomez@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm/include/perf_regs.h | 42 -- tools/perf/arch/arm64/include/perf_regs.h | 76 --- tools/perf/arch/csky/include/perf_regs.h | 82 --- tools/perf/arch/mips/include/perf_regs.h | 69 --- tools/perf/arch/powerpc/include/perf_regs.h | 66 -- tools/perf/arch/riscv/include/perf_regs.h | 74 --- tools/perf/arch/s390/include/perf_regs.h | 78 --- tools/perf/arch/x86/include/perf_regs.h | 82 --- tools/perf/builtin-script.c | 18 +- tools/perf/util/perf_regs.c | 666 +++++++++++++++++++++ tools/perf/util/perf_regs.h | 10 +- .../util/scripting-engines/trace-event-python.c | 10 +- tools/perf/util/session.c | 25 +- 13 files changed, 697 insertions(+), 601 deletions(-) diff --git a/tools/perf/arch/arm/include/perf_regs.h b/tools/perf/arch/arm/include/perf_regs.h index 4085419283d0..99a06550e25d 100644 --- a/tools/perf/arch/arm/include/perf_regs.h +++ b/tools/perf/arch/arm/include/perf_regs.h @@ -15,46 +15,4 @@ void perf_regs_load(u64 *regs); #define PERF_REG_IP PERF_REG_ARM_PC #define PERF_REG_SP PERF_REG_ARM_SP -static inline const char *__perf_reg_name(int id) -{ - switch (id) { - case PERF_REG_ARM_R0: - return "r0"; - case PERF_REG_ARM_R1: - return "r1"; - case PERF_REG_ARM_R2: - return "r2"; - case PERF_REG_ARM_R3: - return "r3"; - case PERF_REG_ARM_R4: - return "r4"; - case PERF_REG_ARM_R5: - return "r5"; - case PERF_REG_ARM_R6: - return "r6"; - case PERF_REG_ARM_R7: - return "r7"; - case PERF_REG_ARM_R8: - return "r8"; - case PERF_REG_ARM_R9: - return "r9"; - case PERF_REG_ARM_R10: - return "r10"; - case PERF_REG_ARM_FP: - return "fp"; - case PERF_REG_ARM_IP: - return "ip"; - case PERF_REG_ARM_SP: - return "sp"; - case PERF_REG_ARM_LR: - return "lr"; - case PERF_REG_ARM_PC: - return "pc"; - default: - return NULL; - } - - return NULL; -} - #endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/arm64/include/perf_regs.h b/tools/perf/arch/arm64/include/perf_regs.h index 1f0d78b9f23b..35a3cc775b39 100644 --- a/tools/perf/arch/arm64/include/perf_regs.h +++ b/tools/perf/arch/arm64/include/perf_regs.h @@ -17,80 +17,4 @@ void perf_regs_load(u64 *regs); #define PERF_REG_IP PERF_REG_ARM64_PC #define PERF_REG_SP PERF_REG_ARM64_SP -static inline const char *__perf_reg_name(int id) -{ - switch (id) { - case PERF_REG_ARM64_X0: - return "x0"; - case PERF_REG_ARM64_X1: - return "x1"; - case PERF_REG_ARM64_X2: - return "x2"; - case PERF_REG_ARM64_X3: - return "x3"; - case PERF_REG_ARM64_X4: - return "x4"; - case PERF_REG_ARM64_X5: - return "x5"; - case PERF_REG_ARM64_X6: - return "x6"; - case PERF_REG_ARM64_X7: - return "x7"; - case PERF_REG_ARM64_X8: - return "x8"; - case PERF_REG_ARM64_X9: - return "x9"; - case PERF_REG_ARM64_X10: - return "x10"; - case PERF_REG_ARM64_X11: - return "x11"; - case PERF_REG_ARM64_X12: - return "x12"; - case PERF_REG_ARM64_X13: - return "x13"; - case PERF_REG_ARM64_X14: - return "x14"; - case PERF_REG_ARM64_X15: - return "x15"; - case PERF_REG_ARM64_X16: - return "x16"; - case PERF_REG_ARM64_X17: - return "x17"; - case PERF_REG_ARM64_X18: - return "x18"; - case PERF_REG_ARM64_X19: - return "x19"; - case 
PERF_REG_ARM64_X20: - return "x20"; - case PERF_REG_ARM64_X21: - return "x21"; - case PERF_REG_ARM64_X22: - return "x22"; - case PERF_REG_ARM64_X23: - return "x23"; - case PERF_REG_ARM64_X24: - return "x24"; - case PERF_REG_ARM64_X25: - return "x25"; - case PERF_REG_ARM64_X26: - return "x26"; - case PERF_REG_ARM64_X27: - return "x27"; - case PERF_REG_ARM64_X28: - return "x28"; - case PERF_REG_ARM64_X29: - return "x29"; - case PERF_REG_ARM64_SP: - return "sp"; - case PERF_REG_ARM64_LR: - return "lr"; - case PERF_REG_ARM64_PC: - return "pc"; - default: - return NULL; - } - - return NULL; -} - #endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/csky/include/perf_regs.h b/tools/perf/arch/csky/include/perf_regs.h index 25ac3bdcb9d1..1afcc0e916c2 100644 --- a/tools/perf/arch/csky/include/perf_regs.h +++ b/tools/perf/arch/csky/include/perf_regs.h @@ -15,86 +15,4 @@ #define PERF_REG_IP PERF_REG_CSKY_PC #define PERF_REG_SP PERF_REG_CSKY_SP -static inline const char *__perf_reg_name(int id) -{ - switch (id) { - case PERF_REG_CSKY_A0: - return "a0"; - case PERF_REG_CSKY_A1: - return "a1"; - case PERF_REG_CSKY_A2: - return "a2"; - case PERF_REG_CSKY_A3: - return "a3"; - case PERF_REG_CSKY_REGS0: - return "regs0"; - case PERF_REG_CSKY_REGS1: - return "regs1"; - case PERF_REG_CSKY_REGS2: - return "regs2"; - case PERF_REG_CSKY_REGS3: - return "regs3"; - case PERF_REG_CSKY_REGS4: - return "regs4"; - case PERF_REG_CSKY_REGS5: - return "regs5"; - case PERF_REG_CSKY_REGS6: - return "regs6"; - case PERF_REG_CSKY_REGS7: - return "regs7"; - case PERF_REG_CSKY_REGS8: - return "regs8"; - case PERF_REG_CSKY_REGS9: - return "regs9"; - case PERF_REG_CSKY_SP: - return "sp"; - case PERF_REG_CSKY_LR: - return "lr"; - case PERF_REG_CSKY_PC: - return "pc"; -#if defined(__CSKYABIV2__) - case PERF_REG_CSKY_EXREGS0: - return "exregs0"; - case PERF_REG_CSKY_EXREGS1: - return "exregs1"; - case PERF_REG_CSKY_EXREGS2: - return "exregs2"; - case PERF_REG_CSKY_EXREGS3: - return "exregs3"; - case PERF_REG_CSKY_EXREGS4: - return "exregs4"; - case PERF_REG_CSKY_EXREGS5: - return "exregs5"; - case PERF_REG_CSKY_EXREGS6: - return "exregs6"; - case PERF_REG_CSKY_EXREGS7: - return "exregs7"; - case PERF_REG_CSKY_EXREGS8: - return "exregs8"; - case PERF_REG_CSKY_EXREGS9: - return "exregs9"; - case PERF_REG_CSKY_EXREGS10: - return "exregs10"; - case PERF_REG_CSKY_EXREGS11: - return "exregs11"; - case PERF_REG_CSKY_EXREGS12: - return "exregs12"; - case PERF_REG_CSKY_EXREGS13: - return "exregs13"; - case PERF_REG_CSKY_EXREGS14: - return "exregs14"; - case PERF_REG_CSKY_TLS: - return "tls"; - case PERF_REG_CSKY_HI: - return "hi"; - case PERF_REG_CSKY_LO: - return "lo"; -#endif - default: - return NULL; - } - - return NULL; -} - #endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/mips/include/perf_regs.h b/tools/perf/arch/mips/include/perf_regs.h index ee73b36a14d1..b8cd8bbb37ba 100644 --- a/tools/perf/arch/mips/include/perf_regs.h +++ b/tools/perf/arch/mips/include/perf_regs.h @@ -12,73 +12,4 @@ #define PERF_REGS_MASK ((1ULL << PERF_REG_MIPS_MAX) - 1) -static inline const char *__perf_reg_name(int id) -{ - switch (id) { - case PERF_REG_MIPS_PC: - return "PC"; - case PERF_REG_MIPS_R1: - return "$1"; - case PERF_REG_MIPS_R2: - return "$2"; - case PERF_REG_MIPS_R3: - return "$3"; - case PERF_REG_MIPS_R4: - return "$4"; - case PERF_REG_MIPS_R5: - return "$5"; - case PERF_REG_MIPS_R6: - return "$6"; - case PERF_REG_MIPS_R7: - return "$7"; - case PERF_REG_MIPS_R8: - return "$8"; - case PERF_REG_MIPS_R9: - return "$9"; - case 
PERF_REG_MIPS_R10: - return "$10"; - case PERF_REG_MIPS_R11: - return "$11"; - case PERF_REG_MIPS_R12: - return "$12"; - case PERF_REG_MIPS_R13: - return "$13"; - case PERF_REG_MIPS_R14: - return "$14"; - case PERF_REG_MIPS_R15: - return "$15"; - case PERF_REG_MIPS_R16: - return "$16"; - case PERF_REG_MIPS_R17: - return "$17"; - case PERF_REG_MIPS_R18: - return "$18"; - case PERF_REG_MIPS_R19: - return "$19"; - case PERF_REG_MIPS_R20: - return "$20"; - case PERF_REG_MIPS_R21: - return "$21"; - case PERF_REG_MIPS_R22: - return "$22"; - case PERF_REG_MIPS_R23: - return "$23"; - case PERF_REG_MIPS_R24: - return "$24"; - case PERF_REG_MIPS_R25: - return "$25"; - case PERF_REG_MIPS_R28: - return "$28"; - case PERF_REG_MIPS_R29: - return "$29"; - case PERF_REG_MIPS_R30: - return "$30"; - case PERF_REG_MIPS_R31: - return "$31"; - default: - break; - } - return NULL; -} - #endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h index 93339d17acc4..9bb17c3f370b 100644 --- a/tools/perf/arch/powerpc/include/perf_regs.h +++ b/tools/perf/arch/powerpc/include/perf_regs.h @@ -19,70 +19,4 @@ void perf_regs_load(u64 *regs); #define PERF_REG_IP PERF_REG_POWERPC_NIP #define PERF_REG_SP PERF_REG_POWERPC_R1 -static const char *reg_names[] = { - [PERF_REG_POWERPC_R0] = "r0", - [PERF_REG_POWERPC_R1] = "r1", - [PERF_REG_POWERPC_R2] = "r2", - [PERF_REG_POWERPC_R3] = "r3", - [PERF_REG_POWERPC_R4] = "r4", - [PERF_REG_POWERPC_R5] = "r5", - [PERF_REG_POWERPC_R6] = "r6", - [PERF_REG_POWERPC_R7] = "r7", - [PERF_REG_POWERPC_R8] = "r8", - [PERF_REG_POWERPC_R9] = "r9", - [PERF_REG_POWERPC_R10] = "r10", - [PERF_REG_POWERPC_R11] = "r11", - [PERF_REG_POWERPC_R12] = "r12", - [PERF_REG_POWERPC_R13] = "r13", - [PERF_REG_POWERPC_R14] = "r14", - [PERF_REG_POWERPC_R15] = "r15", - [PERF_REG_POWERPC_R16] = "r16", - [PERF_REG_POWERPC_R17] = "r17", - [PERF_REG_POWERPC_R18] = "r18", - [PERF_REG_POWERPC_R19] = "r19", - [PERF_REG_POWERPC_R20] = "r20", - [PERF_REG_POWERPC_R21] = "r21", - [PERF_REG_POWERPC_R22] = "r22", - [PERF_REG_POWERPC_R23] = "r23", - [PERF_REG_POWERPC_R24] = "r24", - [PERF_REG_POWERPC_R25] = "r25", - [PERF_REG_POWERPC_R26] = "r26", - [PERF_REG_POWERPC_R27] = "r27", - [PERF_REG_POWERPC_R28] = "r28", - [PERF_REG_POWERPC_R29] = "r29", - [PERF_REG_POWERPC_R30] = "r30", - [PERF_REG_POWERPC_R31] = "r31", - [PERF_REG_POWERPC_NIP] = "nip", - [PERF_REG_POWERPC_MSR] = "msr", - [PERF_REG_POWERPC_ORIG_R3] = "orig_r3", - [PERF_REG_POWERPC_CTR] = "ctr", - [PERF_REG_POWERPC_LINK] = "link", - [PERF_REG_POWERPC_XER] = "xer", - [PERF_REG_POWERPC_CCR] = "ccr", - [PERF_REG_POWERPC_SOFTE] = "softe", - [PERF_REG_POWERPC_TRAP] = "trap", - [PERF_REG_POWERPC_DAR] = "dar", - [PERF_REG_POWERPC_DSISR] = "dsisr", - [PERF_REG_POWERPC_SIER] = "sier", - [PERF_REG_POWERPC_MMCRA] = "mmcra", - [PERF_REG_POWERPC_MMCR0] = "mmcr0", - [PERF_REG_POWERPC_MMCR1] = "mmcr1", - [PERF_REG_POWERPC_MMCR2] = "mmcr2", - [PERF_REG_POWERPC_MMCR3] = "mmcr3", - [PERF_REG_POWERPC_SIER2] = "sier2", - [PERF_REG_POWERPC_SIER3] = "sier3", - [PERF_REG_POWERPC_PMC1] = "pmc1", - [PERF_REG_POWERPC_PMC2] = "pmc2", - [PERF_REG_POWERPC_PMC3] = "pmc3", - [PERF_REG_POWERPC_PMC4] = "pmc4", - [PERF_REG_POWERPC_PMC5] = "pmc5", - [PERF_REG_POWERPC_PMC6] = "pmc6", - [PERF_REG_POWERPC_SDAR] = "sdar", - [PERF_REG_POWERPC_SIAR] = "siar", -}; - -static inline const char *__perf_reg_name(int id) -{ - return reg_names[id]; -} #endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/riscv/include/perf_regs.h 
b/tools/perf/arch/riscv/include/perf_regs.h index 6b02a767c918..6944bf0de53e 100644 --- a/tools/perf/arch/riscv/include/perf_regs.h +++ b/tools/perf/arch/riscv/include/perf_regs.h @@ -19,78 +19,4 @@ #define PERF_REG_IP PERF_REG_RISCV_PC #define PERF_REG_SP PERF_REG_RISCV_SP -static inline const char *__perf_reg_name(int id) -{ - switch (id) { - case PERF_REG_RISCV_PC: - return "pc"; - case PERF_REG_RISCV_RA: - return "ra"; - case PERF_REG_RISCV_SP: - return "sp"; - case PERF_REG_RISCV_GP: - return "gp"; - case PERF_REG_RISCV_TP: - return "tp"; - case PERF_REG_RISCV_T0: - return "t0"; - case PERF_REG_RISCV_T1: - return "t1"; - case PERF_REG_RISCV_T2: - return "t2"; - case PERF_REG_RISCV_S0: - return "s0"; - case PERF_REG_RISCV_S1: - return "s1"; - case PERF_REG_RISCV_A0: - return "a0"; - case PERF_REG_RISCV_A1: - return "a1"; - case PERF_REG_RISCV_A2: - return "a2"; - case PERF_REG_RISCV_A3: - return "a3"; - case PERF_REG_RISCV_A4: - return "a4"; - case PERF_REG_RISCV_A5: - return "a5"; - case PERF_REG_RISCV_A6: - return "a6"; - case PERF_REG_RISCV_A7: - return "a7"; - case PERF_REG_RISCV_S2: - return "s2"; - case PERF_REG_RISCV_S3: - return "s3"; - case PERF_REG_RISCV_S4: - return "s4"; - case PERF_REG_RISCV_S5: - return "s5"; - case PERF_REG_RISCV_S6: - return "s6"; - case PERF_REG_RISCV_S7: - return "s7"; - case PERF_REG_RISCV_S8: - return "s8"; - case PERF_REG_RISCV_S9: - return "s9"; - case PERF_REG_RISCV_S10: - return "s10"; - case PERF_REG_RISCV_S11: - return "s11"; - case PERF_REG_RISCV_T3: - return "t3"; - case PERF_REG_RISCV_T4: - return "t4"; - case PERF_REG_RISCV_T5: - return "t5"; - case PERF_REG_RISCV_T6: - return "t6"; - default: - return NULL; - } - - return NULL; -} - #endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h index ce3031526623..52fcc0891da6 100644 --- a/tools/perf/arch/s390/include/perf_regs.h +++ b/tools/perf/arch/s390/include/perf_regs.h @@ -14,82 +14,4 @@ void perf_regs_load(u64 *regs); #define PERF_REG_IP PERF_REG_S390_PC #define PERF_REG_SP PERF_REG_S390_R15 -static inline const char *__perf_reg_name(int id) -{ - switch (id) { - case PERF_REG_S390_R0: - return "R0"; - case PERF_REG_S390_R1: - return "R1"; - case PERF_REG_S390_R2: - return "R2"; - case PERF_REG_S390_R3: - return "R3"; - case PERF_REG_S390_R4: - return "R4"; - case PERF_REG_S390_R5: - return "R5"; - case PERF_REG_S390_R6: - return "R6"; - case PERF_REG_S390_R7: - return "R7"; - case PERF_REG_S390_R8: - return "R8"; - case PERF_REG_S390_R9: - return "R9"; - case PERF_REG_S390_R10: - return "R10"; - case PERF_REG_S390_R11: - return "R11"; - case PERF_REG_S390_R12: - return "R12"; - case PERF_REG_S390_R13: - return "R13"; - case PERF_REG_S390_R14: - return "R14"; - case PERF_REG_S390_R15: - return "R15"; - case PERF_REG_S390_FP0: - return "FP0"; - case PERF_REG_S390_FP1: - return "FP1"; - case PERF_REG_S390_FP2: - return "FP2"; - case PERF_REG_S390_FP3: - return "FP3"; - case PERF_REG_S390_FP4: - return "FP4"; - case PERF_REG_S390_FP5: - return "FP5"; - case PERF_REG_S390_FP6: - return "FP6"; - case PERF_REG_S390_FP7: - return "FP7"; - case PERF_REG_S390_FP8: - return "FP8"; - case PERF_REG_S390_FP9: - return "FP9"; - case PERF_REG_S390_FP10: - return "FP10"; - case PERF_REG_S390_FP11: - return "FP11"; - case PERF_REG_S390_FP12: - return "FP12"; - case PERF_REG_S390_FP13: - return "FP13"; - case PERF_REG_S390_FP14: - return "FP14"; - case PERF_REG_S390_FP15: - return "FP15"; - case PERF_REG_S390_MASK: - return "MASK"; - 
case PERF_REG_S390_PC: - return "PC"; - default: - return NULL; - } - - return NULL; -} - #endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h index cddc4cdc0d9b..16e23b722042 100644 --- a/tools/perf/arch/x86/include/perf_regs.h +++ b/tools/perf/arch/x86/include/perf_regs.h @@ -23,86 +23,4 @@ void perf_regs_load(u64 *regs); #define PERF_REG_IP PERF_REG_X86_IP #define PERF_REG_SP PERF_REG_X86_SP -static inline const char *__perf_reg_name(int id) -{ - switch (id) { - case PERF_REG_X86_AX: - return "AX"; - case PERF_REG_X86_BX: - return "BX"; - case PERF_REG_X86_CX: - return "CX"; - case PERF_REG_X86_DX: - return "DX"; - case PERF_REG_X86_SI: - return "SI"; - case PERF_REG_X86_DI: - return "DI"; - case PERF_REG_X86_BP: - return "BP"; - case PERF_REG_X86_SP: - return "SP"; - case PERF_REG_X86_IP: - return "IP"; - case PERF_REG_X86_FLAGS: - return "FLAGS"; - case PERF_REG_X86_CS: - return "CS"; - case PERF_REG_X86_SS: - return "SS"; - case PERF_REG_X86_DS: - return "DS"; - case PERF_REG_X86_ES: - return "ES"; - case PERF_REG_X86_FS: - return "FS"; - case PERF_REG_X86_GS: - return "GS"; -#ifdef HAVE_ARCH_X86_64_SUPPORT - case PERF_REG_X86_R8: - return "R8"; - case PERF_REG_X86_R9: - return "R9"; - case PERF_REG_X86_R10: - return "R10"; - case PERF_REG_X86_R11: - return "R11"; - case PERF_REG_X86_R12: - return "R12"; - case PERF_REG_X86_R13: - return "R13"; - case PERF_REG_X86_R14: - return "R14"; - case PERF_REG_X86_R15: - return "R15"; -#endif /* HAVE_ARCH_X86_64_SUPPORT */ - -#define XMM(x) \ - case PERF_REG_X86_XMM ## x: \ - case PERF_REG_X86_XMM ## x + 1: \ - return "XMM" #x; - XMM(0) - XMM(1) - XMM(2) - XMM(3) - XMM(4) - XMM(5) - XMM(6) - XMM(7) - XMM(8) - XMM(9) - XMM(10) - XMM(11) - XMM(12) - XMM(13) - XMM(14) - XMM(15) -#undef XMM - default: - return NULL; - } - - return NULL; -} - #endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 9434367af166..da2175d70ac9 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -15,6 +15,7 @@ #include "util/symbol.h" #include "util/thread.h" #include "util/trace-event.h" +#include "util/env.h" #include "util/evlist.h" #include "util/evsel.h" #include "util/evsel_fprintf.h" @@ -648,7 +649,7 @@ out: return 0; } -static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask, +static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask, const char *arch, FILE *fp) { unsigned i = 0, r; @@ -661,7 +662,7 @@ static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask, for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) { u64 val = regs->regs[i++]; - printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val); + printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r, arch), val); } return printed; @@ -718,17 +719,17 @@ tod_scnprintf(struct perf_script *script, char *buf, int buflen, } static int perf_sample__fprintf_iregs(struct perf_sample *sample, - struct perf_event_attr *attr, FILE *fp) + struct perf_event_attr *attr, const char *arch, FILE *fp) { return perf_sample__fprintf_regs(&sample->intr_regs, - attr->sample_regs_intr, fp); + attr->sample_regs_intr, arch, fp); } static int perf_sample__fprintf_uregs(struct perf_sample *sample, - struct perf_event_attr *attr, FILE *fp) + struct perf_event_attr *attr, const char *arch, FILE *fp) { return perf_sample__fprintf_regs(&sample->user_regs, - attr->sample_regs_user, fp); + attr->sample_regs_user, 
arch, fp); } static int perf_sample__fprintf_start(struct perf_script *script, @@ -2000,6 +2001,7 @@ static void process_event(struct perf_script *script, struct evsel_script *es = evsel->priv; FILE *fp = es->fp; char str[PAGE_SIZE_NAME_LEN]; + const char *arch = perf_env__arch(machine->env); if (output[type].fields == 0) return; @@ -2066,10 +2068,10 @@ static void process_event(struct perf_script *script, } if (PRINT_FIELD(IREGS)) - perf_sample__fprintf_iregs(sample, attr, fp); + perf_sample__fprintf_iregs(sample, attr, arch, fp); if (PRINT_FIELD(UREGS)) - perf_sample__fprintf_uregs(sample, attr, fp); + perf_sample__fprintf_uregs(sample, attr, arch, fp); if (PRINT_FIELD(BRSTACK)) perf_sample__fprintf_brstack(sample, thread, attr, fp); diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index 06a7461ba864..a982e40ee5a9 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include "perf_regs.h" #include "event.h" @@ -20,6 +21,671 @@ uint64_t __weak arch__user_reg_mask(void) } #ifdef HAVE_PERF_REGS_SUPPORT + +#define perf_event_arm_regs perf_event_arm64_regs +#include "../../arch/arm64/include/uapi/asm/perf_regs.h" +#undef perf_event_arm_regs + +#include "../../arch/arm/include/uapi/asm/perf_regs.h" +#include "../../arch/csky/include/uapi/asm/perf_regs.h" +#include "../../arch/mips/include/uapi/asm/perf_regs.h" +#include "../../arch/powerpc/include/uapi/asm/perf_regs.h" +#include "../../arch/riscv/include/uapi/asm/perf_regs.h" +#include "../../arch/s390/include/uapi/asm/perf_regs.h" +#include "../../arch/x86/include/uapi/asm/perf_regs.h" + +static const char *__perf_reg_name_arm64(int id) +{ + switch (id) { + case PERF_REG_ARM64_X0: + return "x0"; + case PERF_REG_ARM64_X1: + return "x1"; + case PERF_REG_ARM64_X2: + return "x2"; + case PERF_REG_ARM64_X3: + return "x3"; + case PERF_REG_ARM64_X4: + return "x4"; + case PERF_REG_ARM64_X5: + return "x5"; + case PERF_REG_ARM64_X6: + return "x6"; + case PERF_REG_ARM64_X7: + return "x7"; + case PERF_REG_ARM64_X8: + return "x8"; + case PERF_REG_ARM64_X9: + return "x9"; + case PERF_REG_ARM64_X10: + return "x10"; + case PERF_REG_ARM64_X11: + return "x11"; + case PERF_REG_ARM64_X12: + return "x12"; + case PERF_REG_ARM64_X13: + return "x13"; + case PERF_REG_ARM64_X14: + return "x14"; + case PERF_REG_ARM64_X15: + return "x15"; + case PERF_REG_ARM64_X16: + return "x16"; + case PERF_REG_ARM64_X17: + return "x17"; + case PERF_REG_ARM64_X18: + return "x18"; + case PERF_REG_ARM64_X19: + return "x19"; + case PERF_REG_ARM64_X20: + return "x20"; + case PERF_REG_ARM64_X21: + return "x21"; + case PERF_REG_ARM64_X22: + return "x22"; + case PERF_REG_ARM64_X23: + return "x23"; + case PERF_REG_ARM64_X24: + return "x24"; + case PERF_REG_ARM64_X25: + return "x25"; + case PERF_REG_ARM64_X26: + return "x26"; + case PERF_REG_ARM64_X27: + return "x27"; + case PERF_REG_ARM64_X28: + return "x28"; + case PERF_REG_ARM64_X29: + return "x29"; + case PERF_REG_ARM64_SP: + return "sp"; + case PERF_REG_ARM64_LR: + return "lr"; + case PERF_REG_ARM64_PC: + return "pc"; + default: + return NULL; + } + + return NULL; +} + +static const char *__perf_reg_name_arm(int id) +{ + switch (id) { + case PERF_REG_ARM_R0: + return "r0"; + case PERF_REG_ARM_R1: + return "r1"; + case PERF_REG_ARM_R2: + return "r2"; + case PERF_REG_ARM_R3: + return "r3"; + case PERF_REG_ARM_R4: + return "r4"; + case PERF_REG_ARM_R5: + return "r5"; + case PERF_REG_ARM_R6: + return "r6"; + case PERF_REG_ARM_R7: 
+ return "r7"; + case PERF_REG_ARM_R8: + return "r8"; + case PERF_REG_ARM_R9: + return "r9"; + case PERF_REG_ARM_R10: + return "r10"; + case PERF_REG_ARM_FP: + return "fp"; + case PERF_REG_ARM_IP: + return "ip"; + case PERF_REG_ARM_SP: + return "sp"; + case PERF_REG_ARM_LR: + return "lr"; + case PERF_REG_ARM_PC: + return "pc"; + default: + return NULL; + } + + return NULL; +} + +static const char *__perf_reg_name_csky(int id) +{ + switch (id) { + case PERF_REG_CSKY_A0: + return "a0"; + case PERF_REG_CSKY_A1: + return "a1"; + case PERF_REG_CSKY_A2: + return "a2"; + case PERF_REG_CSKY_A3: + return "a3"; + case PERF_REG_CSKY_REGS0: + return "regs0"; + case PERF_REG_CSKY_REGS1: + return "regs1"; + case PERF_REG_CSKY_REGS2: + return "regs2"; + case PERF_REG_CSKY_REGS3: + return "regs3"; + case PERF_REG_CSKY_REGS4: + return "regs4"; + case PERF_REG_CSKY_REGS5: + return "regs5"; + case PERF_REG_CSKY_REGS6: + return "regs6"; + case PERF_REG_CSKY_REGS7: + return "regs7"; + case PERF_REG_CSKY_REGS8: + return "regs8"; + case PERF_REG_CSKY_REGS9: + return "regs9"; + case PERF_REG_CSKY_SP: + return "sp"; + case PERF_REG_CSKY_LR: + return "lr"; + case PERF_REG_CSKY_PC: + return "pc"; +#if defined(__CSKYABIV2__) + case PERF_REG_CSKY_EXREGS0: + return "exregs0"; + case PERF_REG_CSKY_EXREGS1: + return "exregs1"; + case PERF_REG_CSKY_EXREGS2: + return "exregs2"; + case PERF_REG_CSKY_EXREGS3: + return "exregs3"; + case PERF_REG_CSKY_EXREGS4: + return "exregs4"; + case PERF_REG_CSKY_EXREGS5: + return "exregs5"; + case PERF_REG_CSKY_EXREGS6: + return "exregs6"; + case PERF_REG_CSKY_EXREGS7: + return "exregs7"; + case PERF_REG_CSKY_EXREGS8: + return "exregs8"; + case PERF_REG_CSKY_EXREGS9: + return "exregs9"; + case PERF_REG_CSKY_EXREGS10: + return "exregs10"; + case PERF_REG_CSKY_EXREGS11: + return "exregs11"; + case PERF_REG_CSKY_EXREGS12: + return "exregs12"; + case PERF_REG_CSKY_EXREGS13: + return "exregs13"; + case PERF_REG_CSKY_EXREGS14: + return "exregs14"; + case PERF_REG_CSKY_TLS: + return "tls"; + case PERF_REG_CSKY_HI: + return "hi"; + case PERF_REG_CSKY_LO: + return "lo"; +#endif + default: + return NULL; + } + + return NULL; +} + +static const char *__perf_reg_name_mips(int id) +{ + switch (id) { + case PERF_REG_MIPS_PC: + return "PC"; + case PERF_REG_MIPS_R1: + return "$1"; + case PERF_REG_MIPS_R2: + return "$2"; + case PERF_REG_MIPS_R3: + return "$3"; + case PERF_REG_MIPS_R4: + return "$4"; + case PERF_REG_MIPS_R5: + return "$5"; + case PERF_REG_MIPS_R6: + return "$6"; + case PERF_REG_MIPS_R7: + return "$7"; + case PERF_REG_MIPS_R8: + return "$8"; + case PERF_REG_MIPS_R9: + return "$9"; + case PERF_REG_MIPS_R10: + return "$10"; + case PERF_REG_MIPS_R11: + return "$11"; + case PERF_REG_MIPS_R12: + return "$12"; + case PERF_REG_MIPS_R13: + return "$13"; + case PERF_REG_MIPS_R14: + return "$14"; + case PERF_REG_MIPS_R15: + return "$15"; + case PERF_REG_MIPS_R16: + return "$16"; + case PERF_REG_MIPS_R17: + return "$17"; + case PERF_REG_MIPS_R18: + return "$18"; + case PERF_REG_MIPS_R19: + return "$19"; + case PERF_REG_MIPS_R20: + return "$20"; + case PERF_REG_MIPS_R21: + return "$21"; + case PERF_REG_MIPS_R22: + return "$22"; + case PERF_REG_MIPS_R23: + return "$23"; + case PERF_REG_MIPS_R24: + return "$24"; + case PERF_REG_MIPS_R25: + return "$25"; + case PERF_REG_MIPS_R28: + return "$28"; + case PERF_REG_MIPS_R29: + return "$29"; + case PERF_REG_MIPS_R30: + return "$30"; + case PERF_REG_MIPS_R31: + return "$31"; + default: + break; + } + return NULL; +} + +static const char 
*__perf_reg_name_powerpc(int id) +{ + switch (id) { + case PERF_REG_POWERPC_R0: + return "r0"; + case PERF_REG_POWERPC_R1: + return "r1"; + case PERF_REG_POWERPC_R2: + return "r2"; + case PERF_REG_POWERPC_R3: + return "r3"; + case PERF_REG_POWERPC_R4: + return "r4"; + case PERF_REG_POWERPC_R5: + return "r5"; + case PERF_REG_POWERPC_R6: + return "r6"; + case PERF_REG_POWERPC_R7: + return "r7"; + case PERF_REG_POWERPC_R8: + return "r8"; + case PERF_REG_POWERPC_R9: + return "r9"; + case PERF_REG_POWERPC_R10: + return "r10"; + case PERF_REG_POWERPC_R11: + return "r11"; + case PERF_REG_POWERPC_R12: + return "r12"; + case PERF_REG_POWERPC_R13: + return "r13"; + case PERF_REG_POWERPC_R14: + return "r14"; + case PERF_REG_POWERPC_R15: + return "r15"; + case PERF_REG_POWERPC_R16: + return "r16"; + case PERF_REG_POWERPC_R17: + return "r17"; + case PERF_REG_POWERPC_R18: + return "r18"; + case PERF_REG_POWERPC_R19: + return "r19"; + case PERF_REG_POWERPC_R20: + return "r20"; + case PERF_REG_POWERPC_R21: + return "r21"; + case PERF_REG_POWERPC_R22: + return "r22"; + case PERF_REG_POWERPC_R23: + return "r23"; + case PERF_REG_POWERPC_R24: + return "r24"; + case PERF_REG_POWERPC_R25: + return "r25"; + case PERF_REG_POWERPC_R26: + return "r26"; + case PERF_REG_POWERPC_R27: + return "r27"; + case PERF_REG_POWERPC_R28: + return "r28"; + case PERF_REG_POWERPC_R29: + return "r29"; + case PERF_REG_POWERPC_R30: + return "r30"; + case PERF_REG_POWERPC_R31: + return "r31"; + case PERF_REG_POWERPC_NIP: + return "nip"; + case PERF_REG_POWERPC_MSR: + return "msr"; + case PERF_REG_POWERPC_ORIG_R3: + return "orig_r3"; + case PERF_REG_POWERPC_CTR: + return "ctr"; + case PERF_REG_POWERPC_LINK: + return "link"; + case PERF_REG_POWERPC_XER: + return "xer"; + case PERF_REG_POWERPC_CCR: + return "ccr"; + case PERF_REG_POWERPC_SOFTE: + return "softe"; + case PERF_REG_POWERPC_TRAP: + return "trap"; + case PERF_REG_POWERPC_DAR: + return "dar"; + case PERF_REG_POWERPC_DSISR: + return "dsisr"; + case PERF_REG_POWERPC_SIER: + return "sier"; + case PERF_REG_POWERPC_MMCRA: + return "mmcra"; + case PERF_REG_POWERPC_MMCR0: + return "mmcr0"; + case PERF_REG_POWERPC_MMCR1: + return "mmcr1"; + case PERF_REG_POWERPC_MMCR2: + return "mmcr2"; + case PERF_REG_POWERPC_MMCR3: + return "mmcr3"; + case PERF_REG_POWERPC_SIER2: + return "sier2"; + case PERF_REG_POWERPC_SIER3: + return "sier3"; + case PERF_REG_POWERPC_PMC1: + return "pmc1"; + case PERF_REG_POWERPC_PMC2: + return "pmc2"; + case PERF_REG_POWERPC_PMC3: + return "pmc3"; + case PERF_REG_POWERPC_PMC4: + return "pmc4"; + case PERF_REG_POWERPC_PMC5: + return "pmc5"; + case PERF_REG_POWERPC_PMC6: + return "pmc6"; + case PERF_REG_POWERPC_SDAR: + return "sdar"; + case PERF_REG_POWERPC_SIAR: + return "siar"; + default: + break; + } + return NULL; +} + +static const char *__perf_reg_name_riscv(int id) +{ + switch (id) { + case PERF_REG_RISCV_PC: + return "pc"; + case PERF_REG_RISCV_RA: + return "ra"; + case PERF_REG_RISCV_SP: + return "sp"; + case PERF_REG_RISCV_GP: + return "gp"; + case PERF_REG_RISCV_TP: + return "tp"; + case PERF_REG_RISCV_T0: + return "t0"; + case PERF_REG_RISCV_T1: + return "t1"; + case PERF_REG_RISCV_T2: + return "t2"; + case PERF_REG_RISCV_S0: + return "s0"; + case PERF_REG_RISCV_S1: + return "s1"; + case PERF_REG_RISCV_A0: + return "a0"; + case PERF_REG_RISCV_A1: + return "a1"; + case PERF_REG_RISCV_A2: + return "a2"; + case PERF_REG_RISCV_A3: + return "a3"; + case PERF_REG_RISCV_A4: + return "a4"; + case PERF_REG_RISCV_A5: + return "a5"; + case PERF_REG_RISCV_A6: + 
return "a6"; + case PERF_REG_RISCV_A7: + return "a7"; + case PERF_REG_RISCV_S2: + return "s2"; + case PERF_REG_RISCV_S3: + return "s3"; + case PERF_REG_RISCV_S4: + return "s4"; + case PERF_REG_RISCV_S5: + return "s5"; + case PERF_REG_RISCV_S6: + return "s6"; + case PERF_REG_RISCV_S7: + return "s7"; + case PERF_REG_RISCV_S8: + return "s8"; + case PERF_REG_RISCV_S9: + return "s9"; + case PERF_REG_RISCV_S10: + return "s10"; + case PERF_REG_RISCV_S11: + return "s11"; + case PERF_REG_RISCV_T3: + return "t3"; + case PERF_REG_RISCV_T4: + return "t4"; + case PERF_REG_RISCV_T5: + return "t5"; + case PERF_REG_RISCV_T6: + return "t6"; + default: + return NULL; + } + + return NULL; +} + +static const char *__perf_reg_name_s390(int id) +{ + switch (id) { + case PERF_REG_S390_R0: + return "R0"; + case PERF_REG_S390_R1: + return "R1"; + case PERF_REG_S390_R2: + return "R2"; + case PERF_REG_S390_R3: + return "R3"; + case PERF_REG_S390_R4: + return "R4"; + case PERF_REG_S390_R5: + return "R5"; + case PERF_REG_S390_R6: + return "R6"; + case PERF_REG_S390_R7: + return "R7"; + case PERF_REG_S390_R8: + return "R8"; + case PERF_REG_S390_R9: + return "R9"; + case PERF_REG_S390_R10: + return "R10"; + case PERF_REG_S390_R11: + return "R11"; + case PERF_REG_S390_R12: + return "R12"; + case PERF_REG_S390_R13: + return "R13"; + case PERF_REG_S390_R14: + return "R14"; + case PERF_REG_S390_R15: + return "R15"; + case PERF_REG_S390_FP0: + return "FP0"; + case PERF_REG_S390_FP1: + return "FP1"; + case PERF_REG_S390_FP2: + return "FP2"; + case PERF_REG_S390_FP3: + return "FP3"; + case PERF_REG_S390_FP4: + return "FP4"; + case PERF_REG_S390_FP5: + return "FP5"; + case PERF_REG_S390_FP6: + return "FP6"; + case PERF_REG_S390_FP7: + return "FP7"; + case PERF_REG_S390_FP8: + return "FP8"; + case PERF_REG_S390_FP9: + return "FP9"; + case PERF_REG_S390_FP10: + return "FP10"; + case PERF_REG_S390_FP11: + return "FP11"; + case PERF_REG_S390_FP12: + return "FP12"; + case PERF_REG_S390_FP13: + return "FP13"; + case PERF_REG_S390_FP14: + return "FP14"; + case PERF_REG_S390_FP15: + return "FP15"; + case PERF_REG_S390_MASK: + return "MASK"; + case PERF_REG_S390_PC: + return "PC"; + default: + return NULL; + } + + return NULL; +} + +static const char *__perf_reg_name_x86(int id) +{ + switch (id) { + case PERF_REG_X86_AX: + return "AX"; + case PERF_REG_X86_BX: + return "BX"; + case PERF_REG_X86_CX: + return "CX"; + case PERF_REG_X86_DX: + return "DX"; + case PERF_REG_X86_SI: + return "SI"; + case PERF_REG_X86_DI: + return "DI"; + case PERF_REG_X86_BP: + return "BP"; + case PERF_REG_X86_SP: + return "SP"; + case PERF_REG_X86_IP: + return "IP"; + case PERF_REG_X86_FLAGS: + return "FLAGS"; + case PERF_REG_X86_CS: + return "CS"; + case PERF_REG_X86_SS: + return "SS"; + case PERF_REG_X86_DS: + return "DS"; + case PERF_REG_X86_ES: + return "ES"; + case PERF_REG_X86_FS: + return "FS"; + case PERF_REG_X86_GS: + return "GS"; + case PERF_REG_X86_R8: + return "R8"; + case PERF_REG_X86_R9: + return "R9"; + case PERF_REG_X86_R10: + return "R10"; + case PERF_REG_X86_R11: + return "R11"; + case PERF_REG_X86_R12: + return "R12"; + case PERF_REG_X86_R13: + return "R13"; + case PERF_REG_X86_R14: + return "R14"; + case PERF_REG_X86_R15: + return "R15"; + +#define XMM(x) \ + case PERF_REG_X86_XMM ## x: \ + case PERF_REG_X86_XMM ## x + 1: \ + return "XMM" #x; + XMM(0) + XMM(1) + XMM(2) + XMM(3) + XMM(4) + XMM(5) + XMM(6) + XMM(7) + XMM(8) + XMM(9) + XMM(10) + XMM(11) + XMM(12) + XMM(13) + XMM(14) + XMM(15) +#undef XMM + default: + return NULL; + } + + return 
NULL; +} + +const char *perf_reg_name(int id, const char *arch) +{ + const char *reg_name = NULL; + + if (!strcmp(arch, "csky")) + reg_name = __perf_reg_name_csky(id); + else if (!strcmp(arch, "mips")) + reg_name = __perf_reg_name_mips(id); + else if (!strcmp(arch, "powerpc")) + reg_name = __perf_reg_name_powerpc(id); + else if (!strcmp(arch, "riscv")) + reg_name = __perf_reg_name_riscv(id); + else if (!strcmp(arch, "s390")) + reg_name = __perf_reg_name_s390(id); + else if (!strcmp(arch, "x86")) + reg_name = __perf_reg_name_x86(id); + else if (!strcmp(arch, "arm")) + reg_name = __perf_reg_name_arm(id); + else if (!strcmp(arch, "arm64")) + reg_name = __perf_reg_name_arm64(id); + + return reg_name ?: "unknown"; +} + int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) { int i, idx = 0; diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index eeac181ebccf..4e6b1299c571 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -31,22 +31,16 @@ extern const struct sample_reg sample_reg_masks[]; #define DWARF_MINIMAL_REGS ((1ULL << PERF_REG_IP) | (1ULL << PERF_REG_SP)) +const char *perf_reg_name(int id, const char *arch); int perf_reg_value(u64 *valp, struct regs_dump *regs, int id); -static inline const char *perf_reg_name(int id) -{ - const char *reg_name = __perf_reg_name(id); - - return reg_name ?: "unknown"; -} - #else #define PERF_REGS_MASK 0 #define PERF_REGS_MAX 0 #define DWARF_MINIMAL_REGS PERF_REGS_MASK -static inline const char *perf_reg_name(int id __maybe_unused) +static inline const char *perf_reg_name(int id __maybe_unused, const char *arch __maybe_unused) { return "unknown"; } diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index c0c010350bc2..0445bee9290f 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -36,6 +36,7 @@ #include "../debug.h" #include "../dso.h" #include "../callchain.h" +#include "../env.h" #include "../evsel.h" #include "../event.h" #include "../thread.h" @@ -687,7 +688,7 @@ static void set_sample_datasrc_in_dict(PyObject *dict, _PyUnicode_FromString(decode)); } -static void regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size) +static void regs_map(struct regs_dump *regs, uint64_t mask, const char *arch, char *bf, int size) { unsigned int i = 0, r; int printed = 0; @@ -702,7 +703,7 @@ static void regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size) printed += scnprintf(bf + printed, size - printed, "%5s:0x%" PRIx64 " ", - perf_reg_name(r), val); + perf_reg_name(r, arch), val); } } @@ -711,6 +712,7 @@ static void set_regs_in_dict(PyObject *dict, struct evsel *evsel) { struct perf_event_attr *attr = &evsel->core.attr; + const char *arch = perf_env__arch(evsel__env(evsel)); /* * Here value 28 is a constant size which can be used to print @@ -722,12 +724,12 @@ static void set_regs_in_dict(PyObject *dict, int size = __sw_hweight64(attr->sample_regs_intr) * 28; char bf[size]; - regs_map(&sample->intr_regs, attr->sample_regs_intr, bf, sizeof(bf)); + regs_map(&sample->intr_regs, attr->sample_regs_intr, arch, bf, sizeof(bf)); pydict_set_item_string_decref(dict, "iregs", _PyUnicode_FromString(bf)); - regs_map(&sample->user_regs, attr->sample_regs_user, bf, sizeof(bf)); + regs_map(&sample->user_regs, attr->sample_regs_user, arch, bf, sizeof(bf)); pydict_set_item_string_decref(dict, "uregs", _PyUnicode_FromString(bf)); diff --git 
a/tools/perf/util/session.c b/tools/perf/util/session.c index d8857d1b6d7c..e1a273048681 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -15,6 +15,7 @@ #include "map_symbol.h" #include "branch.h" #include "debug.h" +#include "env.h" #include "evlist.h" #include "evsel.h" #include "memswap.h" @@ -1168,7 +1169,7 @@ static void branch_stack__printf(struct perf_sample *sample, bool callstack) } } -static void regs_dump__printf(u64 mask, u64 *regs) +static void regs_dump__printf(u64 mask, u64 *regs, const char *arch) { unsigned rid, i = 0; @@ -1176,7 +1177,7 @@ static void regs_dump__printf(u64 mask, u64 *regs) u64 val = regs[i++]; printf(".... %-5s 0x%016" PRIx64 "\n", - perf_reg_name(rid), val); + perf_reg_name(rid, arch), val); } } @@ -1194,7 +1195,7 @@ static inline const char *regs_dump_abi(struct regs_dump *d) return regs_abi[d->abi]; } -static void regs__printf(const char *type, struct regs_dump *regs) +static void regs__printf(const char *type, struct regs_dump *regs, const char *arch) { u64 mask = regs->mask; @@ -1203,23 +1204,23 @@ static void regs__printf(const char *type, struct regs_dump *regs) mask, regs_dump_abi(regs)); - regs_dump__printf(mask, regs->regs); + regs_dump__printf(mask, regs->regs, arch); } -static void regs_user__printf(struct perf_sample *sample) +static void regs_user__printf(struct perf_sample *sample, const char *arch) { struct regs_dump *user_regs = &sample->user_regs; if (user_regs->regs) - regs__printf("user", user_regs); + regs__printf("user", user_regs, arch); } -static void regs_intr__printf(struct perf_sample *sample) +static void regs_intr__printf(struct perf_sample *sample, const char *arch) { struct regs_dump *intr_regs = &sample->intr_regs; if (intr_regs->regs) - regs__printf("intr", intr_regs); + regs__printf("intr", intr_regs, arch); } static void stack_user__printf(struct stack_dump *dump) @@ -1304,7 +1305,7 @@ char *get_page_size_name(u64 size, char *str) } static void dump_sample(struct evsel *evsel, union perf_event *event, - struct perf_sample *sample) + struct perf_sample *sample, const char *arch) { u64 sample_type; char str[PAGE_SIZE_NAME_LEN]; @@ -1325,10 +1326,10 @@ static void dump_sample(struct evsel *evsel, union perf_event *event, branch_stack__printf(sample, evsel__has_branch_callstack(evsel)); if (sample_type & PERF_SAMPLE_REGS_USER) - regs_user__printf(sample); + regs_user__printf(sample, arch); if (sample_type & PERF_SAMPLE_REGS_INTR) - regs_intr__printf(sample); + regs_intr__printf(sample, arch); if (sample_type & PERF_SAMPLE_STACK_USER) stack_user__printf(&sample->user_stack); @@ -1502,7 +1503,7 @@ static int machines__deliver_event(struct machines *machines, ++evlist->stats.nr_unknown_id; return 0; } - dump_sample(evsel, event, sample); + dump_sample(evsel, event, sample, perf_env__arch(machine->env)); if (machine == NULL) { ++evlist->stats.nr_unprocessable_samples; return 0; -- cgit v1.2.3 From 416e15ad17f84358ba3eca6b82378be97c793c62 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Dec 2021 10:51:50 -0800 Subject: perf ftrace: Add 'trace' subcommand This is a preparation to add more sub-commands for ftrace. The 'trace' subcommand does the same thing when no subcommand is given. Committer testing: The previous mode, i.e. 
no subcommand and the new 'perf ftrace trace' are equivalent: # perf ftrace -G check_preempt_curr sleep 0.00001 # tracer: function_graph # # CPU DURATION FUNCTION CALLS # | | | | | | | 25) | check_preempt_curr() { 25) | resched_curr() { 25) | native_smp_send_reschedule() { 25) | default_send_IPI_single_phys() { 25) 0.110 us | __default_send_IPI_dest_field(); 25) 0.490 us | } 25) 0.640 us | } 25) 0.850 us | } 25) 2.060 us | } # perf ftrace trace -G check_preempt_curr sleep 0.00001 # tracer: function_graph # # CPU DURATION FUNCTION CALLS # | | | | | | | 10) | check_preempt_curr() { 10) | resched_curr() { 10) | native_smp_send_reschedule() { 10) | default_send_IPI_single_phys() { 10) 0.080 us | __default_send_IPI_dest_field(); 10) 0.460 us | } 10) 0.610 us | } 10) 0.830 us | } 10) 2.020 us | } # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211215185154.360314-2-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-ftrace.c | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index 87cb11a7a3ee..b28e762c5d54 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -879,17 +879,7 @@ int cmd_ftrace(int argc, const char **argv) .tracer = DEFAULT_TRACER, .target = { .uid = UINT_MAX, }, }; - const char * const ftrace_usage[] = { - "perf ftrace [] []", - "perf ftrace [] -- []", - NULL - }; - const struct option ftrace_options[] = { - OPT_STRING('t', "tracer", &ftrace.tracer, "tracer", - "Tracer to use: function_graph(default) or function"), - OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]", - "Show available functions to filter", - opt_list_avail_functions, "*"), + const struct option common_options[] = { OPT_STRING('p', "pid", &ftrace.target.pid, "pid", "Trace on existing process id"), /* TODO: Add short option -t after -t/--tracer can be removed. 
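(the short -t is still claimed by the --tracer option in ftrace_options, so it cannot be reused here until that one is retired)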
*/ @@ -901,6 +891,14 @@ int cmd_ftrace(int argc, const char **argv) "System-wide collection from all CPUs"), OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu", "List of cpus to monitor"), + OPT_END() + }; + const struct option ftrace_options[] = { + OPT_STRING('t', "tracer", &ftrace.tracer, "tracer", + "Tracer to use: function_graph(default) or function"), + OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]", + "Show available functions to filter", + opt_list_avail_functions, "*"), OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", "Trace given functions using function tracer", parse_filter_func), @@ -923,7 +921,15 @@ int cmd_ftrace(int argc, const char **argv) "Trace children processes"), OPT_UINTEGER('D', "delay", &ftrace.initial_delay, "Number of milliseconds to wait before starting tracing after program start"), - OPT_END() + OPT_PARENT(common_options), + }; + + const char * const ftrace_usage[] = { + "perf ftrace [] []", + "perf ftrace [] -- [] []", + "perf ftrace trace [] []", + "perf ftrace trace [] -- [] []", + NULL }; INIT_LIST_HEAD(&ftrace.filters); @@ -935,6 +941,11 @@ int cmd_ftrace(int argc, const char **argv) if (ret < 0) return -1; + if (argc > 1 && !strcmp(argv[1], "trace")) { + argc--; + argv++; + } + argc = parse_options(argc, argv, ftrace_options, ftrace_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc && target__none(&ftrace.target)) -- cgit v1.2.3 From a9b8ae8ae347941fefd6596f62586b13ae032e4b Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Dec 2021 10:51:51 -0800 Subject: perf ftrace: Move out common code from __cmd_ftrace The signal setup code and evlist__prepare_workload() can be used for other subcommands. Let's move them out of the __cmd_ftrace(). Then it doesn't need to pass argc and argv. On the other hand, select_tracer() is specific to the 'trace' subcommand so it'd better moving it into the __cmd_ftrace(). Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211215185154.360314-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-ftrace.c | 63 ++++++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index b28e762c5d54..0f8310bd0e6c 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -565,7 +565,24 @@ static int set_tracing_options(struct perf_ftrace *ftrace) return 0; } -static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv) +static void select_tracer(struct perf_ftrace *ftrace) +{ + bool graph = !list_empty(&ftrace->graph_funcs) || + !list_empty(&ftrace->nograph_funcs); + bool func = !list_empty(&ftrace->filters) || + !list_empty(&ftrace->notrace); + + /* The function_graph has priority over function tracer. */ + if (graph) + ftrace->tracer = "function_graph"; + else if (func) + ftrace->tracer = "function"; + /* Otherwise, the default tracer is used. 
*/ + + pr_debug("%s tracer is used\n", ftrace->tracer); +} + +static int __cmd_ftrace(struct perf_ftrace *ftrace) { char *trace_file; int trace_fd; @@ -586,10 +603,7 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv) return -1; } - signal(SIGINT, sig_handler); - signal(SIGUSR1, sig_handler); - signal(SIGCHLD, sig_handler); - signal(SIGPIPE, sig_handler); + select_tracer(ftrace); if (reset_tracing_files(ftrace) < 0) { pr_err("failed to reset ftrace\n"); @@ -600,11 +614,6 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv) if (write_tracing_file("trace", "0") < 0) goto out; - if (argc && evlist__prepare_workload(ftrace->evlist, &ftrace->target, argv, false, - ftrace__workload_exec_failed_signal) < 0) { - goto out; - } - if (set_tracing_options(ftrace) < 0) goto out_reset; @@ -855,23 +864,6 @@ static int parse_graph_tracer_opts(const struct option *opt, return 0; } -static void select_tracer(struct perf_ftrace *ftrace) -{ - bool graph = !list_empty(&ftrace->graph_funcs) || - !list_empty(&ftrace->nograph_funcs); - bool func = !list_empty(&ftrace->filters) || - !list_empty(&ftrace->notrace); - - /* The function_graph has priority over function tracer. */ - if (graph) - ftrace->tracer = "function_graph"; - else if (func) - ftrace->tracer = "function"; - /* Otherwise, the default tracer is used. */ - - pr_debug("%s tracer is used\n", ftrace->tracer); -} - int cmd_ftrace(int argc, const char **argv) { int ret; @@ -937,6 +929,11 @@ int cmd_ftrace(int argc, const char **argv) INIT_LIST_HEAD(&ftrace.graph_funcs); INIT_LIST_HEAD(&ftrace.nograph_funcs); + signal(SIGINT, sig_handler); + signal(SIGUSR1, sig_handler); + signal(SIGCHLD, sig_handler); + signal(SIGPIPE, sig_handler); + ret = perf_config(perf_ftrace_config, &ftrace); if (ret < 0) return -1; @@ -951,8 +948,6 @@ int cmd_ftrace(int argc, const char **argv) if (!argc && target__none(&ftrace.target)) ftrace.target.system_wide = true; - select_tracer(&ftrace); - ret = target__validate(&ftrace.target); if (ret) { char errbuf[512]; @@ -972,7 +967,15 @@ int cmd_ftrace(int argc, const char **argv) if (ret < 0) goto out_delete_evlist; - ret = __cmd_ftrace(&ftrace, argc, argv); + if (argc) { + ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target, + argv, false, + ftrace__workload_exec_failed_signal); + if (ret < 0) + goto out_delete_evlist; + } + + ret = __cmd_ftrace(&ftrace); out_delete_evlist: evlist__delete(ftrace.evlist); -- cgit v1.2.3 From 53be50282269b46c678ae5a9f54acf7416a10dbb Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Dec 2021 10:51:52 -0800 Subject: perf ftrace: Add 'latency' subcommand The perf ftrace latency is to get a histogram of function execution time. Users should give a function name using -T option. This is implemented using function_graph tracer with the given function only. And it parses the output to extract the time. $ sudo perf ftrace latency -a -T mutex_lock sleep 1 # DURATION | COUNT | GRAPH | 0 - 1 us | 4596 | ######################## | 1 - 2 us | 1680 | ######### | 2 - 4 us | 1106 | ##### | 4 - 8 us | 546 | ## | 8 - 16 us | 562 | ### | 16 - 32 us | 1 | | 32 - 64 us | 0 | | 64 - 128 us | 0 | | 128 - 256 us | 0 | | 256 - 512 us | 0 | | 512 - 1024 us | 0 | | 1 - 2 ms | 0 | | 2 - 4 ms | 0 | | 4 - 8 ms | 0 | | 8 - 16 ms | 0 | | 16 - 32 ms | 0 | | 32 - 64 ms | 0 | | 64 - 128 ms | 0 | | 128 - 256 ms | 0 | | 256 - 512 ms | 0 | | 512 - 1024 ms | 0 | | 1 - ... 
s | 0 | | Committer testing: Latency for the __handle_mm_fault kernel function, system wide for 1 second, see how one can go from the usual 'perf ftrace' output, now the same as for the 'perf ftrace trace' subcommand, to the new 'perf ftrace latency' subcommand: # perf ftrace -T __handle_mm_fault -a sleep 1 | wc -l 709 # perf ftrace -T __handle_mm_fault -a sleep 1 | wc -l 510 # perf ftrace -T __handle_mm_fault -a sleep 1 | head -20 # tracer: function # # entries-in-buffer/entries-written: 0/0 #P:32 # # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | perf-exec-1685104 [007] 90638.894613: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894620: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894622: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894635: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894688: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894702: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894714: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894728: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894740: __handle_mm_fault <-handle_mm_fault perf-exec-1685104 [007] 90638.894751: __handle_mm_fault <-handle_mm_fault sleep-1685104 [007] 90638.894962: __handle_mm_fault <-handle_mm_fault sleep-1685104 [007] 90638.894977: __handle_mm_fault <-handle_mm_fault sleep-1685104 [007] 90638.894983: __handle_mm_fault <-handle_mm_fault sleep-1685104 [007] 90638.894995: __handle_mm_fault <-handle_mm_fault # perf ftrace latency -T __handle_mm_fault -a sleep 1 # DURATION | COUNT | GRAPH | 0 - 1 us | 125 | ###### | 1 - 2 us | 249 | ############# | 2 - 4 us | 455 | ######################## | 4 - 8 us | 37 | # | 8 - 16 us | 0 | | 16 - 32 us | 0 | | 32 - 64 us | 0 | | 64 - 128 us | 0 | | 128 - 256 us | 0 | | 256 - 512 us | 0 | | 512 - 1024 us | 0 | | 1 - 2 ms | 0 | | 2 - 4 ms | 0 | | 4 - 8 ms | 0 | | 8 - 16 ms | 0 | | 16 - 32 ms | 0 | | 32 - 64 ms | 0 | | 64 - 128 ms | 0 | | 128 - 256 ms | 0 | | 256 - 512 ms | 0 | | 512 - 1024 ms | 0 | | 1 - ... s | 0 | | # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211215185154.360314-4-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-ftrace.c | 285 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 276 insertions(+), 9 deletions(-) diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index 0f8310bd0e6c..8fd3c9c44c69 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -13,7 +13,9 @@ #include #include #include +#include #include +#include #include #include @@ -702,6 +704,224 @@ out: return (done && !workload_exec_errno) ? 
0 : -1; } +#define NUM_BUCKET 22 /* 20 + 2 (for outliers in both direction) */ + +static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf) +{ + char *p, *q; + char *unit; + double num; + int i; + + /* ensure NUL termination */ + buf[len] = '\0'; + + /* handle data line by line */ + for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) { + *q = '\0'; + /* move it to the line buffer */ + strcat(linebuf, p); + + /* + * parse trace output to get function duration like in + * + * # tracer: function_graph + * # + * # CPU DURATION FUNCTION CALLS + * # | | | | | | | + * 1) + 10.291 us | do_filp_open(); + * 1) 4.889 us | do_filp_open(); + * 1) 6.086 us | do_filp_open(); + * + */ + if (linebuf[0] == '#') + goto next; + + /* ignore CPU */ + p = strchr(linebuf, ')'); + if (p == NULL) + p = linebuf; + + while (*p && !isdigit(*p) && (*p != '|')) + p++; + + /* no duration */ + if (*p == '\0' || *p == '|') + goto next; + + num = strtod(p, &unit); + if (!unit || strncmp(unit, " us", 3)) + goto next; + + i = log2(num); + if (i < 0) + i = 0; + if (i >= NUM_BUCKET) + i = NUM_BUCKET - 1; + + buckets[i]++; + +next: + /* empty the line buffer for the next output */ + linebuf[0] = '\0'; + } + + /* preserve any remaining output (before newline) */ + strcat(linebuf, p); +} + +static void display_histogram(int buckets[]) +{ + int i; + int total = 0; + int bar_total = 46; /* to fit in 80 column */ + char bar[] = "###############################################"; + int bar_len; + + for (i = 0; i < NUM_BUCKET; i++) + total += buckets[i]; + + if (total == 0) { + printf("No data found\n"); + return; + } + + printf("# %14s | %10s | %-*s |\n", + " DURATION ", "COUNT", bar_total, "GRAPH"); + + bar_len = buckets[0] * bar_total / total; + printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", + 0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, ""); + + for (i = 1; i < NUM_BUCKET - 1; i++) { + int start = (1 << (i - 1)); + int stop = 1 << i; + const char *unit = "us"; + + if (start >= 1024) { + start >>= 10; + stop >>= 10; + unit = "ms"; + } + bar_len = buckets[i] * bar_total / total; + printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", + start, stop, unit, buckets[i], bar_len, bar, + bar_total - bar_len, ""); + } + + bar_len = buckets[NUM_BUCKET - 1] * bar_total / total; + printf(" %4d - %-4s %s | %10d | %.*s%*s |\n", + 1, "...", " s", buckets[NUM_BUCKET - 1], bar_len, bar, + bar_total - bar_len, ""); + +} + +static int __cmd_latency(struct perf_ftrace *ftrace) +{ + char *trace_file; + int trace_fd; + char buf[4096]; + char line[256]; + struct pollfd pollfd = { + .events = POLLIN, + }; + int buckets[NUM_BUCKET] = { }; + + if (!(perf_cap__capable(CAP_PERFMON) || + perf_cap__capable(CAP_SYS_ADMIN))) { + pr_err("ftrace only works for %s!\n", +#ifdef HAVE_LIBCAP_SUPPORT + "users with the CAP_PERFMON or CAP_SYS_ADMIN capability" +#else + "root" +#endif + ); + return -1; + } + + if (reset_tracing_files(ftrace) < 0) { + pr_err("failed to reset ftrace\n"); + goto out; + } + + /* reset ftrace buffer */ + if (write_tracing_file("trace", "0") < 0) + goto out; + + if (set_tracing_options(ftrace) < 0) + goto out_reset; + + /* force to use the function_graph tracer to track duration */ + if (write_tracing_file("current_tracer", "function_graph") < 0) { + pr_err("failed to set current_tracer to function_graph\n"); + goto out_reset; + } + + trace_file = get_tracing_file("trace_pipe"); + if (!trace_file) { + pr_err("failed to open trace_pipe\n"); + goto out_reset; + } + + trace_fd = open(trace_file, O_RDONLY); + + 
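/*
 * Note: trace_pipe is a consuming stream -- every read() drains the
 * ftrace ring buffer, so the poll loop below can feed each chunk
 * straight into make_histogram(), and only the data still pending at
 * workload exit has to be drained separately.
 */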
put_tracing_file(trace_file); + + if (trace_fd < 0) { + pr_err("failed to open trace_pipe\n"); + goto out_reset; + } + + fcntl(trace_fd, F_SETFL, O_NONBLOCK); + pollfd.fd = trace_fd; + + if (write_tracing_file("tracing_on", "1") < 0) { + pr_err("can't enable tracing\n"); + goto out_close_fd; + } + + evlist__start_workload(ftrace->evlist); + + line[0] = '\0'; + while (!done) { + if (poll(&pollfd, 1, -1) < 0) + break; + + if (pollfd.revents & POLLIN) { + int n = read(trace_fd, buf, sizeof(buf) - 1); + if (n < 0) + break; + + make_histogram(buckets, buf, n, line); + } + } + + write_tracing_file("tracing_on", "0"); + + if (workload_exec_errno) { + const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); + pr_err("workload failed: %s\n", emsg); + goto out_close_fd; + } + + /* read remaining buffer contents */ + while (true) { + int n = read(trace_fd, buf, sizeof(buf) - 1); + if (n <= 0) + break; + make_histogram(buckets, buf, n, line); + } + + display_histogram(buckets); + +out_close_fd: + close(trace_fd); +out_reset: + reset_tracing_files(ftrace); +out: + return (done && !workload_exec_errno) ? 0 : -1; +} + static int perf_ftrace_config(const char *var, const char *value, void *cb) { struct perf_ftrace *ftrace = cb; @@ -864,6 +1084,12 @@ static int parse_graph_tracer_opts(const struct option *opt, return 0; } +enum perf_ftrace_subcommand { + PERF_FTRACE_NONE, + PERF_FTRACE_TRACE, + PERF_FTRACE_LATENCY, +}; + int cmd_ftrace(int argc, const char **argv) { int ret; @@ -915,14 +1141,21 @@ int cmd_ftrace(int argc, const char **argv) "Number of milliseconds to wait before starting tracing after program start"), OPT_PARENT(common_options), }; + const struct option latency_options[] = { + OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", + "Show latency of given function", parse_filter_func), + OPT_PARENT(common_options), + }; + const struct option *options = ftrace_options; const char * const ftrace_usage[] = { "perf ftrace [] []", "perf ftrace [] -- [] []", - "perf ftrace trace [] []", - "perf ftrace trace [] -- [] []", + "perf ftrace {trace|latency} [] []", + "perf ftrace {trace|latency} [] -- [] []", NULL }; + enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE; INIT_LIST_HEAD(&ftrace.filters); INIT_LIST_HEAD(&ftrace.notrace); @@ -938,15 +1171,29 @@ int cmd_ftrace(int argc, const char **argv) if (ret < 0) return -1; - if (argc > 1 && !strcmp(argv[1], "trace")) { - argc--; - argv++; + if (argc > 1) { + if (!strcmp(argv[1], "trace")) { + subcmd = PERF_FTRACE_TRACE; + } else if (!strcmp(argv[1], "latency")) { + subcmd = PERF_FTRACE_LATENCY; + options = latency_options; + } + + if (subcmd != PERF_FTRACE_NONE) { + argc--; + argv++; + } } + /* for backward compatibility */ + if (subcmd == PERF_FTRACE_NONE) + subcmd = PERF_FTRACE_TRACE; - argc = parse_options(argc, argv, ftrace_options, ftrace_usage, + argc = parse_options(argc, argv, options, ftrace_usage, PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc && target__none(&ftrace.target)) - ftrace.target.system_wide = true; + if (argc < 0) { + ret = -EINVAL; + goto out_delete_filters; + } ret = target__validate(&ftrace.target); if (ret) { @@ -975,7 +1222,27 @@ int cmd_ftrace(int argc, const char **argv) goto out_delete_evlist; } - ret = __cmd_ftrace(&ftrace); + switch (subcmd) { + case PERF_FTRACE_TRACE: + if (!argc && target__none(&ftrace.target)) + ftrace.target.system_wide = true; + ret = __cmd_ftrace(&ftrace); + break; + case PERF_FTRACE_LATENCY: + if (list_empty(&ftrace.filters)) { + pr_err("Should provide a function to measure\n"); 
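/*
 * The latency histogram only instruments functions named with -T
 * (the function_graph tracer is set up with exactly that filter),
 * so an empty filter list leaves nothing to measure.
 */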
+ parse_options_usage(ftrace_usage, options, "T", 1); + ret = -EINVAL; + goto out_delete_evlist; + } + ret = __cmd_latency(&ftrace); + break; + case PERF_FTRACE_NONE: + default: + pr_err("Invalid subcommand\n"); + ret = -EINVAL; + break; + } out_delete_evlist: evlist__delete(ftrace.evlist); -- cgit v1.2.3 From 177f4eac7fb7fe5c70fef30dd6c4ef8f81cf7776 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Dec 2021 10:51:53 -0800 Subject: perf ftrace: Add -b/--use-bpf option for latency subcommand The -b/--use-bpf option is to use BPF to get latency info of kernel functions. It'd have better performance impact and I observed that latency of same function is smaller than before when using BPF. Committer testing: # strace -e bpf perf ftrace latency -b -T __handle_mm_fault -a sleep 1 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_SOCKET_FILTER, insn_cnt=2, insns=0x7fff51914e00, license="GPL", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(0, 0, 0), prog_flags=0, prog_name="", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=0, func_info_rec_size=0, func_info=NULL, func_info_cnt=0, line_info_rec_size=0, line_info=NULL, line_info_cnt=0, attach_btf_id=0, attach_prog_fd=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0\20\0\0\0\20\0\0\0\5\0\0\0\1\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=45, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0000\0\0\0000\0\0\0\t\0\0\0\1\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=81, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\08\0\0\08\0\0\0\t\0\0\0\0\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=89, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0\f\0\0\0\f\0\0\0\7\0\0\0\1\0\0\0\0\0\0\20"..., btf_log_buf=NULL, btf_size=43, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0000\0\0\0000\0\0\0\t\0\0\0\1\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=81, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0000\0\0\0000\0\0\0\5\0\0\0\0\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=77, btf_log_size=0, btf_log_level=0}, 128) = -1 EINVAL (Invalid argument) bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0\350\2\0\0\350\2\0\0\353\2\0\0\0\0\0\0\0\0\0\2"..., btf_log_buf=NULL, btf_size=1515, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_ARRAY, key_size=4, value_size=32, max_entries=1, map_flags=0, inner_map_fd=0, map_name="", map_ifindex=0, btf_fd=0, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 4 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_SOCKET_FILTER, insn_cnt=5, insns=0x7fff51914c30, license="GPL", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(0, 0, 0), prog_flags=0, prog_name="", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=0, func_info_rec_size=0, func_info=NULL, func_info_cnt=0, line_info_rec_size=0, line_info=NULL, line_info_cnt=0, attach_btf_id=0, attach_prog_fd=0}, 128) = 5 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_ARRAY, key_size=4, value_size=4, max_entries=1, map_flags=BPF_F_MMAPABLE, inner_map_fd=0, map_name="", map_ifindex=0, btf_fd=0, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 4 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_SOCKET_FILTER, insn_cnt=2, insns=0x7fff51914a80, license="GPL", log_level=0, 
log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(0, 0, 0), prog_flags=0, prog_name="test", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=0, func_info_rec_size=0, func_info=NULL, func_info_cnt=0, line_info_rec_size=0, line_info=NULL, line_info_cnt=0, attach_btf_id=0, attach_prog_fd=0}, 128) = 4 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_HASH, key_size=8, value_size=8, max_entries=10000, map_flags=0, inner_map_fd=0, map_name="functime", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 4 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_HASH, key_size=4, value_size=1, max_entries=1, map_flags=0, inner_map_fd=0, map_name="cpu_filter", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 5 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_HASH, key_size=4, value_size=1, max_entries=1, map_flags=0, inner_map_fd=0, map_name="task_filter", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 7 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_PERCPU_ARRAY, key_size=4, value_size=8, max_entries=22, map_flags=0, inner_map_fd=0, map_name="latency", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 8 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_ARRAY, key_size=4, value_size=4, max_entries=1, map_flags=BPF_F_MMAPABLE, inner_map_fd=0, map_name="func_lat.bss", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=30, btf_vmlinux_value_type_id=0}, 128) = 9 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=9, key=0x7fff51914c40, value=0x7f6e99be2000, flags=BPF_ANY}, 128) = 0 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_KPROBE, insn_cnt=18, insns=0x11e4160, license="", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(5, 14, 16), prog_flags=0, prog_name="func_begin", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=3, func_info_rec_size=8, func_info=0x11dfc50, func_info_cnt=1, line_info_rec_size=16, line_info=0x11e04c0, line_info_cnt=9, attach_btf_id=0, attach_prog_fd=0}, 128) = 10 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_KPROBE, insn_cnt=99, insns=0x11ded70, license="", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(5, 14, 16), prog_flags=0, prog_name="func_end", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=3, func_info_rec_size=8, func_info=0x11dfc70, func_info_cnt=1, line_info_rec_size=16, line_info=0x11f6e10, line_info_cnt=20, attach_btf_id=0, attach_prog_fd=0}, 128) = 11 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_TRACEPOINT, insn_cnt=2, insns=0x7fff51914a80, license="GPL", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(0, 0, 0), prog_flags=0, prog_name="", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=0, func_info_rec_size=0, func_info=NULL, func_info_cnt=0, line_info_rec_size=0, line_info=NULL, line_info_cnt=0, attach_btf_id=0, attach_prog_fd=0}, 128) = 13 bpf(BPF_LINK_CREATE, {link_create={prog_fd=13, target_fd=-1, attach_type=0x29 /* BPF_??? 
*/, flags=0}}, 128) = -1 EINVAL (Invalid argument) --- SIGCHLD {si_signo=SIGCHLD, si_code=CLD_EXITED, si_pid=1699992, si_uid=0, si_status=0, si_utime=0, si_stime=0} --- bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_LOOKUP_ELEM, {map_fd=8, key=0x7fff51914f84, value=0x11f6fa0, flags=BPF_ANY}, 128) = 0 # DURATION | COUNT | GRAPH | 0 - 1 us | 52 | ################### | 1 - 2 us | 36 | ############# | 2 - 4 us | 24 | ######### | 4 - 8 us | 7 | ## | 8 - 16 us | 1 | | 16 - 32 us | 0 | | 32 - 64 us | 0 | | 64 - 128 us | 0 | | 128 - 256 us | 0 | | 256 - 512 us | 0 | | 512 - 1024 us | 0 | | 1 - 2 ms | 0 | | 2 - 4 ms | 0 | | 4 - 8 ms | 0 | | 8 - 16 ms | 0 | | 16 - 32 ms | 0 | | 32 - 64 ms | 0 | | 64 - 128 ms | 0 | | 128 - 256 ms | 0 | | 256 - 512 ms | 0 | | 512 - 1024 ms | 0 | | 1 - ... 
s | 0 | | +++ exited with 0 +++ # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211215185154.360314-5-namhyung@kernel.org [ Add missing util/cpumap.h include and removed unused 'fd' variable ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.perf | 2 +- tools/perf/builtin-ftrace.c | 158 ++++++++++++++++------------ tools/perf/util/Build | 1 + tools/perf/util/bpf_ftrace.c | 112 ++++++++++++++++++++ tools/perf/util/bpf_skel/func_latency.bpf.c | 93 ++++++++++++++++ tools/perf/util/ftrace.h | 81 ++++++++++++++ 6 files changed, 380 insertions(+), 67 deletions(-) create mode 100644 tools/perf/util/bpf_ftrace.c create mode 100644 tools/perf/util/bpf_skel/func_latency.bpf.c create mode 100644 tools/perf/util/ftrace.h diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 164a37523781..ac861e42c8f7 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -1041,7 +1041,7 @@ SKEL_OUT := $(abspath $(OUTPUT)util/bpf_skel) SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp) SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h -SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h +SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h $(SKEL_TMP_OUT) $(LIBBPF_OUTPUT): $(Q)$(MKDIR) -p $@ diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index 8fd3c9c44c69..2b54e2ddc80a 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -30,36 +30,12 @@ #include "strfilter.h" #include "util/cap.h" #include "util/config.h" +#include "util/ftrace.h" #include "util/units.h" #include "util/parse-sublevel-options.h" #define DEFAULT_TRACER "function_graph" -struct perf_ftrace { - struct evlist *evlist; - struct target target; - const char *tracer; - struct list_head filters; - struct list_head notrace; - struct list_head graph_funcs; - struct list_head nograph_funcs; - int graph_depth; - unsigned long percpu_buffer_size; - bool inherit; - int func_stack_trace; - int func_irq_info; - int graph_nosleep_time; - int graph_noirqs; - int graph_verbose; - int graph_thresh; - unsigned int initial_delay; -}; - -struct filter_entry { - struct list_head list; - char name[]; -}; - static volatile int workload_exec_errno; static bool done; @@ -704,8 +680,6 @@ out: return (done && !workload_exec_errno) ? 
0 : -1; } -#define NUM_BUCKET 22 /* 20 + 2 (for outliers in both direction) */ - static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf) { char *p, *q; @@ -816,69 +790,116 @@ static void display_histogram(int buckets[]) } -static int __cmd_latency(struct perf_ftrace *ftrace) +static int prepare_func_latency(struct perf_ftrace *ftrace) { char *trace_file; - int trace_fd; - char buf[4096]; - char line[256]; - struct pollfd pollfd = { - .events = POLLIN, - }; - int buckets[NUM_BUCKET] = { }; + int fd; - if (!(perf_cap__capable(CAP_PERFMON) || - perf_cap__capable(CAP_SYS_ADMIN))) { - pr_err("ftrace only works for %s!\n", -#ifdef HAVE_LIBCAP_SUPPORT - "users with the CAP_PERFMON or CAP_SYS_ADMIN capability" -#else - "root" -#endif - ); - return -1; - } + if (ftrace->target.use_bpf) + return perf_ftrace__latency_prepare_bpf(ftrace); if (reset_tracing_files(ftrace) < 0) { pr_err("failed to reset ftrace\n"); - goto out; + return -1; } /* reset ftrace buffer */ if (write_tracing_file("trace", "0") < 0) - goto out; + return -1; if (set_tracing_options(ftrace) < 0) - goto out_reset; + return -1; /* force to use the function_graph tracer to track duration */ if (write_tracing_file("current_tracer", "function_graph") < 0) { pr_err("failed to set current_tracer to function_graph\n"); - goto out_reset; + return -1; } trace_file = get_tracing_file("trace_pipe"); if (!trace_file) { pr_err("failed to open trace_pipe\n"); - goto out_reset; + return -1; } - trace_fd = open(trace_file, O_RDONLY); + fd = open(trace_file, O_RDONLY); + if (fd < 0) + pr_err("failed to open trace_pipe\n"); put_tracing_file(trace_file); + return fd; +} - if (trace_fd < 0) { - pr_err("failed to open trace_pipe\n"); - goto out_reset; +static int start_func_latency(struct perf_ftrace *ftrace) +{ + if (ftrace->target.use_bpf) + return perf_ftrace__latency_start_bpf(ftrace); + + if (write_tracing_file("tracing_on", "1") < 0) { + pr_err("can't enable tracing\n"); + return -1; } + return 0; +} + +static int stop_func_latency(struct perf_ftrace *ftrace) +{ + if (ftrace->target.use_bpf) + return perf_ftrace__latency_stop_bpf(ftrace); + + write_tracing_file("tracing_on", "0"); + return 0; +} + +static int read_func_latency(struct perf_ftrace *ftrace, int buckets[]) +{ + if (ftrace->target.use_bpf) + return perf_ftrace__latency_read_bpf(ftrace, buckets); + + return 0; +} + +static int cleanup_func_latency(struct perf_ftrace *ftrace) +{ + if (ftrace->target.use_bpf) + return perf_ftrace__latency_cleanup_bpf(ftrace); + + reset_tracing_files(ftrace); + return 0; +} + +static int __cmd_latency(struct perf_ftrace *ftrace) +{ + int trace_fd; + char buf[4096]; + char line[256]; + struct pollfd pollfd = { + .events = POLLIN, + }; + int buckets[NUM_BUCKET] = { }; + + if (!(perf_cap__capable(CAP_PERFMON) || + perf_cap__capable(CAP_SYS_ADMIN))) { + pr_err("ftrace only works for %s!\n", +#ifdef HAVE_LIBCAP_SUPPORT + "users with the CAP_PERFMON or CAP_SYS_ADMIN capability" +#else + "root" +#endif + ); + return -1; + } + + trace_fd = prepare_func_latency(ftrace); + if (trace_fd < 0) + goto out; + fcntl(trace_fd, F_SETFL, O_NONBLOCK); pollfd.fd = trace_fd; - if (write_tracing_file("tracing_on", "1") < 0) { - pr_err("can't enable tracing\n"); - goto out_close_fd; - } + if (start_func_latency(ftrace) < 0) + goto out; evlist__start_workload(ftrace->evlist); @@ -896,29 +917,30 @@ static int __cmd_latency(struct perf_ftrace *ftrace) } } - write_tracing_file("tracing_on", "0"); + stop_func_latency(ftrace); if (workload_exec_errno) { const 
char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); pr_err("workload failed: %s\n", emsg); - goto out_close_fd; + goto out; } /* read remaining buffer contents */ - while (true) { + while (!ftrace->target.use_bpf) { int n = read(trace_fd, buf, sizeof(buf) - 1); if (n <= 0) break; make_histogram(buckets, buf, n, line); } + read_func_latency(ftrace, buckets); + display_histogram(buckets); -out_close_fd: - close(trace_fd); -out_reset: - reset_tracing_files(ftrace); out: + close(trace_fd); + cleanup_func_latency(ftrace); + return (done && !workload_exec_errno) ? 0 : -1; } @@ -1144,6 +1166,10 @@ int cmd_ftrace(int argc, const char **argv) const struct option latency_options[] = { OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", "Show latency of given function", parse_filter_func), +#ifdef HAVE_BPF_SKEL + OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf, + "Use BPF to measure function latency"), +#endif OPT_PARENT(common_options), }; const struct option *options = ftrace_options; diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 2e5bfbb69960..294b12430d73 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -144,6 +144,7 @@ perf-$(CONFIG_LIBBPF) += bpf-loader.o perf-$(CONFIG_LIBBPF) += bpf_map.o perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o +perf-$(CONFIG_PERF_BPF_SKEL) += bpf_ftrace.o perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o perf-$(CONFIG_LIBELF) += symbol-elf.o perf-$(CONFIG_LIBELF) += probe-file.o diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c new file mode 100644 index 000000000000..ec4e2f5a2fc4 --- /dev/null +++ b/tools/perf/util/bpf_ftrace.c @@ -0,0 +1,112 @@ +#include +#include +#include +#include + +#include + +#include "util/ftrace.h" +#include "util/cpumap.h" +#include "util/debug.h" +#include "util/bpf_counter.h" + +#include "util/bpf_skel/func_latency.skel.h" + +static struct func_latency_bpf *skel; + +int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace) +{ + int err; + struct filter_entry *func; + + if (!list_is_singular(&ftrace->filters)) { + pr_err("ERROR: %s target function(s).\n", + list_empty(&ftrace->filters) ? 
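/* a single kprobe/kretprobe pair is attached below, so exactly one -T function is required */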
"No" : "Too many"); + return -1; + } + + func = list_first_entry(&ftrace->filters, struct filter_entry, list); + + skel = func_latency_bpf__open(); + if (!skel) { + pr_err("Failed to open func latency skeleton\n"); + return -1; + } + + set_max_rlimit(); + + err = func_latency_bpf__load(skel); + if (err) { + pr_err("Failed to load func latency skeleton\n"); + goto out; + } + + skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin, + false, func->name); + if (IS_ERR(skel->links.func_begin)) { + pr_err("Failed to attach fentry program\n"); + err = PTR_ERR(skel->links.func_begin); + goto out; + } + + skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end, + true, func->name); + if (IS_ERR(skel->links.func_end)) { + pr_err("Failed to attach fexit program\n"); + err = PTR_ERR(skel->links.func_end); + goto out; + } + + /* XXX: we don't actually use this fd - just for poll() */ + return open("/dev/null", O_RDONLY); + +out: + return err; +} + +int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused) +{ + skel->bss->enabled = 1; + return 0; +} + +int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused) +{ + skel->bss->enabled = 0; + return 0; +} + +int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused, + int buckets[]) +{ + int i, fd, err; + u32 idx; + u64 *hist; + int ncpus = cpu__max_cpu(); + + fd = bpf_map__fd(skel->maps.latency); + + hist = calloc(ncpus, sizeof(*hist)); + if (hist == NULL) + return -ENOMEM; + + for (idx = 0; idx < NUM_BUCKET; idx++) { + err = bpf_map_lookup_elem(fd, &idx, hist); + if (err) { + buckets[idx] = 0; + continue; + } + + for (i = 0; i < ncpus; i++) + buckets[idx] += hist[i]; + } + + free(hist); + return 0; +} + +int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused) +{ + func_latency_bpf__destroy(skel); + return 0; +} diff --git a/tools/perf/util/bpf_skel/func_latency.bpf.c b/tools/perf/util/bpf_skel/func_latency.bpf.c new file mode 100644 index 000000000000..ccd96b09fc42 --- /dev/null +++ b/tools/perf/util/bpf_skel/func_latency.bpf.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +// Copyright (c) 2021 Google +#include "vmlinux.h" +#include +#include + +// This should be in sync with "util/ftrace.h" +#define NUM_BUCKET 22 + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, sizeof(__u64)); + __uint(value_size, sizeof(__u64)); + __uint(max_entries, 10000); +} functime SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u8)); + __uint(max_entries, 1); +} cpu_filter SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u8)); + __uint(max_entries, 1); +} task_filter SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u64)); + __uint(max_entries, NUM_BUCKET); +} latency SEC(".maps"); + + +int enabled = 0; + +SEC("kprobe/func") +int BPF_PROG(func_begin) +{ + __u64 key, now; + + if (!enabled) + return 0; + + key = bpf_get_current_pid_tgid(); + now = bpf_ktime_get_ns(); + + // overwrite timestamp for nested functions + bpf_map_update_elem(&functime, &key, &now, BPF_ANY); + return 0; +} + +SEC("kretprobe/func") +int BPF_PROG(func_end) +{ + __u64 tid; + __u64 *start; + + if (!enabled) + return 0; + + tid = bpf_get_current_pid_tgid(); + + start = bpf_map_lookup_elem(&functime, &tid); + if 
(start) { + __s64 delta = bpf_ktime_get_ns() - *start; + __u32 key; + __u64 *hist; + + bpf_map_delete_elem(&functime, &tid); + + if (delta < 0) + return 0; + + // calculate index using delta in usec + for (key = 0; key < (NUM_BUCKET - 1); key++) { + if (delta < ((1000UL) << key)) + break; + } + + hist = bpf_map_lookup_elem(&latency, &key); + if (!hist) + return 0; + + *hist += 1; + } + + return 0; +} diff --git a/tools/perf/util/ftrace.h b/tools/perf/util/ftrace.h new file mode 100644 index 000000000000..887f68a185f7 --- /dev/null +++ b/tools/perf/util/ftrace.h @@ -0,0 +1,81 @@ +#ifndef __PERF_FTRACE_H__ +#define __PERF_FTRACE_H__ + +#include + +#include "target.h" + +struct evlist; + +struct perf_ftrace { + struct evlist *evlist; + struct target target; + const char *tracer; + struct list_head filters; + struct list_head notrace; + struct list_head graph_funcs; + struct list_head nograph_funcs; + unsigned long percpu_buffer_size; + bool inherit; + int graph_depth; + int func_stack_trace; + int func_irq_info; + int graph_nosleep_time; + int graph_noirqs; + int graph_verbose; + int graph_thresh; + unsigned int initial_delay; +}; + +struct filter_entry { + struct list_head list; + char name[]; +}; + +#define NUM_BUCKET 22 /* 20 + 2 (for outliers in both direction) */ + +#ifdef HAVE_BPF_SKEL + +int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace); +int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace); +int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace); +int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace, + int buckets[]); +int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace); + +#else /* !HAVE_BPF_SKEL */ + +static inline int +perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused) +{ + return -1; +} + +static inline int +perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused) +{ + return -1; +} + +static inline int +perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused) +{ + return -1; +} + +static inline int +perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused, + int buckets[] __maybe_unused) +{ + return -1; +} + +static inline int +perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused) +{ + return -1; +} + +#endif /* HAVE_BPF_SKEL */ + +#endif /* __PERF_FTRACE_H__ */ -- cgit v1.2.3 From 9c5c605219578b8167b649245e00d6407f2c18da Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Dec 2021 10:51:54 -0800 Subject: perf ftrace: Implement cpu and task filters in BPF Honor cpu and task options to set up filters (by pid or tid) in the BPF program. For example, the following command will show latency of the mutex_lock for process 2570. # perf ftrace latency -b -T mutex_lock -p 2570 sleep 3 # DURATION | COUNT | GRAPH | 0 - 1 us | 675 | ############################## | 1 - 2 us | 9 | | 2 - 4 us | 0 | | 4 - 8 us | 0 | | 8 - 16 us | 0 | | 16 - 32 us | 0 | | 32 - 64 us | 0 | | 64 - 128 us | 0 | | 128 - 256 us | 0 | | 256 - 512 us | 0 | | 512 - 1024 us | 0 | | 1 - 2 ms | 0 | | 2 - 4 ms | 0 | | 4 - 8 ms | 0 | | 8 - 16 ms | 0 | | 16 - 32 ms | 0 | | 32 - 64 ms | 0 | | 64 - 128 ms | 0 | | 128 - 256 ms | 0 | | 256 - 512 ms | 0 | | 512 - 1024 ms | 0 | | 1 - ... 
s | 0 | | Committer testing: Looking at faults on a firefox process: # strace -e bpf perf ftrace latency -b -p 1674378 -T __handle_mm_fault bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_SOCKET_FILTER, insn_cnt=2, insns=0x7ffee1fee740, license="GPL", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(0, 0, 0), prog_flags=0, prog_name="", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=0, func_info_rec_size=0, func_info=NULL, func_info_cnt=0, line_info_rec_size=0, line_info=NULL, line_info_cnt=0, attach_btf_id=0, attach_prog_fd=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0\20\0\0\0\20\0\0\0\5\0\0\0\1\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=45, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0000\0\0\0000\0\0\0\t\0\0\0\1\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=81, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\08\0\0\08\0\0\0\t\0\0\0\0\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=89, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0\f\0\0\0\f\0\0\0\7\0\0\0\1\0\0\0\0\0\0\20"..., btf_log_buf=NULL, btf_size=43, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0000\0\0\0000\0\0\0\t\0\0\0\1\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=81, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0000\0\0\0000\0\0\0\5\0\0\0\0\0\0\0\0\0\0\1"..., btf_log_buf=NULL, btf_size=77, btf_log_size=0, btf_log_level=0}, 128) = -1 EINVAL (Invalid argument) bpf(BPF_BTF_LOAD, {btf="\237\353\1\0\30\0\0\0\0\0\0\0 \3\0\0 \3\0\0\306\3\0\0\0\0\0\0\0\0\0\2"..., btf_log_buf=NULL, btf_size=1790, btf_log_size=0, btf_log_level=0}, 128) = 3 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_ARRAY, key_size=4, value_size=32, max_entries=1, map_flags=0, inner_map_fd=0, map_name="", map_ifindex=0, btf_fd=0, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 4 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_SOCKET_FILTER, insn_cnt=5, insns=0x7ffee1fee570, license="GPL", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(0, 0, 0), prog_flags=0, prog_name="", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=0, func_info_rec_size=0, func_info=NULL, func_info_cnt=0, line_info_rec_size=0, line_info=NULL, line_info_cnt=0, attach_btf_id=0, attach_prog_fd=0}, 128) = 5 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_ARRAY, key_size=4, value_size=4, max_entries=1, map_flags=BPF_F_MMAPABLE, inner_map_fd=0, map_name="", map_ifindex=0, btf_fd=0, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 4 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_SOCKET_FILTER, insn_cnt=2, insns=0x7ffee1fee3c0, license="GPL", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(0, 0, 0), prog_flags=0, prog_name="test", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=0, func_info_rec_size=0, func_info=NULL, func_info_cnt=0, line_info_rec_size=0, line_info=NULL, line_info_cnt=0, attach_btf_id=0, attach_prog_fd=0}, 128) = 4 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_HASH, key_size=8, value_size=8, max_entries=10000, map_flags=0, inner_map_fd=0, map_name="functime", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 4 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_HASH, key_size=4, value_size=1, max_entries=1, 
map_flags=0, inner_map_fd=0, map_name="cpu_filter", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 5 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_HASH, key_size=4, value_size=1, max_entries=36, map_flags=0, inner_map_fd=0, map_name="task_filter", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 6 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_PERCPU_ARRAY, key_size=4, value_size=8, max_entries=22, map_flags=0, inner_map_fd=0, map_name="latency", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=0, btf_vmlinux_value_type_id=0}, 128) = 7 bpf(BPF_MAP_CREATE, {map_type=BPF_MAP_TYPE_ARRAY, key_size=4, value_size=12, max_entries=1, map_flags=BPF_F_MMAPABLE, inner_map_fd=0, map_name="func_lat.bss", map_ifindex=0, btf_fd=3, btf_key_type_id=0, btf_value_type_id=32, btf_vmlinux_value_type_id=0}, 128) = 8 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=8, key=0x7ffee1fee580, value=0x7f01d940a000, flags=BPF_ANY}, 128) = 0 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_KPROBE, insn_cnt=42, insns=0x1871f30, license="", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(5, 14, 16), prog_flags=0, prog_name="func_begin", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=3, func_info_rec_size=8, func_info=0x18746a0, func_info_cnt=1, line_info_rec_size=16, line_info=0x1874550, line_info_cnt=20, attach_btf_id=0, attach_prog_fd=0}, 128) = 9 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_KPROBE, insn_cnt=99, insns=0x18769b0, license="", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(5, 14, 16), prog_flags=0, prog_name="func_end", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=3, func_info_rec_size=8, func_info=0x188a640, func_info_cnt=1, line_info_rec_size=16, line_info=0x188a660, line_info_cnt=20, attach_btf_id=0, attach_prog_fd=0}, 128) = 10 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, 
flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_MAP_UPDATE_ELEM, {map_fd=6, key=0x7ffee1fee8e0, value=0x7ffee1fee8df, flags=BPF_ANY}, 128) = 0 bpf(BPF_PROG_LOAD, {prog_type=BPF_PROG_TYPE_TRACEPOINT, insn_cnt=2, insns=0x7ffee1fee3c0, license="GPL", log_level=0, log_size=0, log_buf=NULL, kern_version=KERNEL_VERSION(0, 0, 0), prog_flags=0, prog_name="", prog_ifindex=0, expected_attach_type=BPF_CGROUP_INET_INGRESS, prog_btf_fd=0, func_info_rec_size=0, func_info=NULL, func_info_cnt=0, line_info_rec_size=0, line_info=NULL, line_info_cnt=0, attach_btf_id=0, attach_prog_fd=0}, 128) = 12 bpf(BPF_LINK_CREATE, {link_create={prog_fd=12, target_fd=-1, attach_type=0x29 /* BPF_??? */, flags=0}}, 128) = -1 EINVAL (Invalid argument) ^Cstrace: Process 1702285 detached # DURATION | COUNT | GRAPH | 0 - 1 us | 109 | ################# | 1 - 2 us | 127 | ################### | 2 - 4 us | 36 | ##### | 4 - 8 us | 20 | ### | 8 - 16 us | 2 | | 16 - 32 us | 0 | | 32 - 64 us | 0 | | 64 - 128 us | 0 | | 128 - 256 us | 0 | | 256 - 512 us | 0 | | 512 - 1024 us | 0 | | 1 - 2 ms | 0 | | 2 - 4 ms | 0 | | 4 - 8 ms | 0 | | 8 - 16 ms | 0 | | 16 - 32 ms | 0 | | 32 - 64 ms | 0 | | 64 - 128 ms | 0 | | 128 - 256 ms | 0 | | 256 - 512 ms | 0 | | 512 - 1024 ms | 0 | | 1 - ... 
s | 0 | | # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211215185154.360314-6-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/bpf_ftrace.c | 42 ++++++++++++++++++++++++++++- tools/perf/util/bpf_skel/func_latency.bpf.c | 21 +++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c index ec4e2f5a2fc4..f00a2de6778c 100644 --- a/tools/perf/util/bpf_ftrace.c +++ b/tools/perf/util/bpf_ftrace.c @@ -7,7 +7,9 @@ #include "util/ftrace.h" #include "util/cpumap.h" +#include "util/thread_map.h" #include "util/debug.h" +#include "util/evlist.h" #include "util/bpf_counter.h" #include "util/bpf_skel/func_latency.skel.h" @@ -16,7 +18,8 @@ static struct func_latency_bpf *skel; int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace) { - int err; + int fd, err; + int i, ncpus = 1, ntasks = 1; struct filter_entry *func; if (!list_is_singular(&ftrace->filters)) { @@ -33,6 +36,17 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace) return -1; } + /* don't need to set cpu filter for system-wide mode */ + if (ftrace->target.cpu_list) { + ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus); + bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus); + } + + if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) { + ntasks = perf_thread_map__nr(ftrace->evlist->core.threads); + bpf_map__set_max_entries(skel->maps.task_filter, ntasks); + } + set_max_rlimit(); err = func_latency_bpf__load(skel); @@ -41,6 +55,32 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace) goto out; } + if (ftrace->target.cpu_list) { + u32 cpu; + u8 val = 1; + + skel->bss->has_cpu = 1; + fd = bpf_map__fd(skel->maps.cpu_filter); + + for (i = 0; i < ncpus; i++) { + cpu = cpu_map__cpu(ftrace->evlist->core.cpus, i); + bpf_map_update_elem(fd, &cpu, &val, BPF_ANY); + } + } + + if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) { + u32 pid; + u8 val = 1; + + skel->bss->has_task = 1; + fd = bpf_map__fd(skel->maps.task_filter); + + for (i = 0; i < ntasks; i++) { + pid = perf_thread_map__pid(ftrace->evlist->core.threads, i); + bpf_map_update_elem(fd, &pid, &val, BPF_ANY); + } + } + skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin, false, func->name); if (IS_ERR(skel->links.func_begin)) { diff --git a/tools/perf/util/bpf_skel/func_latency.bpf.c b/tools/perf/util/bpf_skel/func_latency.bpf.c index ccd96b09fc42..ea94187fe443 100644 --- a/tools/perf/util/bpf_skel/func_latency.bpf.c +++ b/tools/perf/util/bpf_skel/func_latency.bpf.c @@ -37,6 +37,8 @@ struct { int enabled = 0; +int has_cpu = 0; +int has_task = 0; SEC("kprobe/func") int BPF_PROG(func_begin) @@ -47,6 +49,25 @@ int BPF_PROG(func_begin) return 0; key = bpf_get_current_pid_tgid(); + + if (has_cpu) { + __u32 cpu = bpf_get_smp_processor_id(); + __u8 *ok; + + ok = bpf_map_lookup_elem(&cpu_filter, &cpu); + if (!ok) + return 0; + } + + if (has_task) { + __u32 pid = key & 0xffffffff; + __u8 *ok; + + ok = bpf_map_lookup_elem(&task_filter, &pid); + if (!ok) + return 0; + } + now = bpf_ktime_get_ns(); // overwrite timestamp for nested functions -- cgit v1.2.3 From a840974e96fd51b47c79301522bccf23cc8bb388 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 16 Dec 2021 16:14:54 +0100 Subject: perf 
test: Test 73 Sig_trap fails on s390 In linux-next, commit 5504f67944484495 ("perf test sigtrap: Add basic stress test for sigtrap handling") introduced a new test which uses breakpoint events. These events are not supported on s390 and PowerPC and always fail: # perf test -F 73 73: Sigtrap : FAILED! # Fix it the same way as in the breakpoint tests in file tests/bp_account.c where these types of tests are skipped on s390 and PowerPC platforms. With this patch, skip this test on both platforms. Output after: # perf test -F 73 73: Sigtrap # Fixes: 5504f67944484495 ("perf test sigtrap: Add basic stress test for sigtrap handling") Signed-off-by: Thomas Richter Acked-by: Marco Elver Cc: Heiko Carstens Cc: Sumanth Korikkar Cc: Sven Schnelle Cc: Vasily Gorbik Link: https://lore.kernel.org/r/20211216151454.752066-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/sigtrap.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c index 1004bf0e7cc9..1f147fe6595f 100644 --- a/tools/perf/tests/sigtrap.c +++ b/tools/perf/tests/sigtrap.c @@ -22,6 +22,19 @@ #include "tests.h" #include "../perf-sys.h" +/* + * PowerPC and S390 do not support creation of instruction breakpoints using the + * perf_event interface. + * + * Just disable the test for these architectures until these issues are + * resolved. + */ +#if defined(__powerpc__) || defined(__s390x__) +#define BP_ACCOUNT_IS_SUPPORTED 0 +#else +#define BP_ACCOUNT_IS_SUPPORTED 1 +#endif + #define NUM_THREADS 5 static struct { @@ -122,6 +135,11 @@ static int test__sigtrap(struct test_suite *test __maybe_unused, int subtest __m char sbuf[STRERR_BUFSIZE]; int i, fd, ret = TEST_FAIL; + if (!BP_ACCOUNT_IS_SUPPORTED) { + pr_debug("Test not supported on this architecture"); + return TEST_SKIP; + } + pthread_barrier_init(&barrier, NULL, NUM_THREADS + 1); action.sa_flags = SA_SIGINFO | SA_NODEFER; -- cgit v1.2.3 From ff8752d7617da301ad3b7ef18caa58d135ee8c3c Mon Sep 17 00:00:00 2001 From: German Gomez Date: Thu, 16 Dec 2021 15:24:04 +0000 Subject: perf arm-spe: Synthesize SPE instruction events Synthesize instruction events for every ARM SPE record. Arm SPE implements a hardware-based sample period, and perf implements a software-based one. Add a warning message to inform the user of this.
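The software period on top of SPE's hardware period reduces to a per-queue counter, as the hunks that follow show. A stand-alone sketch of the same down-sampling logic (the struct and function names here are illustrative, not perf's own):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative stand-in for perf's per-queue decoder state */
    struct spe_queue_sketch {
            uint64_t instructions_sample_period;  /* software period, in records */
            uint64_t period_instructions;         /* records since last sample */
    };

    /* Return true when a synthetic instruction sample should be emitted for
     * the current hardware record; the records in between are simply dropped,
     * which is what the warning about --itrace refers to. */
    static bool spe_take_sample(struct spe_queue_sketch *q)
    {
            if (++q->period_instructions < q->instructions_sample_period)
                    return false;
            q->period_instructions = 0;
            return true;
    }

    int main(void)
    {
            struct spe_queue_sketch q = { .instructions_sample_period = 4 };
            int rec;

            for (rec = 1; rec <= 10; rec++)
                    if (spe_take_sample(&q))
                            printf("record %d -> synthesized sample\n", rec);
            return 0;
    }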
Signed-off-by: German Gomez Tested-by: Leo Yan Acked-by: Namhyung Kim Cc: Alexander Shishkin Cc: Jiri Olsa Cc: John Garry Cc: Mark Rutland Cc: Mathieu Poirier Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211216152404.52474-1-german.gomez@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/arm-spe.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 8a3828f86901..d2b64e3f588b 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -58,6 +58,8 @@ struct arm_spe { u8 sample_branch; u8 sample_remote_access; u8 sample_memory; + u8 sample_instructions; + u64 instructions_sample_period; u64 l1d_miss_id; u64 l1d_access_id; @@ -68,6 +70,7 @@ struct arm_spe { u64 branch_miss_id; u64 remote_access_id; u64 memory_id; + u64 instructions_id; u64 kernel_start; @@ -90,6 +93,7 @@ struct arm_spe_queue { u64 time; u64 timestamp; struct thread *thread; + u64 period_instructions; }; static void arm_spe_dump(struct arm_spe *spe __maybe_unused, @@ -202,6 +206,7 @@ static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe, speq->pid = -1; speq->tid = -1; speq->cpu = -1; + speq->period_instructions = 0; /* params set */ params.get_trace = arm_spe_get_trace; @@ -353,6 +358,35 @@ static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq, return arm_spe_deliver_synth_event(spe, speq, event, &sample); } +static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq, + u64 spe_events_id, u64 data_src) +{ + struct arm_spe *spe = speq->spe; + struct arm_spe_record *record = &speq->decoder->record; + union perf_event *event = speq->event_buf; + struct perf_sample sample = { .ip = 0, }; + + /* + * Handles perf instruction sampling period. 
+ */ + speq->period_instructions++; + if (speq->period_instructions < spe->instructions_sample_period) + return 0; + speq->period_instructions = 0; + + arm_spe_prep_sample(spe, speq, event, &sample); + + sample.id = spe_events_id; + sample.stream_id = spe_events_id; + sample.addr = record->virt_addr; + sample.phys_addr = record->phys_addr; + sample.data_src = data_src; + sample.period = spe->instructions_sample_period; + sample.weight = record->latency; + + return arm_spe_deliver_synth_event(spe, speq, event, &sample); +} + #define SPE_MEM_TYPE (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS | \ ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS | \ ARM_SPE_REMOTE_ACCESS) @@ -482,6 +516,12 @@ static int arm_spe_sample(struct arm_spe_queue *speq) return err; } + if (spe->sample_instructions) { + err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src); + if (err) + return err; + } + return 0; } @@ -1110,7 +1150,29 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session) return err; spe->memory_id = id; arm_spe_set_event_name(evlist, id, "memory"); + id += 1; + } + + if (spe->synth_opts.instructions) { + if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) { + pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n"); + goto synth_instructions_out; + } + if (spe->synth_opts.period > 1) + pr_warning("Arm SPE has a hardware-based sample period.\n" + "Additional instruction events will be discarded by --itrace\n"); + + spe->sample_instructions = true; + attr.config = PERF_COUNT_HW_INSTRUCTIONS; + attr.sample_period = spe->synth_opts.period; + spe->instructions_sample_period = attr.sample_period; + err = arm_spe_synth_event(session, &attr, id); + if (err) + return err; + spe->instructions_id = id; + arm_spe_set_event_name(evlist, id, "instructions"); } +synth_instructions_out: return 0; } -- cgit v1.2.3 From f8464e084dd3c4cf37bdbeb06fea0afbd2e0f4e8 Mon Sep 17 00:00:00 2001 From: Carsten Haitzler Date: Wed, 15 Dec 2021 16:03:54 +0000 Subject: perf test: Use 3 digits for test numbering now we can have more tests This is in preparation for adding more tests that will need the test number to be 3 digits so they align nicely in the output.
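The width change is easy to see in isolation. A minimal sketch comparing the old "%2d" and new "%3d" formats (the test names here are made up):

    #include <stdio.h>

    int main(void)
    {
            /* old format: a third digit pushes the description column over */
            printf("%2d: %s\n", 99, "Sigtrap");
            printf("%2d: %s\n", 100, "CoreSight basic");
            /* new format: two- and three-digit numbers share one column */
            printf("%3d: %s\n", 99, "Sigtrap");
            printf("%3d: %s\n", 100, "CoreSight basic");
            return 0;
    }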
Reviewed-by: Leo Yan Signed-off-by: Carsten Haitzler Cc: Mathieu Poirier Cc: Mike Leach Cc: Suzuki Poulouse Cc: coresight@lists.linaro.org Link: http://lore.kernel.org/lkml/20211215160403.69264-3-carsten.haitzler@foss.arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/builtin-test.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 1fb9f2a11d63..fac3717d9ba1 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -421,7 +421,7 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width, continue; st.file = ent->d_name; - pr_info("%2d: %-*s:", i, width, test_suite.desc); + pr_info("%3d: %-*s:", i, width, test_suite.desc); if (intlist__find(skiplist, i)) { color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); @@ -471,7 +471,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) continue; } - pr_info("%2d: %-*s:", i, width, test_description(t, -1)); + pr_info("%3d: %-*s:", i, width, test_description(t, -1)); if (intlist__find(skiplist, i)) { color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); @@ -511,7 +511,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) curr, argc, argv)) continue; - pr_info("%2d.%1d: %-*s:", i, subi + 1, subw, + pr_info("%3d.%1d: %-*s:", i, subi + 1, subw, test_description(t, subi)); test_and_print(t, subi); } @@ -546,7 +546,7 @@ static int perf_test__list_shell(int argc, const char **argv, int i) if (!perf_test__matches(t.desc, curr, argc, argv)) continue; - pr_info("%2d: %s\n", i, t.desc); + pr_info("%3d: %s\n", i, t.desc); } @@ -568,14 +568,14 @@ static int perf_test__list(int argc, const char **argv) if (!perf_test__matches(test_description(t, -1), curr, argc, argv)) continue; - pr_info("%2d: %s\n", i, test_description(t, -1)); + pr_info("%3d: %s\n", i, test_description(t, -1)); if (has_subtests(t)) { int subn = num_subtests(t); int subi; for (subi = 0; subi < subn; subi++) - pr_info("%2d:%1d: %s\n", i, subi + 1, + pr_info("%3d:%1d: %s\n", i, subi + 1, test_description(t, subi)); } } -- cgit v1.2.3 From 7248e308a57587615431b83689cd57e957815bfc Mon Sep 17 00:00:00 2001 From: Alexandre Truong Date: Fri, 17 Dec 2021 15:45:15 +0000 Subject: perf tools: Record ARM64 LR register automatically On ARM64, automatically record the link register if the frame pointer mode is on. It will be used to do a dwarf unwind to find the caller of the leaf frame if the frame pointer was omitted. 
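The per-arch opt-in uses the usual weak-symbol pattern: a no-op default in generic code that an architecture can override at link time. A self-contained sketch of the mechanism (generic names; the LR bit position is quoted from memory from the arm64 sample-registers ABI and should be treated as an assumption):

    /* record_sketch.c: generic side, weak no-op default */
    #include <stdio.h>

    struct opts_sketch { unsigned long sample_user_regs; };

    void __attribute__((weak)) arch_add_leaf_frame_opts(struct opts_sketch *o)
    {
            (void)o;        /* generic architectures record nothing extra */
    }

    int main(void)
    {
            struct opts_sketch o = { 0 };

            arch_add_leaf_frame_opts(&o);
            printf("sample_user_regs = %#lx\n", o.sample_user_regs);
            return 0;
    }

    /* A second object, built only for arm64, would provide the strong
     * symbol that wins at link time:
     *
     *      void arch_add_leaf_frame_opts(struct opts_sketch *o)
     *      {
     *              o->sample_user_regs |= 1UL << 30;  // assumed: LR is bit 30
     *      }
     */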
Reviewed-by: James Clark Signed-off-by: Alexandre Truong Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211217154521.80603-2-german.gomez@arm.com Signed-off-by: German Gomez Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm64/util/machine.c | 7 +++++++ tools/perf/builtin-record.c | 8 ++++++++ tools/perf/util/callchain.h | 2 ++ 3 files changed, 17 insertions(+) diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c index 7e7714290a87..d2ce31e28cd7 100644 --- a/tools/perf/arch/arm64/util/machine.c +++ b/tools/perf/arch/arm64/util/machine.c @@ -5,6 +5,8 @@ #include #include "debug.h" #include "symbol.h" +#include "callchain.h" +#include "record.h" /* On arm64, kernel text segment starts at high memory address, * for example 0xffff 0000 8xxx xxxx. Modules start at a low memory @@ -26,3 +28,8 @@ void arch__symbols__fixup_end(struct symbol *p, struct symbol *c) p->end = c->start; pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end); } + +void arch__add_leaf_frame_record_opts(struct record_opts *opts) +{ + opts->sample_user_regs |= sample_reg_masks[PERF_REG_ARM64_LR].mask; +} diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 0338b813585a..6ac2160913ea 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -2267,6 +2267,10 @@ out_free: return ret; } +void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused) +{ +} + static int parse_control_option(const struct option *opt, const char *str, int unset __maybe_unused) @@ -2898,6 +2902,10 @@ int cmd_record(int argc, const char **argv) } rec->opts.target.hybrid = perf_pmu__has_hybrid(); + + if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP) + arch__add_leaf_frame_record_opts(&rec->opts); + err = -ENOMEM; if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) usage_with_options(record_usage, record_options); diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 5824134f983b..77fba053c677 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -280,6 +280,8 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused, } #endif +void arch__add_leaf_frame_record_opts(struct record_opts *opts); + char *callchain_list__sym_name(struct callchain_list *cl, char *bf, size_t bfsize, bool show_dso); char *callchain_node__scnprintf_value(struct callchain_node *node, -- cgit v1.2.3 From 32bfa5bf71db672c646751da131a17aace8cceac Mon Sep 17 00:00:00 2001 From: Alexandre Truong Date: Fri, 17 Dec 2021 15:45:16 +0000 Subject: perf machine: Add a mechanism to inject stack frames Add a mechanism for platforms to inject stack frames for the leaf frame caller if there is enough information to determine a frame is missing from dwarf or other post processing mechanisms. 
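Conceptually, the injection is a one-element splice into the resolved callchain at the user-stack boundary. A toy, flat-array version of the idea (perf's real code feeds frames through a callchain_cursor instead, so everything here is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_IPS 16

    /* Insert 'caller' right after position 'idx' in a flat ip array. */
    static int inject_frame(uint64_t *ips, int *nr, int idx, uint64_t caller)
    {
            if (*nr >= MAX_IPS || caller == 0 || caller == ips[idx])
                    return -1;      /* no room, or nothing to add */
            memmove(&ips[idx + 2], &ips[idx + 1],
                    (*nr - idx - 1) * sizeof(*ips));
            ips[idx + 1] = caller;
            (*nr)++;
            return 0;
    }

    int main(void)
    {
            uint64_t ips[MAX_IPS] = { 0x400100 /* leaf */, 0x400300 /* main */ };
            int i, nr = 2;

            inject_frame(ips, &nr, 0, 0x400200 /* recovered parent, made up */);
            for (i = 0; i < nr; i++)
                    printf("%#llx\n", (unsigned long long)ips[i]);
            return 0;
    }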
Reviewed-by: James Clark Signed-off-by: Alexandre Truong Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211217154521.80603-3-german.gomez@arm.com Signed-off-by: German Gomez Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index fb8496df8432..3eddad009f78 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2710,6 +2710,12 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread, return err; } +static u64 get_leaf_frame_caller(struct perf_sample *sample __maybe_unused, + struct thread *thread __maybe_unused, int usr_idx __maybe_unused) +{ + return 0; +} + static int thread__resolve_callchain_sample(struct thread *thread, struct callchain_cursor *cursor, struct evsel *evsel, @@ -2723,9 +2729,10 @@ static int thread__resolve_callchain_sample(struct thread *thread, struct ip_callchain *chain = sample->callchain; int chain_nr = 0; u8 cpumode = PERF_RECORD_MISC_USER; - int i, j, err, nr_entries; + int i, j, err, nr_entries, usr_idx; int skip_idx = -1; int first_call = 0; + u64 leaf_frame_caller; if (chain) chain_nr = chain->nr; @@ -2850,6 +2857,34 @@ check_calls: continue; } + /* + * PERF_CONTEXT_USER allows us to locate where the user stack ends. + * Depending on callchain_param.order and the position of PERF_CONTEXT_USER, + * the index will be different in order to add the missing frame + * at the right place. + */ + + usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1; + + if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) { + + leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx); + + /* + * check if leaf_frame_Caller != ip to not add the same + * value twice. + */ + + if (leaf_frame_caller && leaf_frame_caller != ip) { + + err = add_callchain_ip(thread, cursor, parent, + root_al, &cpumode, leaf_frame_caller, + false, NULL, NULL, 0); + if (err) + return (err < 0) ? err : 0; + } + } + err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip, false, NULL, NULL, 0); -- cgit v1.2.3 From ab23692134489f0f563168449fc27bfb5d6b04dd Mon Sep 17 00:00:00 2001 From: Alexandre Truong Date: Fri, 17 Dec 2021 15:45:17 +0000 Subject: perf script: Use callchain_param_setup() instead of open coded equivalent Refactor script__setup_sample_type() to use callchain_param_setup(), replacing the duplicated callchain parameter setup code.
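The helper being adopted boils down to a pure function of the combined sample_type. A sketch of the policy it centralizes (the PERF_SAMPLE_* bit positions below are copied from memory and should be checked against uapi/linux/perf_event.h):

    #include <stdint.h>
    #include <stdio.h>

    enum mode_sketch { MODE_FP, MODE_DWARF, MODE_LBR };

    /* assumed bit positions, mirroring uapi/linux/perf_event.h */
    #define SAMPLE_BRANCH_STACK  (1ULL << 11)
    #define SAMPLE_REGS_USER     (1ULL << 12)
    #define SAMPLE_STACK_USER    (1ULL << 13)

    static enum mode_sketch pick_mode(uint64_t sample_type)
    {
            /* user registers plus a user stack dump allow DWARF unwinding */
            if ((sample_type & SAMPLE_REGS_USER) &&
                (sample_type & SAMPLE_STACK_USER))
                    return MODE_DWARF;
            if (sample_type & SAMPLE_BRANCH_STACK)
                    return MODE_LBR;
            return MODE_FP;     /* default to frame pointers */
    }

    int main(void)
    {
            printf("%d\n", pick_mode(SAMPLE_REGS_USER | SAMPLE_STACK_USER));
            return 0;
    }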
Reviewed-by: James Clark Signed-off-by: Alexandre Truong Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211217154521.80603-4-german.gomez@arm.com Signed-off-by: German Gomez Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index da2175d70ac9..ab7d575f97f2 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -3468,16 +3468,7 @@ static void script__setup_sample_type(struct perf_script *script) struct perf_session *session = script->session; u64 sample_type = evlist__combined_sample_type(session->evlist); - if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) { - if ((sample_type & PERF_SAMPLE_REGS_USER) && - (sample_type & PERF_SAMPLE_STACK_USER)) { - callchain_param.record_mode = CALLCHAIN_DWARF; - dwarf_callchain_users = true; - } else if (sample_type & PERF_SAMPLE_BRANCH_STACK) - callchain_param.record_mode = CALLCHAIN_LBR; - else - callchain_param.record_mode = CALLCHAIN_FP; - } + callchain_param_setup(sample_type); if (script->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) { pr_warning("Can't find LBR callchain. Switch off --stitch-lbr.\n" -- cgit v1.2.3 From aa8db3e41dae953b636ff68e944479900d149a37 Mon Sep 17 00:00:00 2001 From: Alexandre Truong Date: Fri, 17 Dec 2021 15:45:18 +0000 Subject: perf callchain: Enable dwarf_callchain_users on arm64 Enable dwarf_callchain_users on arm64 which will be needed to do a DWARF unwind in order to get the caller of the leaf frame. Reviewed-by: James Clark Signed-off-by: Alexandre Truong Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211217154521.80603-5-german.gomez@arm.com Signed-off-by: German Gomez Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 4 ++-- tools/perf/builtin-script.c | 4 ++-- tools/perf/util/callchain.c | 14 +++++++++++++- tools/perf/util/callchain.h | 2 +- 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 8ae400429870..1dd92d8c9279 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -410,7 +410,7 @@ static int report__setup_sample_type(struct report *rep) } } - callchain_param_setup(sample_type); + callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env)); if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) { ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n" @@ -1127,7 +1127,7 @@ static int process_attr(struct perf_tool *tool __maybe_unused, * on events sample_type. */ sample_type = evlist__combined_sample_type(*pevlist); - callchain_param_setup(sample_type); + callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env)); return 0; } diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index ab7d575f97f2..d308adfd1176 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2318,7 +2318,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event, * on events sample_type. 
*/ sample_type = evlist__combined_sample_type(evlist); - callchain_param_setup(sample_type); + callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env)); /* Enable fields for callchain entries */ if (symbol_conf.use_callchain && @@ -3468,7 +3468,7 @@ static void script__setup_sample_type(struct perf_script *script) struct perf_session *session = script->session; u64 sample_type = evlist__combined_sample_type(session->evlist); - callchain_param_setup(sample_type); + callchain_param_setup(sample_type, perf_env__arch(session->machines.host.env)); if (script->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) { pr_warning("Can't find LBR callchain. Switch off --stitch-lbr.\n" diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 8e2777133bd9..131207b91d15 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -1600,7 +1600,7 @@ void callchain_cursor_reset(struct callchain_cursor *cursor) map__zput(node->ms.map); } -void callchain_param_setup(u64 sample_type) +void callchain_param_setup(u64 sample_type, const char *arch) { if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) { if ((sample_type & PERF_SAMPLE_REGS_USER) && @@ -1612,6 +1612,18 @@ void callchain_param_setup(u64 sample_type) else callchain_param.record_mode = CALLCHAIN_FP; } + + /* + * It's necessary to use libunwind to reliably determine the caller of + * a leaf function on aarch64, as otherwise we cannot know whether to + * start from the LR or FP. + * + * Always starting from the LR can result in duplicate or entirely + * erroneous entries. Always skipping the LR and starting from the FP + * can result in missing entries. + */ + if (callchain_param.record_mode == CALLCHAIN_FP && !strcmp(arch, "arm64")) + dwarf_callchain_users = true; } static bool chain_match(struct callchain_list *base_chain, diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 77fba053c677..d95615daed73 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -300,7 +300,7 @@ int callchain_branch_counts(struct callchain_root *root, u64 *branch_count, u64 *predicted_count, u64 *abort_count, u64 *cycles_count); -void callchain_param_setup(u64 sample_type); +void callchain_param_setup(u64 sample_type, const char *arch); bool callchain_cnode_matched(struct callchain_node *base_cnode, struct callchain_node *pair_cnode); -- cgit v1.2.3 From ffc60350489db9f6e3010ac1e795078cb0d06efe Mon Sep 17 00:00:00 2001 From: German Gomez Date: Fri, 17 Dec 2021 15:45:19 +0000 Subject: perf tools: Refactor SMPL_REG macro in perf_regs.h Refactor the SAMPL_REG macro so that it can be used in a followup commit to obtain the masks for ARM64 registers. 
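With the mask factored out, callers can name a single register's bit without walking the sample_reg table. A trivial usage sketch (the bit position for the arm64 LR is an assumption taken from the arm64 sample-registers ABI):

    #include <stdint.h>
    #include <stdio.h>

    #define SMPL_REG_MASK(b) (1ULL << (b))

    int main(void)
    {
            /* assumed: PERF_REG_ARM64_LR == 30 */
            uint64_t lr_mask = SMPL_REG_MASK(30);

            printf("LR mask: %#llx\n", (unsigned long long)lr_mask);
            return 0;
    }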
Reviewed-by: James Clark Signed-off-by: German Gomez Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211217154521.80603-6-german.gomez@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/perf_regs.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 4e6b1299c571..ce1127af05e4 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -11,8 +11,11 @@ struct sample_reg { const char *name; uint64_t mask; }; -#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) } -#define SMPL_REG2(n, b) { .name = #n, .mask = 3ULL << (b) } + +#define SMPL_REG_MASK(b) (1ULL << (b)) +#define SMPL_REG(n, b) { .name = #n, .mask = SMPL_REG_MASK(b) } +#define SMPL_REG2_MASK(b) (3ULL << (b)) +#define SMPL_REG2(n, b) { .name = #n, .mask = SMPL_REG2_MASK(b) } #define SMPL_REG_END { .name = NULL } enum { -- cgit v1.2.3 From b9f6fbb3b2c29736970ae9fcc0e82b0bd459442b Mon Sep 17 00:00:00 2001 From: Alexandre Truong Date: Fri, 17 Dec 2021 15:45:20 +0000 Subject: perf arm64: Inject missing frames when using 'perf record --call-graph=fp' When unwinding using frame pointers on ARM64, the return address of the current function may not have been pushed into the stack when a function was interrupted, which makes perf show an incorrect call graph to the user. Consider the following example program: void leaf() { /* long computation */ } void parent() { // (1) leaf(); // (2) } ... could be compiled into (using gcc -fno-inline -fno-omit-frame-pointer): leaf: /* long computation */ nop ret parent: // (1) stp x29, x30, [sp, -16]! mov x29, sp bl parent nop ldp x29, x30, [sp], 16 // (2) ret If the program is interrupted at (1), (2), or any point in "leaf:", the call graph will skip the callers of the current function. We can unwind using the dwarf info and check if the return addr is the same as the LR register, and inject the missing frame into the call graph. Before this patch, the above example shows the following call-graph when recording using "--call-graph fp" mode in ARM64: # Children Self Command Shared Object Symbol # ........ ........ ........ ................ ...................... # 99.86% 99.86% program3 program3 [.] leaf | ---_start __libc_start_main main leaf As can be seen, the "parent" function is missing. This is specially problematic in "leaf" because for leaf functions the compiler may always omit pushing the return addr into the stack. After this patch, it shows the correct graph: # Children Self Command Shared Object Symbol # ........ ........ ........ ................ ...................... # 99.86% 99.86% program3 program3 [.] 
leaf | ---_start __libc_start_main main parent leaf Reviewed-by: James Clark Signed-off-by: Alexandre Truong Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20211217154521.80603-7-german.gomez@arm.com Signed-off-by: German Gomez [ Rename machine__normalize_is() to machine__normalized_is(), as suggested by James Clark ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/Build | 1 + .../perf/util/arm64-frame-pointer-unwind-support.c | 63 ++++++++++++++++++++++ .../perf/util/arm64-frame-pointer-unwind-support.h | 10 ++++ tools/perf/util/machine.c | 19 +++++-- tools/perf/util/machine.h | 1 + 5 files changed, 89 insertions(+), 5 deletions(-) create mode 100644 tools/perf/util/arm64-frame-pointer-unwind-support.c create mode 100644 tools/perf/util/arm64-frame-pointer-unwind-support.h diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 294b12430d73..2a403cefcaf2 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -1,3 +1,4 @@ +perf-y += arm64-frame-pointer-unwind-support.o perf-y += annotate.o perf-y += block-info.o perf-y += block-range.o diff --git a/tools/perf/util/arm64-frame-pointer-unwind-support.c b/tools/perf/util/arm64-frame-pointer-unwind-support.c new file mode 100644 index 000000000000..4f5ecf51ed38 --- /dev/null +++ b/tools/perf/util/arm64-frame-pointer-unwind-support.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "arm64-frame-pointer-unwind-support.h" +#include "callchain.h" +#include "event.h" +#include "perf_regs.h" // SMPL_REG_MASK +#include "unwind.h" + +#define perf_event_arm_regs perf_event_arm64_regs +#include "../arch/arm64/include/uapi/asm/perf_regs.h" +#undef perf_event_arm_regs + +struct entries { + u64 stack[2]; + size_t length; +}; + +static bool get_leaf_frame_caller_enabled(struct perf_sample *sample) +{ + return callchain_param.record_mode == CALLCHAIN_FP && sample->user_regs.regs + && sample->user_regs.mask & SMPL_REG_MASK(PERF_REG_ARM64_LR); +} + +static int add_entry(struct unwind_entry *entry, void *arg) +{ + struct entries *entries = arg; + + entries->stack[entries->length++] = entry->ip; + return 0; +} + +u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thread, int usr_idx) +{ + int ret; + struct entries entries = {}; + struct regs_dump old_regs = sample->user_regs; + + if (!get_leaf_frame_caller_enabled(sample)) + return 0; + + /* + * If PC and SP are not recorded, get the value of PC from the stack + * and set its mask. SP is not used when doing the unwinding but it + * still needs to be set to prevent failures. + */ + + if (!(sample->user_regs.mask & SMPL_REG_MASK(PERF_REG_ARM64_PC))) { + sample->user_regs.cache_mask |= SMPL_REG_MASK(PERF_REG_ARM64_PC); + sample->user_regs.cache_regs[PERF_REG_ARM64_PC] = sample->callchain->ips[usr_idx+1]; + } + + if (!(sample->user_regs.mask & SMPL_REG_MASK(PERF_REG_ARM64_SP))) { + sample->user_regs.cache_mask |= SMPL_REG_MASK(PERF_REG_ARM64_SP); + sample->user_regs.cache_regs[PERF_REG_ARM64_SP] = 0; + } + + ret = unwind__get_entries(add_entry, &entries, thread, sample, 2); + sample->user_regs = old_regs; + + if (ret || entries.length != 2) + return ret; + + return callchain_param.order == ORDER_CALLER ? 
entries.stack[0] : entries.stack[1]; } diff --git a/tools/perf/util/arm64-frame-pointer-unwind-support.h b/tools/perf/util/arm64-frame-pointer-unwind-support.h new file mode 100644 index 000000000000..32af9ce94398 --- /dev/null +++ b/tools/perf/util/arm64-frame-pointer-unwind-support.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PERF_ARM_FRAME_POINTER_UNWIND_SUPPORT_H +#define __PERF_ARM_FRAME_POINTER_UNWIND_SUPPORT_H + +#include "event.h" +#include "thread.h" + +u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thread, int user_idx); + +#endif /* __PERF_ARM_FRAME_POINTER_UNWIND_SUPPORT_H */ diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 3eddad009f78..3901440aeff9 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -34,6 +34,7 @@ #include "bpf-event.h" #include // page_size #include "cgroup.h" +#include "arm64-frame-pointer-unwind-support.h" #include #include @@ -2710,10 +2711,13 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread, return err; } -static u64 get_leaf_frame_caller(struct perf_sample *sample __maybe_unused, - struct thread *thread __maybe_unused, int usr_idx __maybe_unused) +static u64 get_leaf_frame_caller(struct perf_sample *sample, + struct thread *thread, int usr_idx) { - return 0; + if (machine__normalized_is(thread->maps->machine, "arm64")) + return get_leaf_frame_caller_aarch64(sample, thread, usr_idx); + else + return 0; } static int thread__resolve_callchain_sample(struct thread *thread, @@ -3114,14 +3118,19 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, } /* - * Compares the raw arch string. N.B. see instead perf_env__arch() if a - * normalized arch is needed. + * Compares the raw arch string. N.B. see instead perf_env__arch() or + * machine__normalized_is() if a normalized arch is needed. */ bool machine__is(struct machine *machine, const char *arch) { return machine && !strcmp(perf_env__raw_arch(machine->env), arch); } +bool machine__normalized_is(struct machine *machine, const char *arch) +{ + return machine && !strcmp(perf_env__arch(machine->env), arch); +} + int machine__nr_cpus_avail(struct machine *machine) { return machine ? perf_env__nr_cpus_avail(machine->env) : 0; diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index a143087eeb47..c5a45dc8df4c 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -208,6 +208,7 @@ static inline bool machine__is_host(struct machine *machine) } bool machine__is(struct machine *machine, const char *arch); +bool machine__normalized_is(struct machine *machine, const char *arch); int machine__nr_cpus_avail(struct machine *machine); struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); -- cgit v1.2.3 From 7fbddf40b881a2430daf1bd03ba80e871a2fadce Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 6 Dec 2021 14:47:47 +0530 Subject: tools headers UAPI: Add new macros for mem_hops field to perf_event.h Add new macros for mem_hops field which can be used to represent remote-node, socket and board level details. Currently the code has a macro for HOPS_0, which corresponds to data coming from another core on the same node. Add new macros for HOPS_1 to HOPS_3 to represent remote-node, socket and board level data.
Also add corresponding strings in the mem_hops array to represent mem_hop field data in the perf_mem__lvl_scnprintf function. In case the mem_hops field is used, the PERF_MEM_LVLNUM field also needs to be set in order to represent the data source. Hence printing the data source via the PERF_MEM_LVL field can be skipped in that scenario. For ex: Encodings for mem_hops fields with L2 cache: L2 - local L2 L2 | REMOTE | HOPS_0 - remote core, same node L2 L2 | REMOTE | HOPS_1 - remote node, same socket L2 L2 | REMOTE | HOPS_2 - remote socket, same board L2 L2 | REMOTE | HOPS_3 - remote board L2 Signed-off-by: Kajol Jain Acked-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Andi Kleen Cc: Athira Jajeev Cc: Daniel Borkmann Cc: Jin Yao Cc: Jiri Olsa Cc: Kan Liang Cc: Madhavan Srinivasan Cc: Mark Rutland Cc: Michael Ellerman Cc: Nageswara R Sastry Cc: Namhyung Kim Cc: Paul Mackerras Cc: Song Liu Cc: linuxppc-dev@lists.ozlabs.org Link: http://lore.kernel.org/lkml/20211206091749.87585-3-kjain@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/perf_event.h | 5 ++++- tools/perf/util/mem-events.c | 29 ++++++++++++++++++----------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index bd8860eeb291..4cd39aaccbe7 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -1332,7 +1332,10 @@ union perf_mem_data_src { /* hop level */ #define PERF_MEM_HOPS_0 0x01 /* remote core, same node */ -/* 2-7 available */ +#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */ +#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */ +#define PERF_MEM_HOPS_3 0x04 /* remote board */ +/* 5-7 available */ #define PERF_MEM_HOPS_SHIFT 43 #define PERF_MEM_S(a, s) \ diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index 3167b4628b6d..ed0ab838bcc5 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -309,6 +309,9 @@ static const char * const mem_hops[] = { * to be set with mem_hops field. */ "core, same node", + "node, same socket", + "socket, same board", + "board", }; int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info) @@ -316,7 +319,7 @@ int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info) size_t i, l = 0; u64 m = PERF_MEM_LVL_NA; u64 hit, miss; - int printed; + int printed = 0; if (mem_info) m = mem_info->data_src.mem_lvl; @@ -335,18 +338,22 @@ int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info) l += 7; } - if (mem_info && mem_info->data_src.mem_hops) + /* + * Incase mem_hops field is set, we can skip printing data source via + * PERF_MEM_LVL namespace.
+ */ + if (mem_info && mem_info->data_src.mem_hops) { l += scnprintf(out + l, sz - l, "%s ", mem_hops[mem_info->data_src.mem_hops]); - - printed = 0; - for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) { - if (!(m & 0x1)) - continue; - if (printed++) { - strcat(out, " or "); - l += 4; + } else { + for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) { + if (!(m & 0x1)) + continue; + if (printed++) { + strcat(out, " or "); + l += 4; + } + l += scnprintf(out + l, sz - l, mem_lvl[i]); } - l += scnprintf(out + l, sz - l, mem_lvl[i]); } if (mem_info && mem_info->data_src.mem_lvl_num) { -- cgit v1.2.3 From 0ebce3d65f1f53c936fdd51e975bd876ba7ed64f Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 6 Dec 2021 14:47:48 +0530 Subject: perf powerpc: Add encodings to represent data based on newer composite PERF_MEM_LVLNUM* fields The code represents data coming from L1/L2/L3 cache hits based on the PERF_MEM_LVL_* namespace, which is in the process of deprecation in favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_,HOPS_} fields. Add data source encodings to represent L1/L2/L3 cache hits based on the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_,HOPS_} fields for power10 and older platforms. Result on a power9 system without the patch: localhost:# ./perf mem report --sort="mem,sym,dso" --stdio # Overhead Samples Memory access Symbol Shared Object # ........ ....... ............. ................................. ................ # 29.51% 1 L2 hit [k] perf_event_exec [kernel.vmlinux] 27.05% 1 L1 hit [k] perf_ctx_unlock [kernel.vmlinux] 13.93% 1 L1 hit [k] vtime_delta [kernel.vmlinux] 13.11% 1 L1 hit [k] prepend_path.isra.11 [kernel.vmlinux] 8.20% 1 L1 hit [.] 00000038.plt_call.__GI_strlen libc-2.28.so 8.20% 1 L1 hit [k] perf_event_interrupt [kernel.vmlinux] Result on a power9 system with the patch: localhost:# ./perf mem report --sort="mem,sym,dso" --stdio # Overhead Samples Memory access Symbol Shared Object # ........ ....... ............. .......................... ................ # 36.63% 1 L2 or L2 hit [k] perf_event_exec [kernel.vmlinux] 25.50% 1 L1 or L1 hit [k] vtime_delta [kernel.vmlinux] 13.12% 1 L1 or L1 hit [k] unmap_region [kernel.vmlinux] 12.62% 1 L1 or L1 hit [k] perf_sample_event_took [kernel.vmlinux] 6.93% 1 L1 or L1 hit [k] perf_ctx_unlock [kernel.vmlinux] 5.20% 1 L1 or L1 hit [.]
__memcpy_power7 libc-2.28.so Reviewed-by: Madhavan Srinivasan Signed-off-by: Kajol Jain Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Andi Kleen Cc: Athira Jajeev Cc: Daniel Borkmann Cc: Jin Yao Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Michael Ellerman Cc: Nageswara R Sastry Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Song Liu Cc: linuxppc-dev@lists.ozlabs.org Link: http://lore.kernel.org/lkml/20211206091749.87585-4-kjain@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- arch/powerpc/perf/isa207-common.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 7ea873ab2e6f..6c6bc8b7d887 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -220,13 +220,13 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx) /* Nothing to do */ break; case 1: - ret = PH(LVL, L1); + ret = PH(LVL, L1) | LEVEL(L1) | P(SNOOP, HIT); break; case 2: - ret = PH(LVL, L2); + ret = PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT); break; case 3: - ret = PH(LVL, L3); + ret = PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); break; case 4: if (sub_idx <= 1) -- cgit v1.2.3 From af2b24f228a0373ac65eb7a502e0bc31e2c0269d Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 6 Dec 2021 14:47:49 +0530 Subject: perf powerpc: Add data source encodings for power10 platform The code represents memory/cache level data based on the PERF_MEM_LVL_* namespace, which is in the process of deprecation in favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_,HOPS_} fields. Add data source encodings to represent cache/memory data based on the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_,HOPS_} fields. Add data source encodings to represent data coming from local memory/remote memory/distant memory and remote/distant cache hits. In order to represent data coming from OpenCAPI cache/memory, we use the LVLNUM "PMEM" field, which is used to represent persistent memory accesses. Result on a power10 system with the patch: localhost:# ./perf mem report --sort="mem,sym,dso" --stdio # Overhead Samples Memory access Symbol Shared Object # ........ ....... ...................................... ........................... ................. # 29.46% 2331 L1 or L1 hit [.] __random libc-2.28.so 23.11% 2121 L1 or L1 hit [.] producer_populate_cache producer_consumer 18.56% 1758 L1 or L1 hit [.] __random_r libc-2.28.so 15.64% 1559 L2 or L2 hit [.] __random libc-2.28.so ..... 0.09% 5 Remote socket, same board Any cache hit [.] __random libc-2.28.so 0.07% 4 Remote socket, same board Any cache hit [.] __random libc-2.28.so .....
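For reference, one of the composite encodings above can be assembled by hand. The HOPS value and shift come from the perf_event.h hunk earlier in this series; the LVLNUM and REMOTE positions are quoted from memory and should be verified against the uapi header:

    #include <stdint.h>
    #include <stdio.h>

    /* from the perf_event.h hunk above */
    #define MEM_HOPS_2       0x03ULL    /* remote socket, same board */
    #define MEM_HOPS_SHIFT   43
    /* assumed field positions (check uapi/linux/perf_event.h) */
    #define MEM_LVLNUM_ANYC  0x0bULL    /* any cache */
    #define MEM_LVLNUM_SHIFT 33
    #define MEM_REMOTE       0x01ULL
    #define MEM_REMOTE_SHIFT 37

    int main(void)
    {
            /* "Remote socket, same board Any cache hit" from the report above */
            uint64_t src = (MEM_LVLNUM_ANYC << MEM_LVLNUM_SHIFT)
                         | (MEM_REMOTE << MEM_REMOTE_SHIFT)
                         | (MEM_HOPS_2 << MEM_HOPS_SHIFT);

            printf("mem_data_src = %#llx\n", (unsigned long long)src);
            return 0;
    }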
Reviewed-by: Madhavan Srinivasan Signed-off-by: Kajol Jain Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Andi Kleen Cc: Athira Jajeev Cc: Daniel Borkmann Cc: Jin Yao Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Michael Ellerman Cc: Nageswara R Sastry Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Song Liu Cc: linuxppc-dev@lists.ozlabs.org Link: http://lore.kernel.org/lkml/20211206091749.87585-5-kjain@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- arch/powerpc/perf/isa207-common.c | 54 ++++++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 6c6bc8b7d887..4037ea652522 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -229,13 +229,28 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx) ret = PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); break; case 4: - if (sub_idx <= 1) - ret = PH(LVL, LOC_RAM); - else if (sub_idx > 1 && sub_idx <= 2) - ret = PH(LVL, REM_RAM1); - else - ret = PH(LVL, REM_RAM2); - ret |= P(SNOOP, HIT); + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + ret = P(SNOOP, HIT); + + if (sub_idx == 1) + ret |= PH(LVL, LOC_RAM) | LEVEL(RAM); + else if (sub_idx == 2 || sub_idx == 3) + ret |= P(LVL, HIT) | LEVEL(PMEM); + else if (sub_idx == 4) + ret |= PH(LVL, REM_RAM1) | REM | LEVEL(RAM) | P(HOPS, 2); + else if (sub_idx == 5 || sub_idx == 7) + ret |= P(LVL, HIT) | LEVEL(PMEM) | REM; + else if (sub_idx == 6) + ret |= PH(LVL, REM_RAM2) | REM | LEVEL(RAM) | P(HOPS, 3); + } else { + if (sub_idx <= 1) + ret = PH(LVL, LOC_RAM); + else if (sub_idx > 1 && sub_idx <= 2) + ret = PH(LVL, REM_RAM1); + else + ret = PH(LVL, REM_RAM2); + ret |= P(SNOOP, HIT); + } break; case 5: if (cpu_has_feature(CPU_FTR_ARCH_31)) { @@ -261,11 +276,26 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx) } break; case 6: - ret = PH(LVL, REM_CCE2); - if ((sub_idx == 0) || (sub_idx == 2)) - ret |= P(SNOOP, HIT); - else if ((sub_idx == 1) || (sub_idx == 3)) - ret |= P(SNOOP, HITM); + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + if (sub_idx == 0) + ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM | + P(SNOOP, HIT) | P(HOPS, 2); + else if (sub_idx == 1) + ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM | + P(SNOOP, HITM) | P(HOPS, 2); + else if (sub_idx == 2) + ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM | + P(SNOOP, HIT) | P(HOPS, 3); + else if (sub_idx == 3) + ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM | + P(SNOOP, HITM) | P(HOPS, 3); + } else { + ret = PH(LVL, REM_CCE2); + if (sub_idx == 0 || sub_idx == 2) + ret |= P(SNOOP, HIT); + else if (sub_idx == 1 || sub_idx == 3) + ret |= P(SNOOP, HITM); + } break; case 7: ret = PM(LVL, L1); -- cgit v1.2.3 From e3304c21357268ecbe156ed6247a03dc78d3fce4 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 3 Dec 2021 07:50:37 +0530 Subject: perf sort: Include global and local variants for p_stage_cyc sort key Sort key 'p_stage_cyc' is used to present the latency cycles spent in pipeline stages. perf has local 'p_stage_cyc' sort key to display this info. There is no global variant available for this sort key. The local variant shows latency in a single sample, whereas the global value will be useful to present the total latency (sum of latencies) in the hist entry. It represents the latency number multiplied by the number of samples. Add global ('p_stage_cyc') and local variant ('local_p_stage_cyc') for this sort key. 
Use 'local_p_stage_cyc' as the default option for the "mem" sort mode. Also add these to the list of dynamic sort keys, and make the "dynamic_headers" and "arch_specific_sort_keys" arrays static. Reported-by: Namhyung Kim Signed-off-by: Athira Jajeev Tested-by: Nageswara R Sastry Cc: Jiri Olsa Cc: Kajol Jain Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/r/20211203022038.48240-1-atrajeev@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hist.c | 4 +++- tools/perf/util/hist.h | 3 ++- tools/perf/util/sort.c | 34 +++++++++++++++++++++++++--------- tools/perf/util/sort.h | 3 ++- 4 files changed, 32 insertions(+), 12 deletions(-) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index b776465e04ef..0a8033b09e28 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -211,7 +211,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10); hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13); hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13); - hists__new_col_len(hists, HISTC_P_STAGE_CYC, 13); + hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13); + hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13); + if (symbol_conf.nanosecs) hists__new_col_len(hists, HISTC_TIME, 16); else diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 621f35ae1efa..2a15e22fb89c 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -75,7 +75,8 @@ enum hist_column { HISTC_MEM_BLOCKED, HISTC_LOCAL_INS_LAT, HISTC_GLOBAL_INS_LAT, - HISTC_P_STAGE_CYC, + HISTC_LOCAL_P_STAGE_CYC, + HISTC_GLOBAL_P_STAGE_CYC, HISTC_NR_COLS, /* Last entry */ }; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index a111065b484e..e417e47f51b9 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -37,7 +37,7 @@ const char default_parent_pattern[] = "^sys_|^do_page_fault"; const char *parent_pattern = default_parent_pattern; const char *default_sort_order = "comm,dso,symbol"; const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; -const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,p_stage_cyc"; +const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc"; const char default_top_sort_order[] = "dso,symbol"; const char default_diff_sort_order[] = "dso,symbol"; const char default_tracepoint_sort_order[] = "trace"; @@ -46,8 +46,8 @@ const char *field_order; regex_t ignore_callees_regex; int have_ignore_callees = 0; enum sort_mode sort__mode = SORT_MODE__NORMAL; -const char *dynamic_headers[] = {"local_ins_lat", "p_stage_cyc"}; -const char *arch_specific_sort_keys[] = {"p_stage_cyc"}; +static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"}; +static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"}; /* * Replaces all occurrences of a char used with the: @@ -1392,22 +1392,37 @@ struct sort_entry sort_global_ins_lat = { }; static int64_t -sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right) +sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right) { return left->p_stage_cyc - right->p_stage_cyc; } +static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ +
return repsep_snprintf(bf, size, "%-*u", width, + he->p_stage_cyc * he->stat.nr_events); +} + + static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc); } -struct sort_entry sort_p_stage_cyc = { - .se_header = "Pipeline Stage Cycle", - .se_cmp = sort__global_p_stage_cyc_cmp, +struct sort_entry sort_local_p_stage_cyc = { + .se_header = "Local Pipeline Stage Cycle", + .se_cmp = sort__p_stage_cyc_cmp, .se_snprintf = hist_entry__p_stage_cyc_snprintf, - .se_width_idx = HISTC_P_STAGE_CYC, + .se_width_idx = HISTC_LOCAL_P_STAGE_CYC, +}; + +struct sort_entry sort_global_p_stage_cyc = { + .se_header = "Pipeline Stage Cycle", + .se_cmp = sort__p_stage_cyc_cmp, + .se_snprintf = hist_entry__global_p_stage_cyc_snprintf, + .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC, }; struct sort_entry sort_mem_daddr_sym = { @@ -1858,7 +1873,8 @@ static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size), DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat), DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat), - DIM(SORT_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_p_stage_cyc), + DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc), + DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc), }; #undef DIM diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 7b7145501933..f994261888e1 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -235,7 +235,8 @@ enum sort_type { SORT_CODE_PAGE_SIZE, SORT_LOCAL_INS_LAT, SORT_GLOBAL_INS_LAT, - SORT_PIPELINE_STAGE_CYC, + SORT_LOCAL_PIPELINE_STAGE_CYC, + SORT_GLOBAL_PIPELINE_STAGE_CYC, /* branch stack specific sort keys */ __SORT_BRANCH_STACK, -- cgit v1.2.3 From befee3775b6dabd7ec1bd8a44584f7f6f8fc8329 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 3 Dec 2021 07:50:38 +0530 Subject: perf powerpc: Update global/local variants for p_stage_cyc Update the arch_support_sort_key() function in powerpc to enable presenting local and global variants of sort key 'p_stage_cyc'. Update the "se_header" strings for these in arch_perf_header_entry() along with instruction latency. 
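These arch hooks work because the generic side declares them __weak; a rough sketch of the defaults, paraphrased from tools/perf/util/sort.c rather than quoted verbatim:

/* Generic side (sketch of the __weak defaults in tools/perf/util/sort.c):
 * every architecture gets these no-op versions unless it provides its own. */
int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
{
	return 0;		/* no arch-specific sort keys by default */
}

const char * __weak arch_perf_header_entry(const char *se_header)
{
	return se_header;	/* keep the generic column header */
}

Only an architecture that supplies its own definitions, as the powerpc patch below does, changes which sort keys are accepted and how their column headers read.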
Reported-by: Namhyung Kim Signed-off-by: Athira Jajeev Tested-by: Nageswara R Sastry Cc: Jiri Olsa Cc: Kajol Jain Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/r/20211203022038.48240-2-atrajeev@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/powerpc/util/event.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/arch/powerpc/util/event.c b/tools/perf/arch/powerpc/util/event.c index 3bf441257466..cf430a4c55b9 100644 --- a/tools/perf/arch/powerpc/util/event.c +++ b/tools/perf/arch/powerpc/util/event.c @@ -40,8 +40,12 @@ const char *arch_perf_header_entry(const char *se_header) { if (!strcmp(se_header, "Local INSTR Latency")) return "Finish Cyc"; - else if (!strcmp(se_header, "Pipeline Stage Cycle")) + else if (!strcmp(se_header, "INSTR Latency")) + return "Global Finish_cyc"; + else if (!strcmp(se_header, "Local Pipeline Stage Cycle")) return "Dispatch Cyc"; + else if (!strcmp(se_header, "Pipeline Stage Cycle")) + return "Global Dispatch_cyc"; return se_header; } @@ -49,5 +53,7 @@ int arch_support_sort_key(const char *sort_key) { if (!strcmp(sort_key, "p_stage_cyc")) return 1; + if (!strcmp(sort_key, "local_p_stage_cyc")) + return 1; return 0; } -- cgit v1.2.3 From d5962fb7d69073bf68fb647531cfd4f0adf84be3 Mon Sep 17 00:00:00 2001 From: Dario Petrillo Date: Mon, 10 Jan 2022 00:44:41 +0100 Subject: perf annotate: Avoid TUI crash when navigating in the annotation of recursive functions In 'perf report', entering a recursive function from inside of itself (either directly or indirectly through some other function) results in calling symbol__annotate2() multiple times, and in freeing the whole disassembly when exiting from the innermost instance. The first issue causes the function's disassembly to be duplicated, and the latter a heap use-after-free (and crash) when trying to access the disassembly again. I reproduced the bug on perf 5.11.22 (Ubuntu 20.04.3 LTS) and 5.16-rc8 with the following testcase (compile with gcc recursive.c -o recursive). To reproduce:
- perf record ./recursive
- perf report
- enter fibonacci and annotate it
- move the cursor on one of the "callq fibonacci" instructions and press enter
- at this point there will be two copies of the function in the disassembly
- go back by pressing q, and perf will crash

#include <stdio.h>

int fibonacci(int n)
{
	if (n <= 2)
		return 1;
	return fibonacci(n-1) + fibonacci(n-2);
}

int main()
{
	printf("%d\n", fibonacci(40));
}

This patch addresses the issue by annotating a function and freeing the associated memory on exit only if no annotation is already present, so that a recursive function is only annotated on entry.
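Distilled into a self-contained sketch, the fix has this shape (struct and helper names are invented stand-ins for perf's symbol__annotate2(), annotate_browser__run() and annotated_source__purge()):

#include <stdbool.h>

struct sym_state {
	bool annotated;		/* disassembly currently attached? */
	/* ... disassembly lines, offsets ... */
};

/* Hypothetical stand-ins for the real perf functions. */
int annotate(struct sym_state *st);
int run_browser(struct sym_state *st);	/* may re-enter browse() */
void purge(struct sym_state *st);

int browse(struct sym_state *st)
{
	bool not_annotated = !st->annotated;
	int ret;

	if (not_annotated) {			/* annotate at most once */
		if (annotate(st) < 0)
			return -1;
		st->annotated = true;
	}

	ret = run_browser(st);			/* recursion lands here */

	if (not_annotated) {	/* only the frame that annotated frees */
		purge(st);
		st->annotated = false;
	}
	return ret;
}

An inner browse() of the same symbol sees annotated == true, so it neither re-disassembles nor frees state the outer frame still needs.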
Signed-off-by: Dario Petrillo Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: stable@kernel.org Link: http://lore.kernel.org/lkml/20220109234441.325106-1-dario.pk1@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/annotate.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index e81c2493efdf..44ba900828f6 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -966,6 +966,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, .opts = opts, }; int ret = -1, err; + int not_annotated = list_empty(&notes->src->source); if (sym == NULL) return -1; @@ -973,13 +974,15 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, if (ms->map->dso->annotate_warned) return -1; - err = symbol__annotate2(ms, evsel, opts, &browser.arch); - if (err) { - char msg[BUFSIZ]; - ms->map->dso->annotate_warned = true; - symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); - ui__error("Couldn't annotate %s:\n%s", sym->name, msg); - goto out_free_offsets; + if (not_annotated) { + err = symbol__annotate2(ms, evsel, opts, &browser.arch); + if (err) { + char msg[BUFSIZ]; + ms->map->dso->annotate_warned = true; + symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); + ui__error("Couldn't annotate %s:\n%s", sym->name, msg); + goto out_free_offsets; + } } ui_helpline__push("Press ESC to exit"); @@ -994,9 +997,11 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, ret = annotate_browser__run(&browser, evsel, hbt); - annotated_source__purge(notes->src); + if(not_annotated) + annotated_source__purge(notes->src); out_free_offsets: - zfree(&notes->offsets); + if(not_annotated) + zfree(&notes->offsets); return ret; } -- cgit v1.2.3 From 0046686da0ef692a6381260c3aa44291187eafc9 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 23 Dec 2021 10:39:47 -0800 Subject: perf test: Enable system wide for metricgroups test Uncore events as group leaders fail in per-thread mode causing exit errors. Enable system-wide for metricgroup testing. This fixes the HPC metric group when tested on skylakex.
Fixes: 4a87dea9e60fe100 ("perf test: Workload test of metric and metricgroups") Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211223183948.3423989-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/stat_all_metricgroups.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh index de24d374ce24..cb35e488809a 100755 --- a/tools/perf/tests/shell/stat_all_metricgroups.sh +++ b/tools/perf/tests/shell/stat_all_metricgroups.sh @@ -6,7 +6,7 @@ set -e for m in $(perf list --raw-dump metricgroups); do echo "Testing $m" - perf stat -M "$m" true + perf stat -M "$m" -a true done exit 0 -- cgit v1.2.3 From 62942e9fda9fd1def10ffcbd5e1c025b3c9eec17 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 12 Jan 2022 10:50:57 +0200 Subject: perf script: Fix hex dump character output Using grep -C with perf script -D can give erroneous results as grep loses lines due to non-printable characters, for example, below the 0020, 0060 and 0070 lines are missing: $ perf script -D | grep -C10 AUX | head . 0010: 08 00 00 00 00 00 00 00 1f 00 00 00 00 00 00 00 ................ . 0030: 01 00 00 00 00 00 00 00 00 04 00 00 00 00 00 00 ................ . 0040: 00 08 00 00 00 00 00 00 02 00 00 00 00 00 00 00 ................ . 0050: 00 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 ................ . 0080: 02 00 00 00 00 00 00 00 1b 00 00 00 00 00 00 00 ................ . 0090: 00 00 00 00 00 00 00 00 ........ 0 0 0x450 [0x98]: PERF_RECORD_AUXTRACE_INFO type: 1 PMU Type 8 Time Shift 31 perf's isprint() is a custom implementation from the kernel, but the kernel's _ctype appears to include characters from Latin-1 Supplement which is not compatible with, for example, UTF-8. Fix by checking also isascii(). After: $ tools/perf/perf script -D | grep -C10 AUX | head . 0010: 08 00 00 00 00 00 00 00 1f 00 00 00 00 00 00 00 ................ . 0020: 03 84 32 2f 00 00 00 00 63 7c 4f d2 fa ff ff ff ..2/....c|O..... . 0030: 01 00 00 00 00 00 00 00 00 04 00 00 00 00 00 00 ................ . 0040: 00 08 00 00 00 00 00 00 02 00 00 00 00 00 00 00 ................ . 0050: 00 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 ................ . 0060: 00 02 00 00 00 00 00 00 00 c0 03 00 00 00 00 00 ................ . 0070: e2 00 00 00 00 00 00 00 02 00 00 00 00 00 00 00 ................ . 0080: 02 00 00 00 00 00 00 00 1b 00 00 00 00 00 00 00 ................ . 0090: 00 00 00 00 00 00 00 00 ........ Fixes: 3052ba56bcb58904 ("tools perf: Move from sane_ctype.h obtained from git to the Linux's original") Signed-off-by: Adrian Hunter Cc: Jiri Olsa Link: http://lore.kernel.org/lkml/20220112085057.277205-1-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 2c06abf6dcd2..65e6c22f38e4 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -179,7 +179,7 @@ static int trace_event_printer(enum binary_printer_ops op, break; case BINARY_PRINT_CHAR_DATA: printed += color_fprintf(fp, color, "%c", - isprint(ch) ? ch : '.'); + isprint(ch) && isascii(ch) ? 
ch : '.'); break; case BINARY_PRINT_CHAR_PAD: printed += color_fprintf(fp, color, " "); -- cgit v1.2.3 From 8de78328f041f10a2b546fdb3791a87ba6b742e6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 12 Jan 2022 14:19:21 -0300 Subject: Revert "perf powerpc: Add encodings to represent data based on newer composite PERF_MEM_LVLNUM* fields" This was in a patchkit mixing up kernel with tools/ parts and I mistakenly got it merged in the perf tools tree, revert it, it'll go via the PowerPC kernel tree. This reverts commit 0ebce3d65f1f53c936fdd51e975bd876ba7ed64f. Cc: kajoljain Cc: Michael Ellerman Cc: Stephen Rothwell Link: http://lore.kernel.org/lkml/20220112171659.531d22ce@canb.auug.org.au Signed-off-by: Arnaldo Carvalho de Melo --- arch/powerpc/perf/isa207-common.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 4037ea652522..0c8b1a5cfe5c 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -220,13 +220,13 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx) /* Nothing to do */ break; case 1: - ret = PH(LVL, L1) | LEVEL(L1) | P(SNOOP, HIT); + ret = PH(LVL, L1); break; case 2: - ret = PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT); + ret = PH(LVL, L2); break; case 3: - ret = PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); + ret = PH(LVL, L3); break; case 4: if (cpu_has_feature(CPU_FTR_ARCH_31)) { -- cgit v1.2.3 From b4bb6f05e4b25e66825956006c3d5cbe5b73eaec Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 12 Jan 2022 14:21:45 -0300 Subject: Revert "perf powerpc: Add data source encodings for power10 platform" This was in a patchkit mixing up kernel with tools/ parts and I mistakenly got it merged in the perf tools tree, revert it, it'll go via the PowerPC kernel tree. This reverts commit af2b24f228a0373ac65eb7a502e0bc31e2c0269d. 
Cc: kajoljain Cc: Michael Ellerman Cc: Stephen Rothwell Link: http://lore.kernel.org/lkml/20220112171659.531d22ce@canb.auug.org.au Signed-off-by: Arnaldo Carvalho de Melo --- arch/powerpc/perf/isa207-common.c | 54 +++++++++------------------------------ 1 file changed, 12 insertions(+), 42 deletions(-) diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 0c8b1a5cfe5c..7ea873ab2e6f 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -229,28 +229,13 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx) ret = PH(LVL, L3); break; case 4: - if (cpu_has_feature(CPU_FTR_ARCH_31)) { - ret = P(SNOOP, HIT); - - if (sub_idx == 1) - ret |= PH(LVL, LOC_RAM) | LEVEL(RAM); - else if (sub_idx == 2 || sub_idx == 3) - ret |= P(LVL, HIT) | LEVEL(PMEM); - else if (sub_idx == 4) - ret |= PH(LVL, REM_RAM1) | REM | LEVEL(RAM) | P(HOPS, 2); - else if (sub_idx == 5 || sub_idx == 7) - ret |= P(LVL, HIT) | LEVEL(PMEM) | REM; - else if (sub_idx == 6) - ret |= PH(LVL, REM_RAM2) | REM | LEVEL(RAM) | P(HOPS, 3); - } else { - if (sub_idx <= 1) - ret = PH(LVL, LOC_RAM); - else if (sub_idx > 1 && sub_idx <= 2) - ret = PH(LVL, REM_RAM1); - else - ret = PH(LVL, REM_RAM2); - ret |= P(SNOOP, HIT); - } + if (sub_idx <= 1) + ret = PH(LVL, LOC_RAM); + else if (sub_idx > 1 && sub_idx <= 2) + ret = PH(LVL, REM_RAM1); + else + ret = PH(LVL, REM_RAM2); + ret |= P(SNOOP, HIT); break; case 5: if (cpu_has_feature(CPU_FTR_ARCH_31)) { @@ -276,26 +261,11 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx) } break; case 6: - if (cpu_has_feature(CPU_FTR_ARCH_31)) { - if (sub_idx == 0) - ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM | - P(SNOOP, HIT) | P(HOPS, 2); - else if (sub_idx == 1) - ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM | - P(SNOOP, HITM) | P(HOPS, 2); - else if (sub_idx == 2) - ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM | - P(SNOOP, HIT) | P(HOPS, 3); - else if (sub_idx == 3) - ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM | - P(SNOOP, HITM) | P(HOPS, 3); - } else { - ret = PH(LVL, REM_CCE2); - if (sub_idx == 0 || sub_idx == 2) - ret |= P(SNOOP, HIT); - else if (sub_idx == 1 || sub_idx == 3) - ret |= P(SNOOP, HITM); - } + ret = PH(LVL, REM_CCE2); + if ((sub_idx == 0) || (sub_idx == 2)) + ret |= P(SNOOP, HIT); + else if ((sub_idx == 1) || (sub_idx == 3)) + ret |= P(SNOOP, HITM); break; case 7: ret = PM(LVL, L1); -- cgit v1.2.3 From dcffc5ebb80dd5887b91091b8ecd082c9ed75361 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 23 Dec 2021 10:39:48 -0800 Subject: perf evsel: Improve error message for uncore events When a group has multiple events and the leader fails it can yield errors like: $ perf stat -e '{uncore_imc/cas_count_read/},instructions' /bin/true Error: The sys_perf_event_open() syscall returned with 22 (Invalid argument) for event (uncore_imc/cas_count_read/). /bin/dmesg | grep -i perf may provide additional information. However, when not the group leader is given: $ perf stat -e '{instructions,uncore_imc/cas_count_read/}' /bin/true ... 1,619,057 instructions MiB uncore_imc/cas_count_read/ This is necessary because get_group_fd will fail if the leader fails and is the direct result of the check on line 750 of builtin-stat.c in stat_handle_error that returns COUNTER_SKIP for the latter case. This patch improves the error message to: $ perf stat -e '{uncore_imc/cas_count_read/},instructions' /bin/true Error: Invalid event (uncore_imc/cas_count_read/) in per-thread mode, enable system wide with '-a'. v2. 
Changed the test to use !target__has_cpu as suggested by Namhyung Kim. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211223183948.3423989-2-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 656c30b988ce..a0acf53a2510 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2931,6 +2931,10 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target, return scnprintf(msg, size, "wrong clockid (%d).", clockid); if (perf_missing_features.aux_output) return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel."); + if (!target__has_cpu(target)) + return scnprintf(msg, size, + "Invalid event (%s) in per-thread mode, enable system wide with '-a'.", + evsel__name(evsel)); break; case ENODATA: return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. " -- cgit v1.2.3 From 818ab78c03aad94fabc18d386e9c73b539a1f447 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:04 -0800 Subject: libperf: Add comments to 'struct perf_cpu_map' A particular observed problem is confusing the index with the CPU value; documentation should hopefully reduce this type of problem. Reviewed-by: James Clark Reviewed-by: John Garry Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-2-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/include/internal/cpumap.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h index 840d4032587b..4054169c12c5 100644 --- a/tools/lib/perf/include/internal/cpumap.h +++ b/tools/lib/perf/include/internal/cpumap.h @@ -4,9 +4,18 @@ #include +/** + * A sized, reference counted, sorted array of integers representing CPU + * numbers. This is commonly used to capture which CPUs a PMU is associated + * with. The indices into the cpumap are frequently used as they avoid having + * gaps if CPU numbers were used. For events associated with a pid, rather than + * a CPU, a single dummy map with an entry of -1 is used. + */ struct perf_cpu_map { refcount_t refcnt; + /** Length of the map array. */ int nr; + /** The CPU values. */ int map[]; }; -- cgit v1.2.3 From ca2c9b76bc3c75ac116ef199b75e7ca4e27e7acb Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:05 -0800 Subject: perf stat: Add aggr creators that are passed a cpu The cpu_map and index can get confused. Add variants of the cpu_map__get routines that are passed a cpu. Make the existing cpu_map__get routines use the new functions with a view to removing them when no longer used.
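The index/CPU confusion this series designs out is easy to demonstrate with a toy map; a simplified sketch (the struct name is invented, the layout mirrors the perf_cpu_map documented above):

#include <stdio.h>

/* Simplified stand-in for struct perf_cpu_map. With a sparse cpumask
 * such as "0,18" (see the uncore fix below), index 1 holds CPU 18, so
 * passing an index where a CPU number is expected silently reads the
 * topology of the wrong CPU. */
struct cpu_map_sketch {
	int nr;		/* length of map[] */
	int map[2];	/* the CPU values  */
};

int main(void)
{
	struct cpu_map_sketch m = { .nr = 2, .map = { 0, 18 } };
	int idx;

	for (idx = 0; idx < m.nr; idx++)
		printf("idx=%d -> cpu=%d\n", idx, m.map[idx]);
	/* idx=1 -> cpu=18: the new *_aggr_by_cpu() helpers take the CPU
	 * value (18), never the index (1), so the two can no longer be
	 * swapped unnoticed. */
	return 0;
}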
Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-3-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.c | 79 ++++++++++++++++++++++++++++-------------------- tools/perf/util/cpumap.h | 6 +++- 2 files changed, 51 insertions(+), 34 deletions(-) diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 87d3eca9b872..49fba2c53822 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -128,21 +128,23 @@ int cpu_map__get_socket_id(int cpu) return ret ?: value; } -struct aggr_cpu_id cpu_map__get_socket(struct perf_cpu_map *map, int idx, - void *data __maybe_unused) +struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data __maybe_unused) { - int cpu; struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); - if (idx > map->nr) - return id; - - cpu = map->map[idx]; - id.socket = cpu_map__get_socket_id(cpu); return id; } +struct aggr_cpu_id cpu_map__get_socket(struct perf_cpu_map *map, int idx, + void *data) +{ + if (idx < 0 || idx > map->nr) + return cpu_map__empty_aggr_cpu_id(); + + return cpu_map__get_socket_aggr_by_cpu(map->map[idx], data); +} + static int cmp_aggr_cpu_id(const void *a_pointer, const void *b_pointer) { struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer; @@ -200,15 +202,10 @@ int cpu_map__get_die_id(int cpu) return ret ?: value; } -struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data) +struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data) { - int cpu, die; - struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); - - if (idx > map->nr) - return id; - - cpu = map->map[idx]; + struct aggr_cpu_id id; + int die; die = cpu_map__get_die_id(cpu); /* There is no die_id on legacy system. */ @@ -220,7 +217,7 @@ struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, void *dat * with the socket ID and then add die to * make a unique ID. 
*/ - id = cpu_map__get_socket(map, idx, data); + id = cpu_map__get_socket_aggr_by_cpu(cpu, data); if (cpu_map__aggr_cpu_id_is_empty(id)) return id; @@ -228,6 +225,15 @@ struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, void *dat return id; } +struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, + void *data) +{ + if (idx < 0 || idx > map->nr) + return cpu_map__empty_aggr_cpu_id(); + + return cpu_map__get_die_aggr_by_cpu(map->map[idx], data); +} + int cpu_map__get_core_id(int cpu) { int value, ret = cpu__get_topology_int(cpu, "core_id", &value); @@ -239,20 +245,13 @@ int cpu_map__get_node_id(int cpu) return cpu__get_node(cpu); } -struct aggr_cpu_id cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data) +struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data) { - int cpu; - struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); - - if (idx > map->nr) - return id; - - cpu = map->map[idx]; - - cpu = cpu_map__get_core_id(cpu); + struct aggr_cpu_id id; + int core = cpu_map__get_core_id(cpu); /* cpu_map__get_die returns a struct with socket and die set*/ - id = cpu_map__get_die(map, idx, data); + id = cpu_map__get_die_aggr_by_cpu(cpu, data); if (cpu_map__aggr_cpu_id_is_empty(id)) return id; @@ -260,19 +259,33 @@ struct aggr_cpu_id cpu_map__get_core(struct perf_cpu_map *map, int idx, void *da * core_id is relative to socket and die, we need a global id. * So we combine the result from cpu_map__get_die with the core id */ - id.core = cpu; + id.core = core; return id; + } -struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data __maybe_unused) +struct aggr_cpu_id cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data) +{ + if (idx < 0 || idx > map->nr) + return cpu_map__empty_aggr_cpu_id(); + + return cpu_map__get_core_aggr_by_cpu(map->map[idx], data); +} + +struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data __maybe_unused) { struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); + id.node = cpu_map__get_node_id(cpu); + return id; +} + +struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data) +{ if (idx < 0 || idx >= map->nr) - return id; + return cpu_map__empty_aggr_cpu_id(); - id.node = cpu_map__get_node_id(map->map[idx]); - return id; + return cpu_map__get_node_aggr_by_cpu(map->map[idx], data); } int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp) diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index a27eeaf086e8..c62d67704425 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -31,13 +31,17 @@ size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size); size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size); size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp); int cpu_map__get_socket_id(int cpu); +struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_socket(struct perf_cpu_map *map, int idx, void *data); int cpu_map__get_die_id(int cpu); +struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data); int cpu_map__get_core_id(int cpu); +struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data); int cpu_map__get_node_id(int cpu); -struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void 
*data); +struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data); +struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data); int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp); int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **diep); int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **corep); -- cgit v1.2.3 From 01843ca0197783d0951a1948ebeaaed9a47ce55d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:06 -0800 Subject: perf stat: Correct aggregation CPU map Switch the perf_cpu_map in aggr_update_shadow from the evlist to the counter's cpu map, so the index is appropriate. This addresses a problem where uncore counts, with a cpumap like: $ cat /sys/devices/uncore_imc_0/cpumask 0,18 Don't aggregate counts in CPUs based on the index of those values in the cpumap (0 and 1) but on the actual CPU (0 and 18). Thereby correcting metric calculations in per-socket mode for counters without a full cpumask. On a SkylakeX with a tweaked DRAM_BW_Use metric, to remove unnecessary scaling, this gives: Before: $ /perf stat --per-socket -M DRAM_BW_Use -I 1000 1.001102293 S0 1 27.01 MiB uncore_imc/cas_count_write/ # 103.00 DRAM_BW_Use 1.001102293 S0 1 30.22 MiB uncore_imc/cas_count_read/ 1.001102293 S0 1 1,001,102,293 ns duration_time 1.001102293 S1 1 20.10 MiB uncore_imc/cas_count_write/ # 0.00 DRAM_BW_Use 1.001102293 S1 1 32.74 MiB uncore_imc/cas_count_read/ 1.001102293 S1 0 ns duration_time 2.003517973 S0 1 83.04 MiB uncore_imc/cas_count_write/ # 920.00 DRAM_BW_Use 2.003517973 S0 1 145.95 MiB uncore_imc/cas_count_read/ 2.003517973 S0 1 1,002,415,680 ns duration_time 2.003517973 S1 1 302.45 MiB uncore_imc/cas_count_write/ # 0.00 DRAM_BW_Use 2.003517973 S1 1 290.99 MiB uncore_imc/cas_count_read/ 2.003517973 S1 0 ns duration_time After: $ perf stat --per-socket -M DRAM_BW_Use -I 1000 1.001080840 S0 1 24.96 MiB uncore_imc/cas_count_write/ # 54.00 DRAM_BW_Use 1.001080840 S0 1 33.64 MiB uncore_imc/cas_count_read/ 1.001080840 S0 1 1,001,080,840 ns duration_time 1.001080840 S1 1 42.43 MiB uncore_imc/cas_count_write/ # 84.00 DRAM_BW_Use 1.001080840 S1 1 47.05 MiB uncore_imc/cas_count_read/ 1.001080840 S1 0 ns duration_time Signed-off-by: Ian Rogers Tested-by: John Garry Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 588601000f3f..b0fa81ffce61 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -526,7 +526,7 @@ static void aggr_update_shadow(struct perf_stat_config *config, evlist__for_each_entry(evlist, counter) { val = 0; for (cpu = 0; cpu < evsel__nr_cpus(counter); cpu++) { - s2 = config->aggr_get_id(config, evlist->core.cpus, cpu); + s2 = config->aggr_get_id(config, evsel__cpus(counter), cpu); if (!cpu_map__compare_aggr_cpu_id(s2, id)) continue; val += perf_counts(counter->counts, cpu, 0)->val; -- 
cgit v1.2.3 From a023283fadef8a3f6916ba2b0c37955d76ffaf4d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:07 -0800 Subject: perf stat: Switch aggregation to use for_each loop Tidy up the use of cpu and index to hopefully make the code less error prone. Avoid unused warnings with (void) which will be removed in a later patch. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-5-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-display.c | 48 ++++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index b0fa81ffce61..efab39a759ff 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -330,8 +330,8 @@ static void print_metric_header(struct perf_stat_config *config, static int first_shadow_cpu(struct perf_stat_config *config, struct evsel *evsel, struct aggr_cpu_id id) { - struct evlist *evlist = evsel->evlist; - int i; + struct perf_cpu_map *cpus; + int cpu, idx; if (config->aggr_mode == AGGR_NONE) return id.core; @@ -339,14 +339,11 @@ static int first_shadow_cpu(struct perf_stat_config *config, if (!config->aggr_get_id) return 0; - for (i = 0; i < evsel__nr_cpus(evsel); i++) { - int cpu2 = evsel__cpus(evsel)->map[i]; - - if (cpu_map__compare_aggr_cpu_id( - config->aggr_get_id(config, evlist->core.cpus, cpu2), - id)) { - return cpu2; - } + cpus = evsel__cpus(evsel); + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { + if (cpu_map__compare_aggr_cpu_id(config->aggr_get_id(config, cpus, idx), + id)) + return cpu; } return 0; } @@ -516,20 +513,23 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int static void aggr_update_shadow(struct perf_stat_config *config, struct evlist *evlist) { - int cpu, s; + int cpu, idx, s; struct aggr_cpu_id s2, id; u64 val; struct evsel *counter; + struct perf_cpu_map *cpus; for (s = 0; s < config->aggr_map->nr; s++) { id = config->aggr_map->map[s]; evlist__for_each_entry(evlist, counter) { + cpus = evsel__cpus(counter); val = 0; - for (cpu = 0; cpu < evsel__nr_cpus(counter); cpu++) { - s2 = config->aggr_get_id(config, evsel__cpus(counter), cpu); + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { + (void)cpu; + s2 = config->aggr_get_id(config, cpus, idx); if (!cpu_map__compare_aggr_cpu_id(s2, id)) continue; - val += perf_counts(counter->counts, cpu, 0)->val; + val += perf_counts(counter->counts, idx, 0)->val; } perf_stat__update_shadow_stats(counter, val, first_shadow_cpu(config, counter, id), @@ -634,18 +634,21 @@ static void aggr_cb(struct perf_stat_config *config, struct evsel *counter, void *data, bool first) { struct aggr_data *ad = data; - int cpu; + int idx, cpu; + struct perf_cpu_map *cpus; struct aggr_cpu_id s2; - for (cpu = 0; cpu < evsel__nr_cpus(counter); cpu++) { + cpus = evsel__cpus(counter); + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { struct perf_counts_values *counts; - s2 = config->aggr_get_id(config, evsel__cpus(counter), cpu); + (void)cpu; + s2 = config->aggr_get_id(config, 
cpus, idx); if (!cpu_map__compare_aggr_cpu_id(s2, ad->id)) continue; if (first) ad->nr++; - counts = perf_counts(counter->counts, cpu, 0); + counts = perf_counts(counter->counts, idx, 0); /* * When any result is bad, make them all to give * consistent output in interval mode. @@ -1208,10 +1211,13 @@ static void print_percore_thread(struct perf_stat_config *config, { int s; struct aggr_cpu_id s2, id; + struct perf_cpu_map *cpus; bool first = true; + int idx, cpu; - for (int i = 0; i < evsel__nr_cpus(counter); i++) { - s2 = config->aggr_get_id(config, evsel__cpus(counter), i); + cpus = evsel__cpus(counter); + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { + s2 = config->aggr_get_id(config, cpus, idx); for (s = 0; s < config->aggr_map->nr; s++) { id = config->aggr_map->map[s]; if (cpu_map__compare_aggr_cpu_id(s2, id)) @@ -1220,7 +1226,7 @@ static void print_percore_thread(struct perf_stat_config *config, print_counter_aggrdata(config, counter, s, prefix, false, - &first, i); + &first, cpu); } } -- cgit v1.2.3 From 88031a0de7d68d132014154b9e5307428e8ed70d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:08 -0800 Subject: perf stat: Switch to cpu version of cpu_map__get() Avoid possible bugs where the wrong index is passed with the cpu_map. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-6-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 93 +++++++++++++++++++++++------------------- tools/perf/util/stat-display.c | 11 ++--- tools/perf/util/stat.h | 3 +- 3 files changed, 57 insertions(+), 50 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f6ca2b054c5b..9791ae9b1a53 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1323,69 +1323,63 @@ static struct option stat_options[] = { }; static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) + int cpu) { - return cpu_map__get_socket(map, cpu, NULL); + return cpu_map__get_socket_aggr_by_cpu(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) + int cpu) { - return cpu_map__get_die(map, cpu, NULL); + return cpu_map__get_die_aggr_by_cpu(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) + int cpu) { - return cpu_map__get_core(map, cpu, NULL); + return cpu_map__get_core_aggr_by_cpu(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) + int cpu) { - return cpu_map__get_node(map, cpu, NULL); + return cpu_map__get_node_aggr_by_cpu(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, - aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) + aggr_get_id_t get_id, int cpu) { - int cpu; struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); - if (idx >= map->nr) - 
return id; - - cpu = map->map[idx]; - if (cpu_map__aggr_cpu_id_is_empty(config->cpus_aggr_map->map[cpu])) - config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); + config->cpus_aggr_map->map[cpu] = get_id(config, cpu); id = config->cpus_aggr_map->map[cpu]; return id; } static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) + int cpu) { - return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); } static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) + int cpu) { - return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_die, cpu); } static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) + int cpu) { - return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_core, cpu); } static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) + int cpu) { - return perf_stat__get_aggr(config, perf_stat__get_node, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_node, cpu); } static bool term_percore_set(void) @@ -1483,8 +1477,9 @@ static void perf_stat__exit_aggr_mode(void) stat_config.cpus_aggr_map = NULL; } -static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx) +static inline int perf_env__get_cpu(void *data, struct perf_cpu_map *map, int idx) { + struct perf_env *env = data; int cpu; if (idx > map->nr) @@ -1498,10 +1493,9 @@ static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *m return cpu; } -static struct aggr_cpu_id perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; - int cpu = perf_env__get_cpu(env, map, idx); struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); if (cpu != -1) @@ -1510,11 +1504,15 @@ static struct aggr_cpu_id perf_env__get_socket(struct perf_cpu_map *map, int idx return id; } -static struct aggr_cpu_id perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) +{ + return perf_env__get_socket_aggr_by_cpu(perf_env__get_cpu(data, map, idx), data); +} + +static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); - int cpu = perf_env__get_cpu(env, map, idx); if (cpu != -1) { /* @@ -1529,11 +1527,15 @@ static struct aggr_cpu_id perf_env__get_die(struct perf_cpu_map *map, int idx, v return id; } -static struct aggr_cpu_id perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) +{ + return perf_env__get_die_aggr_by_cpu(perf_env__get_cpu(data, map, idx), data); +} + +static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); - int cpu = perf_env__get_cpu(env, map, idx); if (cpu != -1) { /* @@ -1549,15 +1551,24 @@ static struct aggr_cpu_id perf_env__get_core(struct perf_cpu_map *map, int idx, return id; } -static 
struct aggr_cpu_id perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) +{ + return perf_env__get_core_aggr_by_cpu(perf_env__get_cpu(data, map, idx), data); +} + +static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(int cpu, void *data) { - int cpu = perf_env__get_cpu(data, map, idx); struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); id.node = perf_env__numa_node(data, cpu); return id; } +static struct aggr_cpu_id perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) +{ + return perf_env__get_node_aggr_by_cpu(perf_env__get_cpu(data, map, idx), data); +} + static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp) { @@ -1583,26 +1594,26 @@ static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *c } static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) + int cpu) { - return perf_env__get_socket(map, idx, &perf_stat.session->header.env); + return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); } static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) + int cpu) { - return perf_env__get_die(map, idx, &perf_stat.session->header.env); + return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); } static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) + int cpu) { - return perf_env__get_core(map, idx, &perf_stat.session->header.env); + return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); } static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) + int cpu) { - return perf_env__get_node(map, idx, &perf_stat.session->header.env); + return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); } static int perf_stat_init_aggr_mode_file(struct perf_stat *st) diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index efab39a759ff..6c40b91d5e32 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -341,8 +341,7 @@ static int first_shadow_cpu(struct perf_stat_config *config, cpus = evsel__cpus(evsel); perf_cpu_map__for_each_cpu(cpu, idx, cpus) { - if (cpu_map__compare_aggr_cpu_id(config->aggr_get_id(config, cpus, idx), - id)) + if (cpu_map__compare_aggr_cpu_id(config->aggr_get_id(config, cpu), id)) return cpu; } return 0; @@ -525,8 +524,7 @@ static void aggr_update_shadow(struct perf_stat_config *config, cpus = evsel__cpus(counter); val = 0; perf_cpu_map__for_each_cpu(cpu, idx, cpus) { - (void)cpu; - s2 = config->aggr_get_id(config, cpus, idx); + s2 = config->aggr_get_id(config, cpu); if (!cpu_map__compare_aggr_cpu_id(s2, id)) continue; val += perf_counts(counter->counts, idx, 0)->val; @@ -642,8 +640,7 @@ static void aggr_cb(struct perf_stat_config *config, perf_cpu_map__for_each_cpu(cpu, idx, cpus) { struct perf_counts_values *counts; - (void)cpu; - s2 = config->aggr_get_id(config, cpus, idx); + s2 = config->aggr_get_id(config, cpu); if (!cpu_map__compare_aggr_cpu_id(s2, ad->id)) continue; if (first) @@ -1217,7 +1214,7 @@ static void print_percore_thread(struct perf_stat_config *config, cpus = evsel__cpus(counter); perf_cpu_map__for_each_cpu(cpu, idx, 
cpus) { - s2 = config->aggr_get_id(config, cpus, idx); + s2 = config->aggr_get_id(config, cpu); for (s = 0; s < config->aggr_map->nr; s++) { id = config->aggr_map->map[s]; if (cpu_map__compare_aggr_cpu_id(s2, id)) diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 32c8527de347..32cf24186229 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -108,8 +108,7 @@ struct runtime_stat { struct rblist value_list; }; -typedef struct aggr_cpu_id (*aggr_get_id_t)(struct perf_stat_config *config, - struct perf_cpu_map *m, int cpu); +typedef struct aggr_cpu_id (*aggr_get_id_t)(struct perf_stat_config *config, int cpu); struct perf_stat_config { enum aggr_mode aggr_mode; -- cgit v1.2.3 From eff54c24bb147afc0a1423b49bfa1b8eaa85a88f Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:09 -0800 Subject: perf cpumap: Switch cpu_map__build_map() to cpu function Avoid error prone cpu_map + idx variant. Remove now unused functions. Committer notes: Remove by now unused perf_env__get_cpu(). Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-7-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 44 ++++---------------------------------------- tools/perf/util/cpumap.c | 12 ++++++------ tools/perf/util/cpumap.h | 2 +- 3 files changed, 11 insertions(+), 47 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 9791ae9b1a53..40cb3518f27e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1477,22 +1477,6 @@ static void perf_stat__exit_aggr_mode(void) stat_config.cpus_aggr_map = NULL; } -static inline int perf_env__get_cpu(void *data, struct perf_cpu_map *map, int idx) -{ - struct perf_env *env = data; - int cpu; - - if (idx > map->nr) - return -1; - - cpu = map->map[idx]; - - if (cpu >= env->nr_cpus_avail) - return -1; - - return cpu; -} - static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; @@ -1504,11 +1488,6 @@ static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(int cpu, void *data) return id; } -static struct aggr_cpu_id perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) -{ - return perf_env__get_socket_aggr_by_cpu(perf_env__get_cpu(data, map, idx), data); -} - static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; @@ -1527,11 +1506,6 @@ static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(int cpu, void *data) return id; } -static struct aggr_cpu_id perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) -{ - return perf_env__get_die_aggr_by_cpu(perf_env__get_cpu(data, map, idx), data); -} - static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; @@ -1551,11 +1525,6 @@ static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(int cpu, void *data) return id; } -static struct aggr_cpu_id perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) -{ - return perf_env__get_core_aggr_by_cpu(perf_env__get_cpu(data, map, idx), 
data); -} - static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(int cpu, void *data) { struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); @@ -1564,33 +1533,28 @@ static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(int cpu, void *data) return id; } -static struct aggr_cpu_id perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) -{ - return perf_env__get_node_aggr_by_cpu(perf_env__get_cpu(data, map, idx), data); -} - static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp) { - return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env); + return cpu_map__build_map(cpus, sockp, perf_env__get_socket_aggr_by_cpu, env); } static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, struct cpu_aggr_map **diep) { - return cpu_map__build_map(cpus, diep, perf_env__get_die, env); + return cpu_map__build_map(cpus, diep, perf_env__get_die_aggr_by_cpu, env); } static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, struct cpu_aggr_map **corep) { - return cpu_map__build_map(cpus, corep, perf_env__get_core, env); + return cpu_map__build_map(cpus, corep, perf_env__get_core_aggr_by_cpu, env); } static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, struct cpu_aggr_map **nodep) { - return cpu_map__build_map(cpus, nodep, perf_env__get_node, env); + return cpu_map__build_map(cpus, nodep, perf_env__get_node_aggr_by_cpu, env); } static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 49fba2c53822..feaf34b25efc 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -163,7 +163,7 @@ static int cmp_aggr_cpu_id(const void *a_pointer, const void *b_pointer) } int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, - struct aggr_cpu_id (*f)(struct perf_cpu_map *map, int cpu, void *data), + struct aggr_cpu_id (*f)(int cpu, void *data), void *data) { int nr = cpus->nr; @@ -178,7 +178,7 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, c->nr = 0; for (cpu = 0; cpu < nr; cpu++) { - s1 = f(cpus, cpu, data); + s1 = f(cpu, data); for (s2 = 0; s2 < c->nr; s2++) { if (cpu_map__compare_aggr_cpu_id(s1, c->map[s2])) break; @@ -290,22 +290,22 @@ struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void *da int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp) { - return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL); + return cpu_map__build_map(cpus, sockp, cpu_map__get_socket_aggr_by_cpu, NULL); } int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **diep) { - return cpu_map__build_map(cpus, diep, cpu_map__get_die, NULL); + return cpu_map__build_map(cpus, diep, cpu_map__get_die_aggr_by_cpu, NULL); } int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **corep) { - return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL); + return cpu_map__build_map(cpus, corep, cpu_map__get_core_aggr_by_cpu, NULL); } int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **numap) { - return cpu_map__build_map(cpus, numap, cpu_map__get_node, NULL); + return cpu_map__build_map(cpus, numap, cpu_map__get_node_aggr_by_cpu, NULL); } /* setup simple routines to easily access node numbers given a cpu number */ diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h 
index c62d67704425..9648816c4255 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -63,7 +63,7 @@ int cpu__max_present_cpu(void); int cpu__get_node(int cpu); int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, - struct aggr_cpu_id (*f)(struct perf_cpu_map *map, int cpu, void *data), + struct aggr_cpu_id (*f)(int cpu, void *data), void *data); int cpu_map__cpu(struct perf_cpu_map *cpus, int idx); -- cgit v1.2.3 From 448a69d9f34d02920cffba741ca0a2e34a5bb316 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:10 -0800 Subject: perf cpumap: Remove map+index get_socket() Migrate final users to appropriate cpu variant. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-8-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/topology.c | 2 +- tools/perf/util/cpumap.c | 9 --------- tools/perf/util/cpumap.h | 1 - tools/perf/util/stat.c | 2 +- 4 files changed, 2 insertions(+), 12 deletions(-) diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 869986139146..69a64074b897 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -150,7 +150,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that socket ID contains only socket for (i = 0; i < map->nr; i++) { - id = cpu_map__get_socket(map, i, NULL); + id = cpu_map__get_socket_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Socket map - Socket ID doesn't match", session->header.env.cpu[map->map[i]].socket_id == id.socket); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index feaf34b25efc..342a5eaee9d3 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -136,15 +136,6 @@ struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data __maybe_u return id; } -struct aggr_cpu_id cpu_map__get_socket(struct perf_cpu_map *map, int idx, - void *data) -{ - if (idx < 0 || idx > map->nr) - return cpu_map__empty_aggr_cpu_id(); - - return cpu_map__get_socket_aggr_by_cpu(map->map[idx], data); -} - static int cmp_aggr_cpu_id(const void *a_pointer, const void *b_pointer) { struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer; diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 9648816c4255..a53af24301d2 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -32,7 +32,6 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size); size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp); int cpu_map__get_socket_id(int cpu); struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data); -struct aggr_cpu_id cpu_map__get_socket(struct perf_cpu_map *map, int idx, void *data); int cpu_map__get_die_id(int cpu); struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data); diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 09ea334586f2..9eca1111fa52 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -328,7 
+328,7 @@ static int check_per_pkg(struct evsel *counter, if (!(vals->run && vals->ena)) return 0; - s = cpu_map__get_socket(cpus, cpu, NULL).socket; + s = cpu_map__get_socket_id(cpu); if (s < 0) return -1; -- cgit v1.2.3 From 1cdae3d6734779a637bc4e6ec24e7f615b4e71be Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:11 -0800 Subject: perf cpumap: Remove map+index get_die() Migrate final users to appropriate cpu variant. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-9-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/topology.c | 2 +- tools/perf/util/cpumap.c | 9 --------- tools/perf/util/cpumap.h | 1 - tools/perf/util/stat.c | 2 +- 4 files changed, 2 insertions(+), 12 deletions(-) diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 69a64074b897..ce085b6f379b 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -136,7 +136,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that die ID contains socket and die for (i = 0; i < map->nr; i++) { - id = cpu_map__get_die(map, i, NULL); + id = cpu_map__get_die_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Die map - Socket ID doesn't match", session->header.env.cpu[map->map[i]].socket_id == id.socket); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 342a5eaee9d3..ff91c32da688 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -216,15 +216,6 @@ struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data) return id; } -struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, - void *data) -{ - if (idx < 0 || idx > map->nr) - return cpu_map__empty_aggr_cpu_id(); - - return cpu_map__get_die_aggr_by_cpu(map->map[idx], data); -} - int cpu_map__get_core_id(int cpu) { int value, ret = cpu__get_topology_int(cpu, "core_id", &value); diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index a53af24301d2..365ed69699e1 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -34,7 +34,6 @@ int cpu_map__get_socket_id(int cpu); struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data); int cpu_map__get_die_id(int cpu); struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data); -struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data); int cpu_map__get_core_id(int cpu); struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data); diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 9eca1111fa52..5ed99bcfe91e 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -336,7 +336,7 @@ static int check_per_pkg(struct evsel *counter, * On multi-die system, die_id > 0. On no-die system, die_id = 0. * We use hashmap(socket, die) to check the used socket+die pair. 
*/ - d = cpu_map__get_die(cpus, cpu, NULL).die; + d = cpu_map__get_die_id(cpu); if (d < 0) return -1; -- cgit v1.2.3 From 3f6233dc7798044637426ae1099d88aa375c467f Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:12 -0800 Subject: perf cpumap: Remove map+index get_core() Migrate final users to appropriate cpu variant. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-10-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/topology.c | 2 +- tools/perf/util/cpumap.c | 8 -------- tools/perf/util/cpumap.h | 1 - 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index ce085b6f379b..9a671670415a 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -121,7 +121,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that core ID contains socket, die and core for (i = 0; i < map->nr; i++) { - id = cpu_map__get_core(map, i, NULL); + id = cpu_map__get_core_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Core map - Core ID doesn't match", session->header.env.cpu[map->map[i]].core_id == id.core); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index ff91c32da688..e8149bcf8bfa 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -246,14 +246,6 @@ struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data) } -struct aggr_cpu_id cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data) -{ - if (idx < 0 || idx > map->nr) - return cpu_map__empty_aggr_cpu_id(); - - return cpu_map__get_core_aggr_by_cpu(map->map[idx], data); -} - struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data __maybe_unused) { struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 365ed69699e1..7e1829468bd6 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -36,7 +36,6 @@ int cpu_map__get_die_id(int cpu); struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data); int cpu_map__get_core_id(int cpu); struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data); -struct aggr_cpu_id cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data); int cpu_map__get_node_id(int cpu); struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data); -- cgit v1.2.3 From 86d94048e234c94af88a528ab4d5ef16e8a89f8a Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:13 -0800 Subject: perf cpumap: Remove map+index get_node() Migrate final users to appropriate cpu variant. 
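For illustration, the conversion pattern (sketched here; the actual change is the topology.c hunk below) is to resolve the cpu from the map once, then build the ID from the cpu directly:

  /* Before: the map + index variant did the lookup internally. */
  id = cpu_map__get_node(map, i, NULL);

  /* After: look the cpu up explicitly, then use the by-cpu variant. */
  id = cpu_map__get_node_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL);
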
Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-11-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/topology.c | 2 +- tools/perf/util/cpumap.c | 8 -------- tools/perf/util/cpumap.h | 1 - 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 9a671670415a..5992b323c4f5 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -162,7 +162,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that node ID contains only node for (i = 0; i < map->nr; i++) { - id = cpu_map__get_node(map, i, NULL); + id = cpu_map__get_node_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Node map - Node ID doesn't match", cpu__get_node(map->map[i]) == id.node); TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index e8149bcf8bfa..f67b2e7aac13 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -254,14 +254,6 @@ struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data __maybe_unu return id; } -struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data) -{ - if (idx < 0 || idx >= map->nr) - return cpu_map__empty_aggr_cpu_id(); - - return cpu_map__get_node_aggr_by_cpu(map->map[idx], data); -} - int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp) { return cpu_map__build_map(cpus, sockp, cpu_map__get_socket_aggr_by_cpu, NULL); diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 7e1829468bd6..f0121dd4fdcb 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -38,7 +38,6 @@ int cpu_map__get_core_id(int cpu); struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data); int cpu_map__get_node_id(int cpu); struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data); -struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data); int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp); int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **diep); int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **corep); -- cgit v1.2.3 From 49679da388f4c45b0ca444dcf8bb5f59a02f8f4e Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:14 -0800 Subject: perf cpumap: Add comments to aggr_cpu_id() This code is already tested in topology.c. 
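The convention the new comments capture is that a field left at -1 is not aggregated over. As an illustrative sketch (not part of this patch), a socket-level ID for a given cpu number would be built as:

  /* Start with every field at -1, i.e. "do not aggregate over this". */
  struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();

  /* Fill in only the socket; thread, node, die and core stay -1. */
  id.socket = cpu_map__get_socket_id(cpu);
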
Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-12-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index f0121dd4fdcb..edd93e1db36a 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -7,11 +7,20 @@ #include #include +/** Identify where counts are aggregated, -1 implies not to aggregate. */ struct aggr_cpu_id { + /** A value in the range 0 to number of threads. */ int thread; + /** The numa node X as read from /sys/devices/system/node/nodeX. */ int node; + /** + * The socket number as read from + * /sys/devices/system/cpu/cpuX/topology/physical_package_id. + */ int socket; + /** The die id as read from /sys/devices/system/cpu/cpuX/topology/die_id. */ int die; + /** The core id as read from /sys/devices/system/cpu/cpuX/topology/core_id. */ int core; }; -- cgit v1.2.3 From 63e0fa873d8820b996a01a83d832bf1b3969e9b6 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:15 -0800 Subject: perf cpumap: Remove unused cpu_map__socket() The function is unused, so remove it. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-13-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index edd93e1db36a..22e53fd54657 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -53,13 +53,6 @@ int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **cor int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **nodep); const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */ -static inline int cpu_map__socket(struct perf_cpu_map *sock, int s) -{ - if (!sock || s > sock->nr || s < 0) - return 0; - return sock->map[s]; -} - int cpu__setup_cpunode_map(void); int cpu__max_node(void); -- cgit v1.2.3 From 3ac23d199c2bc3bc2a2b31c803e7c5d841959670 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:16 -0800 Subject: perf cpumap: Simplify equal function name Rename cpu_map__compare_aggr_cpu_id() to aggr_cpu_id__equal(), as the cpu_map part of the name is misleading. Equal better describes the function than compare. Switch to passing a const pointer rather than the struct by value, given the number of fields in aggr_cpu_id().
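A caller-side sketch of the rename (illustrative; the real conversions are in the stat-display.c hunks below):

  struct aggr_cpu_id s2 = config->aggr_get_id(config, cpu);

  /* Before: both five-field structs were copied on every comparison. */
  if (cpu_map__compare_aggr_cpu_id(s2, id))
          break;

  /* After: const pointers avoid the copies, and the name states intent. */
  if (aggr_cpu_id__equal(&s2, &id))
          break;
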
Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-14-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.c | 14 +++++++------- tools/perf/util/cpumap.h | 2 +- tools/perf/util/stat-display.c | 18 ++++++++++-------- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index f67b2e7aac13..8fa00a6221c8 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -171,7 +171,7 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, for (cpu = 0; cpu < nr; cpu++) { s1 = f(cpu, data); for (s2 = 0; s2 < c->nr; s2++) { - if (cpu_map__compare_aggr_cpu_id(s1, c->map[s2])) + if (aggr_cpu_id__equal(&s1, &c->map[s2])) break; } if (s2 == c->nr) { @@ -593,13 +593,13 @@ const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */ return online; } -bool cpu_map__compare_aggr_cpu_id(struct aggr_cpu_id a, struct aggr_cpu_id b) +bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b) { - return a.thread == b.thread && - a.node == b.node && - a.socket == b.socket && - a.die == b.die && - a.core == b.core; + return a->thread == b->thread && + a->node == b->node && + a->socket == b->socket && + a->die == b->die && + a->core == b->core; } bool cpu_map__aggr_cpu_id_is_empty(struct aggr_cpu_id a) diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 22e53fd54657..652b76c69376 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -67,7 +67,7 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, int cpu_map__cpu(struct perf_cpu_map *cpus, int idx); bool cpu_map__has(struct perf_cpu_map *cpus, int cpu); -bool cpu_map__compare_aggr_cpu_id(struct aggr_cpu_id a, struct aggr_cpu_id b); +bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b); bool cpu_map__aggr_cpu_id_is_empty(struct aggr_cpu_id a); struct aggr_cpu_id cpu_map__empty_aggr_cpu_id(void); diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 6c40b91d5e32..0241436bb1fb 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -328,20 +328,22 @@ static void print_metric_header(struct perf_stat_config *config, } static int first_shadow_cpu(struct perf_stat_config *config, - struct evsel *evsel, struct aggr_cpu_id id) + struct evsel *evsel, const struct aggr_cpu_id *id) { struct perf_cpu_map *cpus; int cpu, idx; if (config->aggr_mode == AGGR_NONE) - return id.core; + return id->core; if (!config->aggr_get_id) return 0; cpus = evsel__cpus(evsel); perf_cpu_map__for_each_cpu(cpu, idx, cpus) { - if (cpu_map__compare_aggr_cpu_id(config->aggr_get_id(config, cpu), id)) + struct aggr_cpu_id cpu_id = config->aggr_get_id(config, cpu); + + if (aggr_cpu_id__equal(&cpu_id, id)) return cpu; } return 0; @@ -501,7 +503,7 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int } perf_stat__print_shadow_stats(config, counter, uval, - first_shadow_cpu(config, counter, id), + first_shadow_cpu(config, 
counter, &id), &out, &config->metric_events, st); if (!config->csv_output && !config->metric_only) { print_noise(config, counter, noise); @@ -525,12 +527,12 @@ static void aggr_update_shadow(struct perf_stat_config *config, val = 0; perf_cpu_map__for_each_cpu(cpu, idx, cpus) { s2 = config->aggr_get_id(config, cpu); - if (!cpu_map__compare_aggr_cpu_id(s2, id)) + if (!aggr_cpu_id__equal(&s2, &id)) continue; val += perf_counts(counter->counts, idx, 0)->val; } perf_stat__update_shadow_stats(counter, val, - first_shadow_cpu(config, counter, id), + first_shadow_cpu(config, counter, &id), &rt_stat); } } @@ -641,7 +643,7 @@ static void aggr_cb(struct perf_stat_config *config, struct perf_counts_values *counts; s2 = config->aggr_get_id(config, cpu); - if (!cpu_map__compare_aggr_cpu_id(s2, ad->id)) + if (!aggr_cpu_id__equal(&s2, &ad->id)) continue; if (first) ad->nr++; @@ -1217,7 +1219,7 @@ static void print_percore_thread(struct perf_stat_config *config, s2 = config->aggr_get_id(config, cpu); for (s = 0; s < config->aggr_map->nr; s++) { id = config->aggr_map->map[s]; - if (cpu_map__compare_aggr_cpu_id(s2, id)) + if (aggr_cpu_id__equal(&s2, &id)) break; } -- cgit v1.2.3 From 51b826fadf4fc42c8614b752b6cb0cb516589ade Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:17 -0800 Subject: perf cpumap: Rename empty functions Remove cpu_map from name as a cpu_map isn't used. Pass a const pointer rather than by value to avoid unnecessary copying. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-15-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 12 ++++++------ tools/perf/util/cpumap.c | 24 ++++++++++++------------ tools/perf/util/cpumap.h | 4 ++-- tools/perf/util/stat-display.c | 10 +++++----- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 40cb3518f27e..d06921cd3592 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1349,9 +1349,9 @@ static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __ static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, aggr_get_id_t get_id, int cpu) { - struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); + struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (cpu_map__aggr_cpu_id_is_empty(config->cpus_aggr_map->map[cpu])) + if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu])) config->cpus_aggr_map->map[cpu] = get_id(config, cpu); id = config->cpus_aggr_map->map[cpu]; @@ -1480,7 +1480,7 @@ static void perf_stat__exit_aggr_mode(void) static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; - struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); + struct aggr_cpu_id id = aggr_cpu_id__empty(); if (cpu != -1) id.socket = env->cpu[cpu].socket_id; @@ -1491,7 +1491,7 @@ static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(int cpu, void *data) static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; - struct 
aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); + struct aggr_cpu_id id = aggr_cpu_id__empty(); if (cpu != -1) { /* @@ -1509,7 +1509,7 @@ static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(int cpu, void *data) static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(int cpu, void *data) { struct perf_env *env = data; - struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); + struct aggr_cpu_id id = aggr_cpu_id__empty(); if (cpu != -1) { /* @@ -1527,7 +1527,7 @@ static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(int cpu, void *data) static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(int cpu, void *data) { - struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); + struct aggr_cpu_id id = aggr_cpu_id__empty(); id.node = perf_env__numa_node(data, cpu); return id; diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 8fa00a6221c8..b3e1304aca0c 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -104,7 +104,7 @@ struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr) cpus->nr = nr; for (i = 0; i < nr; i++) - cpus->map[i] = cpu_map__empty_aggr_cpu_id(); + cpus->map[i] = aggr_cpu_id__empty(); refcount_set(&cpus->refcnt, 1); } @@ -130,7 +130,7 @@ int cpu_map__get_socket_id(int cpu) struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data __maybe_unused) { - struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); + struct aggr_cpu_id id = aggr_cpu_id__empty(); id.socket = cpu_map__get_socket_id(cpu); return id; @@ -209,7 +209,7 @@ struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data) * make a unique ID. */ id = cpu_map__get_socket_aggr_by_cpu(cpu, data); - if (cpu_map__aggr_cpu_id_is_empty(id)) + if (aggr_cpu_id__is_empty(&id)) return id; id.die = die; @@ -234,7 +234,7 @@ struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data) /* cpu_map__get_die returns a struct with socket and die set*/ id = cpu_map__get_die_aggr_by_cpu(cpu, data); - if (cpu_map__aggr_cpu_id_is_empty(id)) + if (aggr_cpu_id__is_empty(&id)) return id; /* @@ -248,7 +248,7 @@ struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data) struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data __maybe_unused) { - struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); + struct aggr_cpu_id id = aggr_cpu_id__empty(); id.node = cpu_map__get_node_id(cpu); return id; @@ -602,16 +602,16 @@ bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b a->core == b->core; } -bool cpu_map__aggr_cpu_id_is_empty(struct aggr_cpu_id a) +bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a) { - return a.thread == -1 && - a.node == -1 && - a.socket == -1 && - a.die == -1 && - a.core == -1; + return a->thread == -1 && + a->node == -1 && + a->socket == -1 && + a->die == -1 && + a->core == -1; } -struct aggr_cpu_id cpu_map__empty_aggr_cpu_id(void) +struct aggr_cpu_id aggr_cpu_id__empty(void) { struct aggr_cpu_id ret = { .thread = -1, diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 652b76c69376..9589b0001a28 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -68,7 +68,7 @@ int cpu_map__cpu(struct perf_cpu_map *cpus, int idx); bool cpu_map__has(struct perf_cpu_map *cpus, int cpu); bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b); -bool cpu_map__aggr_cpu_id_is_empty(struct aggr_cpu_id a); -struct aggr_cpu_id cpu_map__empty_aggr_cpu_id(void); +bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a); +struct aggr_cpu_id aggr_cpu_id__empty(void); #endif /* 
__PERF_CPUMAP_H */ diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 0241436bb1fb..870b1db71fbc 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -698,7 +698,7 @@ static void print_counter_aggrdata(struct perf_stat_config *config, uval = val * counter->scale; if (cpu != -1) { - id = cpu_map__empty_aggr_cpu_id(); + id = aggr_cpu_id__empty(); id.core = cpu; } printout(config, id, nr, counter, uval, @@ -780,7 +780,7 @@ static struct perf_aggr_thread_value *sort_aggr_thread( continue; buf[i].counter = counter; - buf[i].id = cpu_map__empty_aggr_cpu_id(); + buf[i].id = aggr_cpu_id__empty(); buf[i].id.thread = thread; buf[i].uval = uval; buf[i].val = val; @@ -868,7 +868,7 @@ static void print_counter_aggr(struct perf_stat_config *config, fprintf(output, "%s", prefix); uval = cd.avg * counter->scale; - printout(config, cpu_map__empty_aggr_cpu_id(), 0, counter, uval, prefix, cd.avg_running, + printout(config, aggr_cpu_id__empty(), 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled, cd.avg, &rt_stat); if (!metric_only) fprintf(output, "\n"); @@ -911,7 +911,7 @@ static void print_counter(struct perf_stat_config *config, fprintf(output, "%s", prefix); uval = val * counter->scale; - id = cpu_map__empty_aggr_cpu_id(); + id = aggr_cpu_id__empty(); id.core = cpu; printout(config, id, 0, counter, uval, prefix, run, ena, 1.0, &rt_stat); @@ -938,7 +938,7 @@ static void print_no_aggr_metric(struct perf_stat_config *config, if (prefix) fputs(prefix, config->output); evlist__for_each_entry(evlist, counter) { - id = cpu_map__empty_aggr_cpu_id(); + id = aggr_cpu_id__empty(); id.core = cpu; if (first) { aggr_printout(config, counter, id, 0); -- cgit v1.2.3 From 194a3a202564153493789997643181737a6ae4b9 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:18 -0800 Subject: perf cpumap: Document cpu__get_node() and remove redundant function cpu_map__get_node_id() isn't used externally and merely delegates to cpu__get_node(). Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-16-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.c | 11 +++++------ tools/perf/util/cpumap.h | 5 ++++- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index b3e1304aca0c..1626b0991408 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -16,6 +16,10 @@ static int max_cpu_num; static int max_present_cpu_num; static int max_node_num; +/** + * The numa node X as read from /sys/devices/system/node/nodeX indexed by the + * CPU number. 
+ */ static int *cpunode_map; static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus) @@ -222,11 +226,6 @@ int cpu_map__get_core_id(int cpu) return ret ?: value; } -int cpu_map__get_node_id(int cpu) -{ - return cpu__get_node(cpu); -} - struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data) { struct aggr_cpu_id id; @@ -250,7 +249,7 @@ struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data __maybe_unu { struct aggr_cpu_id id = aggr_cpu_id__empty(); - id.node = cpu_map__get_node_id(cpu); + id.node = cpu__get_node(cpu); return id; } diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 9589b0001a28..f849f01c5860 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -45,7 +45,6 @@ int cpu_map__get_die_id(int cpu); struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data); int cpu_map__get_core_id(int cpu); struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data); -int cpu_map__get_node_id(int cpu); struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data); int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp); int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **diep); @@ -58,6 +57,10 @@ int cpu__setup_cpunode_map(void); int cpu__max_node(void); int cpu__max_cpu(void); int cpu__max_present_cpu(void); +/** + * cpu__get_node - Returns the numa node X as read from + * /sys/devices/system/node/nodeX for the given CPU. + */ int cpu__get_node(int cpu); int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, -- cgit v1.2.3 From 4e90e5cc74c6b1c1b9abff8b53cec5be1fb5e839 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:19 -0800 Subject: perf cpumap: Remove map from function names that don't use a map Move to the cpu name and document for consistency. 
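For example (a sketch mirroring the env.c hunk below), reading one CPU's topology now uses the cpu__ prefix throughout, since no perf_cpu_map is involved:

  env->cpu[cpu].core_id   = cpu__get_core_id(cpu);   /* was cpu_map__get_core_id() */
  env->cpu[cpu].socket_id = cpu__get_socket_id(cpu); /* was cpu_map__get_socket_id() */
  env->cpu[cpu].die_id    = cpu__get_die_id(cpu);    /* was cpu_map__get_die_id() */
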
Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-17-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.c | 12 ++++++------ tools/perf/util/cpumap.h | 19 ++++++++++++++++--- tools/perf/util/env.c | 6 +++--- tools/perf/util/stat.c | 4 ++-- 4 files changed, 27 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 1626b0991408..e0d7f1da5858 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -126,7 +126,7 @@ static int cpu__get_topology_int(int cpu, const char *name, int *value) return sysfs__read_int(path, value); } -int cpu_map__get_socket_id(int cpu) +int cpu__get_socket_id(int cpu) { int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value); return ret ?: value; @@ -136,7 +136,7 @@ struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data __maybe_u { struct aggr_cpu_id id = aggr_cpu_id__empty(); - id.socket = cpu_map__get_socket_id(cpu); + id.socket = cpu__get_socket_id(cpu); return id; } @@ -190,7 +190,7 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, return 0; } -int cpu_map__get_die_id(int cpu) +int cpu__get_die_id(int cpu) { int value, ret = cpu__get_topology_int(cpu, "die_id", &value); @@ -202,7 +202,7 @@ struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data) struct aggr_cpu_id id; int die; - die = cpu_map__get_die_id(cpu); + die = cpu__get_die_id(cpu); /* There is no die_id on legacy system. 
*/ if (die == -1) die = 0; @@ -220,7 +220,7 @@ struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data) return id; } -int cpu_map__get_core_id(int cpu) +int cpu__get_core_id(int cpu) { int value, ret = cpu__get_topology_int(cpu, "core_id", &value); return ret ?: value; @@ -229,7 +229,7 @@ int cpu_map__get_core_id(int cpu) struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data) { struct aggr_cpu_id id; - int core = cpu_map__get_core_id(cpu); + int core = cpu__get_core_id(cpu); /* cpu_map__get_die returns a struct with socket and die set*/ id = cpu_map__get_die_aggr_by_cpu(cpu, data); diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index f849f01c5860..a053bf31a3f0 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -39,11 +39,8 @@ struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data); size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size); size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size); size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp); -int cpu_map__get_socket_id(int cpu); struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data); -int cpu_map__get_die_id(int cpu); struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data); -int cpu_map__get_core_id(int cpu); struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data); int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp); @@ -62,6 +59,22 @@ int cpu__max_present_cpu(void); * /sys/devices/system/node/nodeX for the given CPU. */ int cpu__get_node(int cpu); +/** + * cpu__get_socket_id - Returns the socket number as read from + * /sys/devices/system/cpu/cpuX/topology/physical_package_id for the given CPU. + */ +int cpu__get_socket_id(int cpu); +/** + * cpu__get_die_id - Returns the die id as read from + * /sys/devices/system/cpu/cpuX/topology/die_id for the given CPU. + */ +int cpu__get_die_id(int cpu); +/** + * cpu__get_core_id - Returns the core id as read from + * /sys/devices/system/cpu/cpuX/topology/core_id for the given CPU. + */ +int cpu__get_core_id(int cpu); + int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, struct aggr_cpu_id (*f)(int cpu, void *data), diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index b9904896eb97..fd12c0dcaefb 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -302,9 +302,9 @@ int perf_env__read_cpu_topology_map(struct perf_env *env) return -ENOMEM; for (cpu = 0; cpu < nr_cpus; ++cpu) { - env->cpu[cpu].core_id = cpu_map__get_core_id(cpu); - env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu); - env->cpu[cpu].die_id = cpu_map__get_die_id(cpu); + env->cpu[cpu].core_id = cpu__get_core_id(cpu); + env->cpu[cpu].socket_id = cpu__get_socket_id(cpu); + env->cpu[cpu].die_id = cpu__get_die_id(cpu); } env->nr_cpus_avail = nr_cpus; diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 5ed99bcfe91e..5c24aca0968c 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -328,7 +328,7 @@ static int check_per_pkg(struct evsel *counter, if (!(vals->run && vals->ena)) return 0; - s = cpu_map__get_socket_id(cpu); + s = cpu__get_socket_id(cpu); if (s < 0) return -1; @@ -336,7 +336,7 @@ static int check_per_pkg(struct evsel *counter, * On multi-die system, die_id > 0. On no-die system, die_id = 0. * We use hashmap(socket, die) to check the used socket+die pair. 
*/ - d = cpu_map__get_die_id(cpu); + d = cpu__get_die_id(cpu); if (d < 0) return -1; -- cgit v1.2.3 From adff2c634357115a0f94a9a5054061b497df7f72 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:20 -0800 Subject: perf cpumap: Remove cpu_map__cpu(), use libperf function Switch the remaining few users of cpu_map__cpu() to perf_cpu_map__cpu() and remove the function. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-18-irogers@google.com [ Did the conversion to perf_ftrace__latency_prepare_bpf() as well, used when building with BUILD_BPF_SKEL=1 ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-ftrace.c | 2 +- tools/perf/util/bpf_ftrace.c | 2 +- tools/perf/util/cpumap.c | 9 ++------- tools/perf/util/cpumap.h | 1 - 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index 2b54e2ddc80a..f16c39a37a52 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -281,7 +281,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap) int ret; int last_cpu; - last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1); + last_cpu = perf_cpu_map__cpu(cpumap, cpumap->nr - 1); mask_size = last_cpu / 4 + 2; /* one more byte for EOS */ mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */ diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c index f00a2de6778c..28dc4c60c788 100644 --- a/tools/perf/util/bpf_ftrace.c +++ b/tools/perf/util/bpf_ftrace.c @@ -63,7 +63,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace) fd = bpf_map__fd(skel->maps.cpu_filter); for (i = 0; i < ncpus; i++) { - cpu = cpu_map__cpu(ftrace->evlist->core.cpus, i); + cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i); bpf_map_update_elem(fd, &cpu, &val, BPF_ANY); } } diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index e0d7f1da5858..32f9fc2dd389 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -485,11 +485,6 @@ bool cpu_map__has(struct perf_cpu_map *cpus, int cpu) return perf_cpu_map__idx(cpus, cpu) != -1; } -int cpu_map__cpu(struct perf_cpu_map *cpus, int idx) -{ - return cpus->map[idx]; -} - size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size) { int i, cpu, start = -1; @@ -547,7 +542,7 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size) int i, cpu; char *ptr = buf; unsigned char *bitmap; - int last_cpu = cpu_map__cpu(map, map->nr - 1); + int last_cpu = perf_cpu_map__cpu(map, map->nr - 1); if (buf == NULL) return 0; @@ -559,7 +554,7 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size) } for (i = 0; i < map->nr; i++) { - cpu = cpu_map__cpu(map, i); + cpu = perf_cpu_map__cpu(map, i); bitmap[cpu / 8] |= 1 << (cpu % 8); } diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index a053bf31a3f0..87545bcd461d 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -80,7 +80,6 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, struct aggr_cpu_id 
(*f)(int cpu, void *data), void *data); -int cpu_map__cpu(struct perf_cpu_map *cpus, int idx); bool cpu_map__has(struct perf_cpu_map *cpus, int cpu); bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b); -- cgit v1.2.3 From 5f50e15c1510c77b37e10c6b22912bf4bf11476b Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:21 -0800 Subject: perf cpumap: Refactor cpu_map__build_map() Turn it into a cpu_aggr_map__new(). Pass helper functions. Refactor builtin-stat calls to manually pass function pointers. Try to reduce some copy-paste code. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-19-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 187 +++++++++++++++++++++++++--------------------- tools/perf/util/cpumap.c | 59 ++++++--------- tools/perf/util/cpumap.h | 16 ++-- 3 files changed, 130 insertions(+), 132 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index d06921cd3592..c55a7fee22bc 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1322,6 +1322,17 @@ static struct option stat_options[] = { OPT_END() }; +static const char *const aggr_mode__string[] = { + [AGGR_CORE] = "core", + [AGGR_DIE] = "die", + [AGGR_GLOBAL] = "global", + [AGGR_NODE] = "node", + [AGGR_NONE] = "none", + [AGGR_SOCKET] = "socket", + [AGGR_THREAD] = "thread", + [AGGR_UNSET] = "unset", +}; + static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, int cpu) { @@ -1394,54 +1405,67 @@ static bool term_percore_set(void) return false; } -static int perf_stat_init_aggr_mode(void) +static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) { - int nr; + switch (aggr_mode) { + case AGGR_SOCKET: + return cpu_map__get_socket_aggr_by_cpu; + case AGGR_DIE: + return cpu_map__get_die_aggr_by_cpu; + case AGGR_CORE: + return cpu_map__get_core_aggr_by_cpu; + case AGGR_NODE: + return cpu_map__get_node_aggr_by_cpu; + case AGGR_NONE: + if (term_percore_set()) + return cpu_map__get_core_aggr_by_cpu; + + return NULL; + case AGGR_GLOBAL: + case AGGR_THREAD: + case AGGR_UNSET: + default: + return NULL; + } +} - switch (stat_config.aggr_mode) { +static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) +{ + switch (aggr_mode) { case AGGR_SOCKET: - if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build socket map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_socket_cached; - break; + return perf_stat__get_socket_cached; case AGGR_DIE: - if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build die map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_die_cached; - break; + return perf_stat__get_die_cached; case AGGR_CORE: - if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_cached; - break; + return perf_stat__get_core_cached; case AGGR_NODE: - if 
(cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_node_cached; - break; + return perf_stat__get_node_cached; case AGGR_NONE: if (term_percore_set()) { - if (cpu_map__build_core_map(evsel_list->core.cpus, - &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_cached; + return perf_stat__get_core_cached; } - break; + return NULL; case AGGR_GLOBAL: case AGGR_THREAD: case AGGR_UNSET: default: - break; + return NULL; + } +} + +static int perf_stat_init_aggr_mode(void) +{ + int nr; + aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode); + + if (get_id) { + stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, + get_id, /*data=*/NULL); + if (!stat_config.aggr_map) { + pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); + return -1; + } + stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode); } /* @@ -1533,30 +1557,6 @@ static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(int cpu, void *data) return id; } -static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct cpu_aggr_map **sockp) -{ - return cpu_map__build_map(cpus, sockp, perf_env__get_socket_aggr_by_cpu, env); -} - -static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct cpu_aggr_map **diep) -{ - return cpu_map__build_map(cpus, diep, perf_env__get_die_aggr_by_cpu, env); -} - -static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct cpu_aggr_map **corep) -{ - return cpu_map__build_map(cpus, corep, perf_env__get_core_aggr_by_cpu, env); -} - -static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct cpu_aggr_map **nodep) -{ - return cpu_map__build_map(cpus, nodep, perf_env__get_node_aggr_by_cpu, env); -} - static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, int cpu) { @@ -1580,47 +1580,60 @@ static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *conf return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat_init_aggr_mode_file(struct perf_stat *st) +static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) { - struct perf_env *env = &st->session->header.env; + switch (aggr_mode) { + case AGGR_SOCKET: + return perf_env__get_socket_aggr_by_cpu; + case AGGR_DIE: + return perf_env__get_die_aggr_by_cpu; + case AGGR_CORE: + return perf_env__get_core_aggr_by_cpu; + case AGGR_NODE: + return perf_env__get_node_aggr_by_cpu; + case AGGR_NONE: + case AGGR_GLOBAL: + case AGGR_THREAD: + case AGGR_UNSET: + default: + return NULL; + } +} - switch (stat_config.aggr_mode) { +static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) +{ + switch (aggr_mode) { case AGGR_SOCKET: - if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build socket map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_socket_file; - break; + return perf_stat__get_socket_file; case AGGR_DIE: - if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build die map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_die_file; - break; + return perf_stat__get_die_file; case AGGR_CORE: - if (perf_env__build_core_map(env, 
evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_file; - break; + return perf_stat__get_core_file; case AGGR_NODE: - if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_node_file; - break; + return perf_stat__get_node_file; case AGGR_NONE: case AGGR_GLOBAL: case AGGR_THREAD: case AGGR_UNSET: default: - break; + return NULL; } +} + +static int perf_stat_init_aggr_mode_file(struct perf_stat *st) +{ + struct perf_env *env = &st->session->header.env; + aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode); + if (!get_id) + return 0; + + stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, get_id, env); + if (!stat_config.aggr_map) { + pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); + return -1; + } + stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode); return 0; } diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 32f9fc2dd389..c8f9b3f15759 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -140,7 +140,7 @@ struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data __maybe_u return id; } -static int cmp_aggr_cpu_id(const void *a_pointer, const void *b_pointer) +static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer) { struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer; struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer; @@ -157,37 +157,40 @@ static int cmp_aggr_cpu_id(const void *a_pointer, const void *b_pointer) return a->thread - b->thread; } -int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, - struct aggr_cpu_id (*f)(int cpu, void *data), - void *data) +struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus, + aggr_cpu_id_get_t get_id, + void *data) { - int nr = cpus->nr; - struct cpu_aggr_map *c = cpu_aggr_map__empty_new(nr); - int cpu, s2; - struct aggr_cpu_id s1; + int cpu, idx; + struct cpu_aggr_map *c = cpu_aggr_map__empty_new(cpus->nr); if (!c) - return -1; + return NULL; /* Reset size as it may only be partially filled */ c->nr = 0; - for (cpu = 0; cpu < nr; cpu++) { - s1 = f(cpu, data); - for (s2 = 0; s2 < c->nr; s2++) { - if (aggr_cpu_id__equal(&s1, &c->map[s2])) + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { + bool duplicate = false; + struct aggr_cpu_id cpu_id = get_id(cpu, data); + + for (int j = 0; j < c->nr; j++) { + if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) { + duplicate = true; break; + } } - if (s2 == c->nr) { - c->map[c->nr] = s1; + if (!duplicate) { + c->map[c->nr] = cpu_id; c->nr++; } } + /* ensure we process id in increasing order */ - qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), cmp_aggr_cpu_id); + qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp); + + return c; - *res = c; - return 0; } int cpu__get_die_id(int cpu) @@ -253,26 +256,6 @@ struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data __maybe_unu return id; } -int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp) -{ - return cpu_map__build_map(cpus, sockp, cpu_map__get_socket_aggr_by_cpu, NULL); -} - -int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **diep) -{ - return cpu_map__build_map(cpus, diep, cpu_map__get_die_aggr_by_cpu, NULL); -} - -int cpu_map__build_core_map(struct perf_cpu_map *cpus, 
struct cpu_aggr_map **corep) -{ - return cpu_map__build_map(cpus, corep, cpu_map__get_core_aggr_by_cpu, NULL); -} - -int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **numap) -{ - return cpu_map__build_map(cpus, numap, cpu_map__get_node_aggr_by_cpu, NULL); -} - /* setup simple routines to easily access node numbers given a cpu number */ static int get_max_num(char *path, int *max) { diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 87545bcd461d..611048e2a592 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -43,10 +43,6 @@ struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data); struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data); -int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp); -int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **diep); -int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **corep); -int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **nodep); const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */ int cpu__setup_cpunode_map(void); @@ -75,10 +71,16 @@ int cpu__get_die_id(int cpu); */ int cpu__get_core_id(int cpu); +typedef struct aggr_cpu_id (*aggr_cpu_id_get_t)(int cpu, void *data); -int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res, - struct aggr_cpu_id (*f)(int cpu, void *data), - void *data); +/** + * cpu_aggr_map__new - Create a cpu_aggr_map with an aggr_cpu_id for each cpu in + * cpus. The aggr_cpu_id is created with 'get_id' that may have a data value + * passed to it. The cpu_aggr_map is sorted with duplicate values removed. + */ +struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus, + aggr_cpu_id_get_t get_id, + void *data); bool cpu_map__has(struct perf_cpu_map *cpus, int cpu); -- cgit v1.2.3 From 973aeb3c7ada35b75442126c745bb6074cb3e172 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:22 -0800 Subject: perf cpumap: Rename cpu_map__get_X_aggr_by_cpu functions The functions don't use a cpu_map so reduce them to being like constructors of aggr_cpu_id. 
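Treated as constructors, each function fills in its own topology level plus the levels above it, leaving the remaining fields at -1. An illustrative sketch, where cpu is a hypothetical CPU number:

  struct aggr_cpu_id sid = aggr_cpu_id__socket(cpu, /*data=*/NULL); /* socket only */
  struct aggr_cpu_id did = aggr_cpu_id__die(cpu, /*data=*/NULL);    /* socket + die */
  struct aggr_cpu_id cid = aggr_cpu_id__core(cpu, /*data=*/NULL);   /* socket + die + core */
  struct aggr_cpu_id nid = aggr_cpu_id__node(cpu, /*data=*/NULL);   /* numa node only */
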
Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-20-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 18 +++++++++--------- tools/perf/tests/topology.c | 8 ++++---- tools/perf/util/cpumap.c | 14 +++++++------- tools/perf/util/cpumap.h | 29 +++++++++++++++++++++++++---- 4 files changed, 45 insertions(+), 24 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c55a7fee22bc..a518fcf0b3f8 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1336,25 +1336,25 @@ static const char *const aggr_mode__string[] = { static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, int cpu) { - return cpu_map__get_socket_aggr_by_cpu(cpu, /*data=*/NULL); + return aggr_cpu_id__socket(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, int cpu) { - return cpu_map__get_die_aggr_by_cpu(cpu, /*data=*/NULL); + return aggr_cpu_id__die(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, int cpu) { - return cpu_map__get_core_aggr_by_cpu(cpu, /*data=*/NULL); + return aggr_cpu_id__core(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, int cpu) { - return cpu_map__get_node_aggr_by_cpu(cpu, /*data=*/NULL); + return aggr_cpu_id__node(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, @@ -1409,16 +1409,16 @@ static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) { switch (aggr_mode) { case AGGR_SOCKET: - return cpu_map__get_socket_aggr_by_cpu; + return aggr_cpu_id__socket; case AGGR_DIE: - return cpu_map__get_die_aggr_by_cpu; + return aggr_cpu_id__die; case AGGR_CORE: - return cpu_map__get_core_aggr_by_cpu; + return aggr_cpu_id__core; case AGGR_NODE: - return cpu_map__get_node_aggr_by_cpu; + return aggr_cpu_id__node; case AGGR_NONE: if (term_percore_set()) - return cpu_map__get_core_aggr_by_cpu; + return aggr_cpu_id__core; return NULL; case AGGR_GLOBAL: diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 5992b323c4f5..0cb7b015b4b9 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -121,7 +121,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that core ID contains socket, die and core for (i = 0; i < map->nr; i++) { - id = cpu_map__get_core_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL); + id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Core map - Core ID doesn't match", session->header.env.cpu[map->map[i]].core_id == id.core); @@ -136,7 +136,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that die ID contains socket and die for (i = 0; i < map->nr; i++) { - id = cpu_map__get_die_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL); + id = aggr_cpu_id__die(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Die map - Socket ID doesn't match", 
session->header.env.cpu[map->map[i]].socket_id == id.socket); @@ -150,7 +150,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that socket ID contains only socket for (i = 0; i < map->nr; i++) { - id = cpu_map__get_socket_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL); + id = aggr_cpu_id__socket(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Socket map - Socket ID doesn't match", session->header.env.cpu[map->map[i]].socket_id == id.socket); @@ -162,7 +162,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that node ID contains only node for (i = 0; i < map->nr; i++) { - id = cpu_map__get_node_aggr_by_cpu(perf_cpu_map__cpu(map, i), NULL); + id = aggr_cpu_id__node(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Node map - Node ID doesn't match", cpu__get_node(map->map[i]) == id.node); TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index c8f9b3f15759..19e502cc65e7 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -132,7 +132,7 @@ int cpu__get_socket_id(int cpu) return ret ?: value; } -struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data __maybe_unused) +struct aggr_cpu_id aggr_cpu_id__socket(int cpu, void *data __maybe_unused) { struct aggr_cpu_id id = aggr_cpu_id__empty(); @@ -200,7 +200,7 @@ int cpu__get_die_id(int cpu) return ret ?: value; } -struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data) +struct aggr_cpu_id aggr_cpu_id__die(int cpu, void *data) { struct aggr_cpu_id id; int die; @@ -215,7 +215,7 @@ struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data) * with the socket ID and then add die to * make a unique ID. */ - id = cpu_map__get_socket_aggr_by_cpu(cpu, data); + id = aggr_cpu_id__socket(cpu, data); if (aggr_cpu_id__is_empty(&id)) return id; @@ -229,13 +229,13 @@ int cpu__get_core_id(int cpu) return ret ?: value; } -struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data) +struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data) { struct aggr_cpu_id id; int core = cpu__get_core_id(cpu); - /* cpu_map__get_die returns a struct with socket and die set*/ - id = cpu_map__get_die_aggr_by_cpu(cpu, data); + /* aggr_cpu_id__die returns a struct with socket and die set*/ + id = aggr_cpu_id__die(cpu, data); if (aggr_cpu_id__is_empty(&id)) return id; @@ -248,7 +248,7 @@ struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data) } -struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data __maybe_unused) +struct aggr_cpu_id aggr_cpu_id__node(int cpu, void *data __maybe_unused) { struct aggr_cpu_id id = aggr_cpu_id__empty(); diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 611048e2a592..ecd658293a2d 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -39,10 +39,6 @@ struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data); size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size); size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size); size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp); -struct aggr_cpu_id cpu_map__get_socket_aggr_by_cpu(int cpu, void *data); -struct aggr_cpu_id cpu_map__get_die_aggr_by_cpu(int cpu, void *data); -struct aggr_cpu_id cpu_map__get_core_aggr_by_cpu(int cpu, void *data); -struct aggr_cpu_id cpu_map__get_node_aggr_by_cpu(int cpu, void *data); const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe 
*/ int cpu__setup_cpunode_map(void); @@ -88,4 +84,29 @@ bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a); struct aggr_cpu_id aggr_cpu_id__empty(void); + +/** + * aggr_cpu_id__socket - Create an aggr_cpu_id with the socket populated with + * the socket for cpu. The function signature is compatible with + * aggr_cpu_id_get_t. + */ +struct aggr_cpu_id aggr_cpu_id__socket(int cpu, void *data); +/** + * aggr_cpu_id__die - Create an aggr_cpu_id with the die and socket populated + * with the die and socket for cpu. The function signature is compatible with + * aggr_cpu_id_get_t. + */ +struct aggr_cpu_id aggr_cpu_id__die(int cpu, void *data); +/** + * aggr_cpu_id__core - Create an aggr_cpu_id with the core, die and socket + * populated with the core, die and socket for cpu. The function signature is + * compatible with aggr_cpu_id_get_t. + */ +struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data); +/** + * aggr_cpu_id__node - Create an aggr_cpu_id with the numa node populated for + * cpu. The function signature is compatible with aggr_cpu_id_get_t. + */ +struct aggr_cpu_id aggr_cpu_id__node(int cpu, void *data); + #endif /* __PERF_CPUMAP_H */ -- cgit v1.2.3 From dfc66beff7fa95b9eb507ccb48fb325569bc2f74 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:23 -0800 Subject: perf cpumap: Move 'has' function to libperf Make the cpu map argument const for consistency with the rest of the API. Modify cpu_map__idx accordingly. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-21-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/Documentation/libperf.txt | 1 + tools/lib/perf/cpumap.c | 7 ++++++- tools/lib/perf/include/internal/cpumap.h | 2 +- tools/lib/perf/include/perf/cpumap.h | 1 + tools/lib/perf/libperf.map | 1 + tools/perf/arch/arm/util/cs-etm.c | 16 ++++++++-------- tools/perf/builtin-sched.c | 6 +++--- tools/perf/tests/topology.c | 2 +- tools/perf/util/cpumap.c | 5 ----- tools/perf/util/cpumap.h | 2 -- tools/perf/util/cputopo.c | 2 +- 11 files changed, 23 insertions(+), 22 deletions(-) diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt index 63ae5e0195ce..faef9ba3a540 100644 --- a/tools/lib/perf/Documentation/libperf.txt +++ b/tools/lib/perf/Documentation/libperf.txt @@ -48,6 +48,7 @@ SYNOPSIS int perf_cpu_map__nr(const struct perf_cpu_map *cpus); bool perf_cpu_map__empty(const struct perf_cpu_map *map); int perf_cpu_map__max(struct perf_cpu_map *map); + bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu); #define perf_cpu_map__for_each_cpu(cpu, idx, cpus) -- diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index adaad3dddf6e..3c36a06771af 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -268,7 +268,7 @@ bool perf_cpu_map__empty(const struct perf_cpu_map *map) return map ? 
map->map[0] == -1 : true; } -int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu) +int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu) { int low = 0, high = cpus->nr; @@ -288,6 +288,11 @@ int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu) return -1; } +bool perf_cpu_map__has(const struct perf_cpu_map *cpus, int cpu) +{ + return perf_cpu_map__idx(cpus, cpu) != -1; +} + int perf_cpu_map__max(struct perf_cpu_map *map) { // cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well. diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h index 4054169c12c5..71a31ed738c9 100644 --- a/tools/lib/perf/include/internal/cpumap.h +++ b/tools/lib/perf/include/internal/cpumap.h @@ -23,6 +23,6 @@ struct perf_cpu_map { #define MAX_NR_CPUS 2048 #endif -int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu); +int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu); #endif /* __LIBPERF_INTERNAL_CPUMAP_H */ diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h index 7c27766ea0bf..3f1c0afa3ccd 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -20,6 +20,7 @@ LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map); LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map); +LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu); #define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \ for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \ diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map index 5979bf92d98f..93696affda2e 100644 --- a/tools/lib/perf/libperf.map +++ b/tools/lib/perf/libperf.map @@ -10,6 +10,7 @@ LIBPERF_0.0.1 { perf_cpu_map__cpu; perf_cpu_map__empty; perf_cpu_map__max; + perf_cpu_map__has; perf_thread_map__new_dummy; perf_thread_map__set_pid; perf_thread_map__comm; diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index 8a3d54a86c9c..129c0272d65b 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -204,8 +204,8 @@ static int cs_etm_set_option(struct auxtrace_record *itr, /* Set option of each CPU we have */ for (i = 0; i < cpu__max_cpu(); i++) { - if (!cpu_map__has(event_cpus, i) || - !cpu_map__has(online_cpus, i)) + if (!perf_cpu_map__has(event_cpus, i) || + !perf_cpu_map__has(online_cpus, i)) continue; if (option & BIT(ETM_OPT_CTXTID)) { @@ -523,8 +523,8 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, /* cpu map is not empty, we have specific CPUs to work with */ if (!perf_cpu_map__empty(event_cpus)) { for (i = 0; i < cpu__max_cpu(); i++) { - if (!cpu_map__has(event_cpus, i) || - !cpu_map__has(online_cpus, i)) + if (!perf_cpu_map__has(event_cpus, i) || + !perf_cpu_map__has(online_cpus, i)) continue; if (cs_etm_is_ete(itr, i)) @@ -537,7 +537,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, } else { /* get configuration for all CPUs in the system */ for (i = 0; i < cpu__max_cpu(); i++) { - if (!cpu_map__has(online_cpus, i)) + if (!perf_cpu_map__has(online_cpus, i)) continue; if (cs_etm_is_ete(itr, i)) @@ -722,8 +722,8 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, } else { /* Make sure all specified CPUs are online */ for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) { - if (cpu_map__has(event_cpus, i) && - 
!cpu_map__has(online_cpus, i)) + if (perf_cpu_map__has(event_cpus, i) && + !perf_cpu_map__has(online_cpus, i)) return -EINVAL; } @@ -744,7 +744,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, offset = CS_ETM_SNAPSHOT + 1; for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++) - if (cpu_map__has(cpu_map, i)) + if (perf_cpu_map__has(cpu_map, i)) cs_etm_get_metadata(i, &offset, itr, info); perf_cpu_map__put(online_cpus); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 4527f632ebe4..9da1da4749c9 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1617,10 +1617,10 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, if (curr_thread && thread__has_color(curr_thread)) pid_color = COLOR_PIDS; - if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu)) + if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu)) continue; - if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu)) + if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu)) cpu_color = COLOR_CPUS; if (cpu != this_cpu) @@ -1639,7 +1639,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, color_fprintf(stdout, color, " "); } - if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu)) + if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu)) goto out; timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 0cb7b015b4b9..cb29ea7ec409 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -112,7 +112,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) TEST_ASSERT_VAL("Session header CPU map not set", session->header.env.cpu); for (i = 0; i < session->header.env.nr_cpus_avail; i++) { - if (!cpu_map__has(map, i)) + if (!perf_cpu_map__has(map, i)) continue; pr_debug("CPU %d, core %d, socket %d\n", i, session->header.env.cpu[i].core_id, diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 19e502cc65e7..f1d76a8e92e8 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -463,11 +463,6 @@ int cpu__setup_cpunode_map(void) return 0; } -bool cpu_map__has(struct perf_cpu_map *cpus, int cpu) -{ - return perf_cpu_map__idx(cpus, cpu) != -1; -} - size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size) { int i, cpu, start = -1; diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index ecd658293a2d..32b8b5178f01 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -78,8 +78,6 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus, aggr_cpu_id_get_t get_id, void *data); -bool cpu_map__has(struct perf_cpu_map *cpus, int cpu); - bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b); bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a); struct aggr_cpu_id aggr_cpu_id__empty(void); diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c index 51b429c86f98..8affb37d90e7 100644 --- a/tools/perf/util/cputopo.c +++ b/tools/perf/util/cputopo.c @@ -218,7 +218,7 @@ struct cpu_topology *cpu_topology__new(void) tp->core_cpus_list = addr; for (i = 0; i < nr; i++) { - if (!cpu_map__has(map, i)) + if (!perf_cpu_map__has(map, i)) continue; ret = build_cpu_topology(tp, i); -- cgit v1.2.3 From 92aad5c33f531187cc6013c8e51620212cdfefe1 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:24 -0800 
Subject: perf cpumap: Add some comments to cpu_aggr_map Move cpu_aggr_map__empty_new() to be with the other cpu_aggr_map functions. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-22-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 32b8b5178f01..25a08d640d81 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -24,16 +24,18 @@ struct aggr_cpu_id { int core; }; +/** A collection of aggr_cpu_id values, the "built" version is sorted and uniqued. */ struct cpu_aggr_map { refcount_t refcnt; + /** Number of valid entries. */ int nr; + /** The entries. */ struct aggr_cpu_id map[]; }; struct perf_record_cpu_map_data; struct perf_cpu_map *perf_cpu_map__empty_new(int nr); -struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr); struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data); size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size); @@ -67,6 +69,12 @@ int cpu__get_die_id(int cpu); */ int cpu__get_core_id(int cpu); +/** + * cpu_aggr_map__empty_new - Create a cpu_aggr_map of size nr with every entry + * being empty. + */ +struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr); + typedef struct aggr_cpu_id (*aggr_cpu_id_get_t)(int cpu, void *data); /** -- cgit v1.2.3 From bd26bddfd93688d10984251249b84e1f6d91de27 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:25 -0800 Subject: perf cpumap: Trim the cpu_aggr_map cpu_aggr_map__new() removes duplicates; when this happens, shrink the array. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-23-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index f1d76a8e92e8..2779474f39db 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -185,7 +185,15 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus, c->nr++; } } - + /* Trim. 
*/ + if (c->nr != cpus->nr) { + struct cpu_aggr_map *trimmed_c = + realloc(c, + sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr); + + if (trimmed_c) + c = trimmed_c; + } /* ensure we process id in increasing order */ qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp); -- cgit v1.2.3 From f9e891ea172235f902972069b87be3bdc7c48f5a Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:26 -0800 Subject: perf stat: Fix memory leak in check_per_pkg() If the key is already present then free the key used for lookup. Found with: $ perf stat -M IO_Read_BW /bin/true ==1749112==ERROR: LeakSanitizer: detected memory leaks Direct leak of 32 byte(s) in 4 object(s) allocated from: #0 0x7f6f6fa7d7cf in __interceptor_malloc ../../../../src/libsanitizer/asan/asan_malloc_linux.cpp:145 #1 0x55acecd9d7a6 in check_per_pkg util/stat.c:343 #2 0x55acecd9d9c5 in process_counter_values util/stat.c:365 #3 0x55acecd9e0ab in process_counter_maps util/stat.c:421 #4 0x55acecd9e292 in perf_stat_process_counter util/stat.c:443 #5 0x55aceca8553e in read_counters ./tools/perf/builtin-stat.c:470 #6 0x55aceca88fe3 in __run_perf_stat ./tools/perf/builtin-stat.c:1023 #7 0x55aceca89146 in run_perf_stat ./tools/perf/builtin-stat.c:1048 #8 0x55aceca90858 in cmd_stat ./tools/perf/builtin-stat.c:2555 #9 0x55acecc05fa5 in run_builtin ./tools/perf/perf.c:313 #10 0x55acecc064fe in handle_internal_command ./tools/perf/perf.c:365 #11 0x55acecc068bb in run_argv ./tools/perf/perf.c:409 #12 0x55acecc070aa in main ./tools/perf/perf.c:539 Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-24-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 5c24aca0968c..c69b221f5e3e 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -345,9 +345,10 @@ static int check_per_pkg(struct evsel *counter, return -ENOMEM; *key = (uint64_t)d << 32 | s; - if (hashmap__find(mask, (void *)key, NULL)) + if (hashmap__find(mask, (void *)key, NULL)) { *skip = true; - else + free(key); + } else ret = hashmap__add(mask, (void *)key, (void *)1); return ret; -- cgit v1.2.3 From 34794913e2dc08a464499f795073a021feeb3b47 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:27 -0800 Subject: perf cpumap: Add CPU to aggr_cpu_id With no aggregation, such as 'perf stat -A', the aggr_cpu_id lacks a way to describe per CPU aggregation and the core is set to the CPU in places like print_counter_aggrdata in stat-display.c. Setting the core to the CPU is undesirable as the CPU will exceed valid core values and lead to confusion. Add a CPU variable to address this. 
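Before the diff, a quick standalone illustration of the idea. This is a toy sketch, not the perf sources: the struct shape and the -1 "empty" sentinel mirror the hunks that follow, but all names and values here are local stand-ins.

/* Sketch: give per-CPU aggregation its own field instead of overloading core. */
#include <stdio.h>
#include <stdbool.h>

struct aggr_cpu_id {
	int thread, node, socket, die, core;
	int cpu;	/* new field: -1 unless aggregating per CPU */
};

static struct aggr_cpu_id aggr_cpu_id_empty(void)
{
	/* -1 means "not aggregated at this level". */
	return (struct aggr_cpu_id){ -1, -1, -1, -1, -1, -1 };
}

static bool aggr_cpu_id_is_empty(const struct aggr_cpu_id *a)
{
	return a->thread == -1 && a->node == -1 && a->socket == -1 &&
	       a->die == -1 && a->core == -1 && a->cpu == -1;
}

int main(void)
{
	struct aggr_cpu_id id = aggr_cpu_id_empty();

	id.cpu = 3;	/* 'perf stat -A' style per-CPU identity */
	printf("empty=%d cpu=%d core=%d\n",
	       aggr_cpu_id_is_empty(&id), id.cpu, id.core);
	return 0;
}

With a dedicated cpu member, a per-CPU identity can leave core at -1, so a CPU number that exceeds the range of valid core IDs can no longer masquerade as a core.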
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-25-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/topology.c | 19 +++++++++++++++++++ tools/perf/util/cpumap.c | 25 +++++++++++++++++++++---- tools/perf/util/cpumap.h | 8 ++++++++ 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index cb29ea7ec409..33e4cb81265c 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -119,6 +119,22 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) session->header.env.cpu[i].socket_id); } + // Test that CPU ID contains socket, die, core and CPU + for (i = 0; i < map->nr; i++) { + id = aggr_cpu_id__cpu(perf_cpu_map__cpu(map, i), NULL); + TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match", map->map[i] == id.cpu); + + TEST_ASSERT_VAL("Cpu map - Core ID doesn't match", + session->header.env.cpu[map->map[i]].core_id == id.core); + TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match", + session->header.env.cpu[map->map[i]].socket_id == id.socket); + + TEST_ASSERT_VAL("Cpu map - Die ID doesn't match", + session->header.env.cpu[map->map[i]].die_id == id.die); + TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1); + TEST_ASSERT_VAL("Cpu map - Thread is set", id.thread == -1); + } + // Test that core ID contains socket, die and core for (i = 0; i < map->nr; i++) { id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL); @@ -145,6 +161,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1); TEST_ASSERT_VAL("Die map - Core is set", id.core == -1); + TEST_ASSERT_VAL("Die map - CPU is set", id.cpu == -1); TEST_ASSERT_VAL("Die map - Thread is set", id.thread == -1); } @@ -157,6 +174,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1); TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1); TEST_ASSERT_VAL("Socket map - Core is set", id.core == -1); + TEST_ASSERT_VAL("Socket map - CPU is set", id.cpu == -1); TEST_ASSERT_VAL("Socket map - Thread is set", id.thread == -1); } @@ -168,6 +186,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1); TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1); TEST_ASSERT_VAL("Node map - Core is set", id.core == -1); + TEST_ASSERT_VAL("Node map - CPU is set", id.cpu == -1); TEST_ASSERT_VAL("Node map - Thread is set", id.thread == -1); } perf_session__delete(session); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 2779474f39db..48ce583af0ec 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -242,7 +242,7 @@ struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data) struct aggr_cpu_id id; int core = cpu__get_core_id(cpu); - /* aggr_cpu_id__die returns a struct with socket and die set*/ + /* aggr_cpu_id__die returns a struct with socket and die set. 
*/ id = aggr_cpu_id__die(cpu, data); if (aggr_cpu_id__is_empty(&id)) return id; @@ -256,6 +256,20 @@ struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data) } +struct aggr_cpu_id aggr_cpu_id__cpu(int cpu, void *data) +{ + struct aggr_cpu_id id; + + /* aggr_cpu_id__core returns a struct with socket, die and core set. */ + id = aggr_cpu_id__core(cpu, data); + if (aggr_cpu_id__is_empty(&id)) + return id; + + id.cpu = cpu; + return id; + +} + struct aggr_cpu_id aggr_cpu_id__node(int cpu, void *data __maybe_unused) { struct aggr_cpu_id id = aggr_cpu_id__empty(); @@ -579,7 +593,8 @@ bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b a->node == b->node && a->socket == b->socket && a->die == b->die && - a->core == b->core; + a->core == b->core && + a->cpu == b->cpu; } bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a) @@ -588,7 +603,8 @@ bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a) a->node == -1 && a->socket == -1 && a->die == -1 && - a->core == -1; + a->core == -1 && + a->cpu == -1; } struct aggr_cpu_id aggr_cpu_id__empty(void) @@ -598,7 +614,8 @@ struct aggr_cpu_id aggr_cpu_id__empty(void) .node = -1, .socket = -1, .die = -1, - .core = -1 + .core = -1, + .cpu = -1 }; return ret; } diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 25a08d640d81..b98cd1739677 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -22,6 +22,8 @@ struct aggr_cpu_id { int die; /** The core id as read from /sys/devices/system/cpu/cpuX/topology/core_id. */ int core; + /** CPU aggregation, note there is one CPU for each SMT thread. */ + int cpu; }; /** A collection of aggr_cpu_id values, the "built" version is sorted and uniqued. */ @@ -109,6 +111,12 @@ struct aggr_cpu_id aggr_cpu_id__die(int cpu, void *data); * compatible with aggr_cpu_id_get_t. */ struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data); +/** + * aggr_cpu_id__cpu - Create an aggr_cpu_id with the cpu, core, die and socket + * populated with the cpu, core, die and socket for cpu. The function signature + * is compatible with aggr_cpu_id_get_t. + */ +struct aggr_cpu_id aggr_cpu_id__cpu(int cpu, void *data); /** * aggr_cpu_id__node - Create an aggr_cpu_id with the numa node populated for * cpu. The function signature is compatible with aggr_cpu_id_get_t. -- cgit v1.2.3 From 7365f105e37429d28757f7f68d4850723ce18aa1 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:28 -0800 Subject: perf stat-display: Avoid use of core for CPU Correct use of cpumap index in print_no_aggr_metric(). 
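The distinction driving this and the following patches is "CPU number" versus "cpu map index", and a small standalone sketch makes it concrete. The binary search below mimics the shape of perf_cpu_map__idx() as quoted earlier in the series; the arrays and values are toy data.

#include <stdio.h>

/* Translate a system CPU number to an index into a sorted cpu map. */
static int map_idx(const int *map, int nr, int cpu)
{
	int low = 0, high = nr;

	while (low < high) {
		int idx = (low + high) / 2;

		if (map[idx] == cpu)
			return idx;
		if (map[idx] < cpu)
			low = idx + 1;
		else
			high = idx;
	}
	return -1;	/* CPU not covered by this map */
}

int main(void)
{
	const int evsel_cpus[] = { 0, 2, 3 };	/* sparse map: CPU 1 missing */
	const long counts[] = { 100, 200, 300 };	/* laid out by map index */

	for (int cpu = 0; cpu < 4; cpu++) {
		int idx = map_idx(evsel_cpus, 3, cpu);

		if (idx < 0)
			continue;	/* skip CPUs the counter doesn't cover */
		printf("CPU%d -> idx %d -> count %ld\n", cpu, idx, counts[idx]);
	}
	return 0;
}

On this map, CPU 3 lives at index 2; indexing counts with the raw CPU number 3 would read past the end of the array, which is exactly the class of bug these renames are flushing out.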
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-26-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-display.c | 45 +++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 870b1db71fbc..f48d1678861c 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -120,11 +120,10 @@ static void aggr_printout(struct perf_stat_config *config, id.die, config->csv_output ? 0 : -3, id.core, config->csv_sep); - } else if (id.core > -1) { + } else if (id.cpu > -1) { fprintf(config->output, "CPU%*d%s", config->csv_output ? 0 : -7, - evsel__cpus(evsel)->map[id.core], - config->csv_sep); + id.cpu, config->csv_sep); } break; case AGGR_THREAD: @@ -334,7 +333,7 @@ static int first_shadow_cpu(struct perf_stat_config *config, int cpu, idx; if (config->aggr_mode == AGGR_NONE) - return id->core; + return id->cpu; if (!config->aggr_get_id) return 0; @@ -697,10 +696,9 @@ static void print_counter_aggrdata(struct perf_stat_config *config, fprintf(output, "%s", prefix); uval = val * counter->scale; - if (cpu != -1) { - id = aggr_cpu_id__empty(); - id.core = cpu; - } + if (cpu != -1) + id = aggr_cpu_id__cpu(cpu, /*data=*/NULL); + printout(config, id, nr, counter, uval, prefix, run, ena, 1.0, &rt_stat); if (!metric_only) @@ -911,8 +909,7 @@ static void print_counter(struct perf_stat_config *config, fprintf(output, "%s", prefix); uval = val * counter->scale; - id = aggr_cpu_id__empty(); - id.core = cpu; + id = aggr_cpu_id__cpu(cpu, /*data=*/NULL); printout(config, id, 0, counter, uval, prefix, run, ena, 1.0, &rt_stat); @@ -924,29 +921,31 @@ static void print_no_aggr_metric(struct perf_stat_config *config, struct evlist *evlist, char *prefix) { - int cpu; - int nrcpus = 0; - struct evsel *counter; - u64 ena, run, val; - double uval; - struct aggr_cpu_id id; + int all_idx, cpu; - nrcpus = evlist->core.cpus->nr; - for (cpu = 0; cpu < nrcpus; cpu++) { + perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.cpus) { + struct evsel *counter; bool first = true; if (prefix) fputs(prefix, config->output); evlist__for_each_entry(evlist, counter) { - id = aggr_cpu_id__empty(); - id.core = cpu; + u64 ena, run, val; + double uval; + struct aggr_cpu_id id; + int counter_idx = perf_cpu_map__idx(evsel__cpus(counter), cpu); + + if (counter_idx < 0) + continue; + + id = aggr_cpu_id__cpu(cpu, /*data=*/NULL); if (first) { aggr_printout(config, counter, id, 0); first = false; } - val = perf_counts(counter->counts, cpu, 0)->val; - ena = perf_counts(counter->counts, cpu, 0)->ena; - run = perf_counts(counter->counts, cpu, 0)->run; + val = perf_counts(counter->counts, counter_idx, 0)->val; + ena = perf_counts(counter->counts, counter_idx, 0)->ena; + run = perf_counts(counter->counts, counter_idx, 0)->run; uval = val * counter->scale; printout(config, id, 0, counter, uval, prefix, -- cgit v1.2.3 From 2ca0a3718da24953689b1771589ac63b60f17358 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:29 -0800 
Subject: perf evsel: Derive CPUs and threads in alloc_counts Passing the number of CPUs and threads allows for an evsel's counts to be mismatched to its cpu map. To avoid this always derive the counts size from the cpu map. Change openat-syscall-all-cpus to set the cpus to allow for this to work. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-27-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/openat-syscall-all-cpus.c | 10 +--------- tools/perf/util/counts.c | 8 ++++++-- tools/perf/util/counts.h | 2 +- tools/perf/util/evsel.c | 2 +- tools/perf/util/stat.c | 13 ++++++------- 5 files changed, 15 insertions(+), 20 deletions(-) diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c index cd3dd463783f..544db0839b3b 100644 --- a/tools/perf/tests/openat-syscall-all-cpus.c +++ b/tools/perf/tests/openat-syscall-all-cpus.c @@ -85,15 +85,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb CPU_CLR(cpus->map[cpu], &cpu_set); } - /* - * Here we need to explicitly preallocate the counts, as if - * we use the auto allocation it will allocate just for 1 cpu, - * as we start by cpu 0. - */ - if (evsel__alloc_counts(evsel, cpus->nr, 1) < 0) { - pr_debug("evsel__alloc_counts(ncpus=%d)\n", cpus->nr); - goto out_close_fd; - } + evsel->core.cpus = perf_cpu_map__get(cpus); err = 0; diff --git a/tools/perf/util/counts.c b/tools/perf/util/counts.c index 582f3aeaf5e4..2b81707b9dba 100644 --- a/tools/perf/util/counts.c +++ b/tools/perf/util/counts.c @@ -4,6 +4,7 @@ #include #include "evsel.h" #include "counts.h" +#include #include struct perf_counts *perf_counts__new(int ncpus, int nthreads) @@ -55,9 +56,12 @@ void evsel__reset_counts(struct evsel *evsel) perf_counts__reset(evsel->counts); } -int evsel__alloc_counts(struct evsel *evsel, int ncpus, int nthreads) +int evsel__alloc_counts(struct evsel *evsel) { - evsel->counts = perf_counts__new(ncpus, nthreads); + struct perf_cpu_map *cpus = evsel__cpus(evsel); + int nthreads = perf_thread_map__nr(evsel->core.threads); + + evsel->counts = perf_counts__new(cpus ? cpus->nr : 1, nthreads); return evsel->counts != NULL ? 
0 : -ENOMEM; } diff --git a/tools/perf/util/counts.h b/tools/perf/util/counts.h index 7ff36bf6d644..3e275e9c60d1 100644 --- a/tools/perf/util/counts.h +++ b/tools/perf/util/counts.h @@ -40,7 +40,7 @@ void perf_counts__delete(struct perf_counts *counts); void perf_counts__reset(struct perf_counts *counts); void evsel__reset_counts(struct evsel *evsel); -int evsel__alloc_counts(struct evsel *evsel, int ncpus, int nthreads); +int evsel__alloc_counts(struct evsel *evsel); void evsel__free_counts(struct evsel *evsel); #endif /* __PERF_COUNTS_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a0acf53a2510..2de569a1a272 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1578,7 +1578,7 @@ int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale) if (FD(evsel, cpu, thread) < 0) return -EINVAL; - if (evsel->counts == NULL && evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0) + if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0) return -ENOMEM; if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0) diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index c69b221f5e3e..995cb5003133 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -152,11 +152,13 @@ static void evsel__free_stat_priv(struct evsel *evsel) zfree(&evsel->stats); } -static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads) +static int evsel__alloc_prev_raw_counts(struct evsel *evsel) { + int cpu_map_nr = evsel__nr_cpus(evsel); + int nthreads = perf_thread_map__nr(evsel->core.threads); struct perf_counts *counts; - counts = perf_counts__new(ncpus, nthreads); + counts = perf_counts__new(cpu_map_nr, nthreads); if (counts) evsel->prev_raw_counts = counts; @@ -177,12 +179,9 @@ static void evsel__reset_prev_raw_counts(struct evsel *evsel) static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw) { - int ncpus = evsel__nr_cpus(evsel); - int nthreads = perf_thread_map__nr(evsel->core.threads); - if (evsel__alloc_stat_priv(evsel) < 0 || - evsel__alloc_counts(evsel, ncpus, nthreads) < 0 || - (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0)) + evsel__alloc_counts(evsel) < 0 || + (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0)) return -ENOMEM; return 0; -- cgit v1.2.3 From 7e3d1784c8a4d9c643a6ed0a2c44ee94dee8f7a6 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:30 -0800 Subject: libperf: Switch cpu to more accurate cpu_map_idx Modify variable names and adopt perf_cpu_map__for_each_cpu() in perf_evsel__open(). Renaming is done by looking for consistency in API usage. 
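A rough sketch of the index discipline the renames encode: the fd table is sized and indexed by cpu map index, while the kernel is handed the real CPU number. fake_open() is a hypothetical stand-in for sys_perf_event_open(); everything here is toy code, not libperf.

#include <stdio.h>

static int fake_open(int cpu)
{
	printf("open counter on CPU %d\n", cpu);
	return 100 + cpu;	/* pretend fd */
}

int main(void)
{
	const int cpus[] = { 0, 2, 3 };	/* map: idx 0..2 -> CPU 0, 2, 3 */
	const int nr = 3, nthreads = 2;
	int fd[3][2];	/* analogue of the FD(evsel, cpu_map_idx, thread) xyarray */

	for (int idx = 0; idx < nr; idx++) {
		int cpu = cpus[idx];	/* what perf_cpu_map__cpu(cpus, idx) yields */

		for (int thread = 0; thread < nthreads; thread++)
			fd[idx][thread] = fake_open(cpu);
	}
	printf("fd at idx 1 (CPU %d), thread 0: %d\n", cpus[1], fd[1][0]);
	return 0;
}

Mixing the two axes up compiles fine, which is why the series leans on naming (cpu versus cpu_map_idx) to keep them apart.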
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-28-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/evsel.c | 88 +++++++++++++++++++------------------ tools/lib/perf/include/perf/evsel.h | 10 ++--- 2 files changed, 50 insertions(+), 48 deletions(-) diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index 68f83d2c27c1..8028b5a4da69 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -43,18 +43,22 @@ void perf_evsel__delete(struct perf_evsel *evsel) free(evsel); } -#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y)) -#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL) +#define FD(_evsel, _cpu_map_idx, _thread) \ + ((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread)) +#define MMAP(_evsel, _cpu_map_idx, _thread) \ + (_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \ + : NULL) int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) { evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); if (evsel->fd) { - int cpu, thread; - for (cpu = 0; cpu < ncpus; cpu++) { + int idx, thread; + + for (idx = 0; idx < ncpus; idx++) { for (thread = 0; thread < nthreads; thread++) { - int *fd = FD(evsel, cpu, thread); + int *fd = FD(evsel, idx, thread); if (fd) *fd = -1; @@ -80,7 +84,7 @@ sys_perf_event_open(struct perf_event_attr *attr, return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); } -static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd) +static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd) { struct perf_evsel *leader = evsel->leader; int *fd; @@ -97,7 +101,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou if (!leader->fd) return -ENOTCONN; - fd = FD(leader, cpu, thread); + fd = FD(leader, cpu_map_idx, thread); if (fd == NULL || *fd == -1) return -EBADF; @@ -109,7 +113,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads) { - int cpu, thread, err = 0; + int cpu, idx, thread, err = 0; if (cpus == NULL) { static struct perf_cpu_map *empty_cpu_map; @@ -139,21 +143,21 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) return -ENOMEM; - for (cpu = 0; cpu < cpus->nr; cpu++) { + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { for (thread = 0; thread < threads->nr; thread++) { int fd, group_fd, *evsel_fd; - evsel_fd = FD(evsel, cpu, thread); + evsel_fd = FD(evsel, idx, thread); if (evsel_fd == NULL) return -EINVAL; - err = get_group_fd(evsel, cpu, thread, &group_fd); + err = get_group_fd(evsel, idx, thread, &group_fd); if (err < 0) return err; fd = sys_perf_event_open(&evsel->attr, threads->map[thread].pid, - cpus->map[cpu], group_fd, 0); + cpu, group_fd, 0); if (fd < 0) return -errno; @@ -165,12 +169,12 @@ int perf_evsel__open(struct perf_evsel *evsel, 
struct perf_cpu_map *cpus, return err; } -static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu) +static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx) { int thread; for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { - int *fd = FD(evsel, cpu, thread); + int *fd = FD(evsel, cpu_map_idx, thread); if (fd && *fd >= 0) { close(*fd); @@ -181,10 +185,8 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu) void perf_evsel__close_fd(struct perf_evsel *evsel) { - int cpu; - - for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) - perf_evsel__close_fd_cpu(evsel, cpu); + for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++) + perf_evsel__close_fd_cpu(evsel, idx); } void perf_evsel__free_fd(struct perf_evsel *evsel) @@ -202,29 +204,29 @@ void perf_evsel__close(struct perf_evsel *evsel) perf_evsel__free_fd(evsel); } -void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu) +void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx) { if (evsel->fd == NULL) return; - perf_evsel__close_fd_cpu(evsel, cpu); + perf_evsel__close_fd_cpu(evsel, cpu_map_idx); } void perf_evsel__munmap(struct perf_evsel *evsel) { - int cpu, thread; + int idx, thread; if (evsel->fd == NULL || evsel->mmap == NULL) return; - for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { + for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) { for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { - int *fd = FD(evsel, cpu, thread); + int *fd = FD(evsel, idx, thread); if (fd == NULL || *fd < 0) continue; - perf_mmap__munmap(MMAP(evsel, cpu, thread)); + perf_mmap__munmap(MMAP(evsel, idx, thread)); } } @@ -234,7 +236,7 @@ void perf_evsel__munmap(struct perf_evsel *evsel) int perf_evsel__mmap(struct perf_evsel *evsel, int pages) { - int ret, cpu, thread; + int ret, idx, thread; struct perf_mmap_param mp = { .prot = PROT_READ | PROT_WRITE, .mask = (pages * page_size) - 1, @@ -246,18 +248,18 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages) if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0) return -ENOMEM; - for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { + for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) { for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { - int *fd = FD(evsel, cpu, thread); + int *fd = FD(evsel, idx, thread); struct perf_mmap *map; if (fd == NULL || *fd < 0) continue; - map = MMAP(evsel, cpu, thread); + map = MMAP(evsel, idx, thread); perf_mmap__init(map, NULL, false, NULL); - ret = perf_mmap__mmap(map, &mp, *fd, cpu); + ret = perf_mmap__mmap(map, &mp, *fd, idx); if (ret) { perf_evsel__munmap(evsel); return ret; @@ -268,14 +270,14 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages) return 0; } -void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread) +void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread) { - int *fd = FD(evsel, cpu, thread); + int *fd = FD(evsel, cpu_map_idx, thread); - if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL) + if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL) return NULL; - return MMAP(evsel, cpu, thread)->base; + return MMAP(evsel, cpu_map_idx, thread)->base; } int perf_evsel__read_size(struct perf_evsel *evsel) @@ -303,19 +305,19 @@ int perf_evsel__read_size(struct perf_evsel *evsel) return size; } -int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, +int perf_evsel__read(struct perf_evsel *evsel, int 
cpu_map_idx, int thread, struct perf_counts_values *count) { size_t size = perf_evsel__read_size(evsel); - int *fd = FD(evsel, cpu, thread); + int *fd = FD(evsel, cpu_map_idx, thread); memset(count, 0, sizeof(*count)); if (fd == NULL || *fd < 0) return -EINVAL; - if (MMAP(evsel, cpu, thread) && - !perf_mmap__read_self(MMAP(evsel, cpu, thread), count)) + if (MMAP(evsel, cpu_map_idx, thread) && + !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count)) return 0; if (readn(*fd, count->values, size) <= 0) @@ -326,13 +328,13 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ioc, void *arg, - int cpu) + int cpu_map_idx) { int thread; for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { int err; - int *fd = FD(evsel, cpu, thread); + int *fd = FD(evsel, cpu_map_idx, thread); if (fd == NULL || *fd < 0) return -1; @@ -346,9 +348,9 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, return 0; } -int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu) +int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx) { - return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu); + return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx); } int perf_evsel__enable(struct perf_evsel *evsel) @@ -361,9 +363,9 @@ int perf_evsel__enable(struct perf_evsel *evsel) return err; } -int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu) +int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx) { - return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu); + return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx); } int perf_evsel__disable(struct perf_evsel *evsel) diff --git a/tools/lib/perf/include/perf/evsel.h b/tools/lib/perf/include/perf/evsel.h index f401c7484bec..2a9516b42d15 100644 --- a/tools/lib/perf/include/perf/evsel.h +++ b/tools/lib/perf/include/perf/evsel.h @@ -28,16 +28,16 @@ LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel); LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads); LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel); -LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu); +LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx); LIBPERF_API int perf_evsel__mmap(struct perf_evsel *evsel, int pages); LIBPERF_API void perf_evsel__munmap(struct perf_evsel *evsel); -LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread); -LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, +LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread); +LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread, struct perf_counts_values *count); LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel); -LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu); +LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx); LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel); -LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu); +LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx); LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel); LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel); 
LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel); -- cgit v1.2.3 From 47ffe806674f67e729627edd689b10827b1790eb Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:31 -0800 Subject: libperf: Use cpu not index for evsel mmap Fix issue where evsel's CPU map index was being used as the mmap cpu. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-29-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/evsel.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index 8028b5a4da69..f1e1665ef4bd 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -252,6 +252,7 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages) for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { int *fd = FD(evsel, idx, thread); struct perf_mmap *map; + int cpu = perf_cpu_map__cpu(evsel->cpus, idx); if (fd == NULL || *fd < 0) continue; @@ -259,7 +260,7 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages) map = MMAP(evsel, idx, thread); perf_mmap__init(map, NULL, false, NULL); - ret = perf_mmap__mmap(map, &mp, *fd, idx); + ret = perf_mmap__mmap(map, &mp, *fd, cpu); if (ret) { perf_evsel__munmap(evsel); return ret; -- cgit v1.2.3 From 7316268ff740c29dfb52649ff8074a5aa17ec0ce Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:32 -0800 Subject: perf counts: Switch name cpu to cpu_map_idx Try to reduce confusion in particular when the cpu map doesn't contain an entry for every CPU. 
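Both of these fixes rest on the inverse lookup, going from a map index back to the real CPU. A minimal standalone sketch, with map_cpu() mimicking the shape of perf_cpu_map__cpu() on toy data:

#include <stdio.h>

static int map_cpu(const int *map, int nr, int idx)
{
	/* Out-of-range indices yield -1, like an empty map slot. */
	return (idx >= 0 && idx < nr) ? map[idx] : -1;
}

int main(void)
{
	const int cpus[] = { 0, 2, 3 };

	for (int idx = 0; idx < 3; idx++)
		printf("mmap slot %d binds to CPU %d\n",
		       idx, map_cpu(cpus, 3, idx));
	return 0;
}

Passing the slot number itself to the mmap would have bound slot 1 to CPU 1, a CPU this map does not even contain.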
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-30-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/counts.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/util/counts.h b/tools/perf/util/counts.h index 3e275e9c60d1..5de275194f2b 100644 --- a/tools/perf/util/counts.h +++ b/tools/perf/util/counts.h @@ -18,21 +18,21 @@ struct perf_counts { static inline struct perf_counts_values* -perf_counts(struct perf_counts *counts, int cpu, int thread) +perf_counts(struct perf_counts *counts, int cpu_map_idx, int thread) { - return xyarray__entry(counts->values, cpu, thread); + return xyarray__entry(counts->values, cpu_map_idx, thread); } static inline bool -perf_counts__is_loaded(struct perf_counts *counts, int cpu, int thread) +perf_counts__is_loaded(struct perf_counts *counts, int cpu_map_idx, int thread) { - return *((bool *) xyarray__entry(counts->loaded, cpu, thread)); + return *((bool *) xyarray__entry(counts->loaded, cpu_map_idx, thread)); } static inline void -perf_counts__set_loaded(struct perf_counts *counts, int cpu, int thread, bool loaded) +perf_counts__set_loaded(struct perf_counts *counts, int cpu_map_idx, int thread, bool loaded) { - *((bool *) xyarray__entry(counts->loaded, cpu, thread)) = loaded; + *((bool *) xyarray__entry(counts->loaded, cpu_map_idx, thread)) = loaded; } struct perf_counts *perf_counts__new(int ncpus, int nthreads); -- cgit v1.2.3 From ab90caa7b2d0b708cfee16b33325ca24de4d8f25 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:33 -0800 Subject: perf stat: Rename aggr_data cpu to imply it's an index Trying to make cpu maps less error prone. 
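A standalone sketch of the aggregation callback this rename touches; the structs are toy reductions of aggr_data and perf_counts, laid out by map index:

#include <stdio.h>

struct counts { long val, ena, run; };
struct aggr_data { long val, ena, run; int cpu_map_idx; };

/* Accumulate one counter's values; per_idx is laid out by map index. */
static void counter_cb(const struct counts *per_idx, struct aggr_data *ad)
{
	ad->val += per_idx[ad->cpu_map_idx].val;
	ad->ena += per_idx[ad->cpu_map_idx].ena;
	ad->run += per_idx[ad->cpu_map_idx].run;
}

int main(void)
{
	/* Three map slots for CPUs 0, 2 and 3: slot number != CPU number. */
	struct counts per_idx[] = { { 10, 1, 1 }, { 20, 1, 1 }, { 30, 1, 1 } };
	struct aggr_data ad = { .cpu_map_idx = 2 };	/* CPU 3's slot */

	counter_cb(per_idx, &ad);
	printf("val=%ld ena=%ld run=%ld\n", ad.val, ad.ena, ad.run);
	return 0;
}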
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-31-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-display.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index f48d1678861c..7e933a8fee68 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -626,7 +626,7 @@ struct aggr_data { u64 ena, run, val; struct aggr_cpu_id id; int nr; - int cpu; + int cpu_map_idx; }; static void aggr_cb(struct perf_stat_config *config, @@ -878,9 +878,9 @@ static void counter_cb(struct perf_stat_config *config __maybe_unused, { struct aggr_data *ad = data; - ad->val += perf_counts(counter->counts, ad->cpu, 0)->val; - ad->ena += perf_counts(counter->counts, ad->cpu, 0)->ena; - ad->run += perf_counts(counter->counts, ad->cpu, 0)->run; + ad->val += perf_counts(counter->counts, ad->cpu_map_idx, 0)->val; + ad->ena += perf_counts(counter->counts, ad->cpu_map_idx, 0)->ena; + ad->run += perf_counts(counter->counts, ad->cpu_map_idx, 0)->run; } /* @@ -897,7 +897,7 @@ static void print_counter(struct perf_stat_config *config, struct aggr_cpu_id id; for (cpu = 0; cpu < evsel__nr_cpus(counter); cpu++) { - struct aggr_data ad = { .cpu = cpu }; + struct aggr_data ad = { .cpu_map_idx = cpu }; if (!collect_data(config, counter, counter_cb, &ad)) return; -- cgit v1.2.3 From 7ea82fbee4598e51e8bf47566b252cd5745d5b17 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:34 -0800 Subject: perf stat: Use perf_cpu_map__for_each_cpu() Correct in print_counter() where an index was being used as a cpu. 
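For reference, a sketch of how a (cpu, idx) pair iterator of this kind can expand. The real perf_cpu_map__for_each_cpu() body is truncated in the hunk quoted earlier in the series, so the loop condition and increment below are an assumption about its shape, not a quote:

#include <stdio.h>

#define NR 3
static const int map[NR] = { 0, 2, 3 };

static int cpu_at(int idx)
{
	return idx < NR ? map[idx] : -1;	/* -1 once the map is exhausted */
}

/* Assumed shape: advance idx, refresh cpu, stop at the end of the map. */
#define for_each_map_cpu(cpu, idx) \
	for ((idx) = 0, (cpu) = cpu_at(idx); (idx) < NR; \
	     (idx)++, (cpu) = cpu_at(idx))

int main(void)
{
	int cpu, idx;

	for_each_map_cpu(cpu, idx)
		printf("idx=%d cpu=%d\n", idx, cpu);
	return 0;
}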
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-32-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-display.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 7e933a8fee68..0f192360b6c6 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -4,6 +4,7 @@ #include #include #include +#include #include "color.h" #include "counts.h" #include "evlist.h" @@ -732,7 +733,7 @@ static void print_aggr(struct perf_stat_config *config, evlist__for_each_entry(evlist, counter) { print_counter_aggrdata(config, counter, s, prefix, metric_only, - &first, -1); + &first, /*cpu=*/-1); } if (metric_only) fputc('\n', output); @@ -893,11 +894,11 @@ static void print_counter(struct perf_stat_config *config, FILE *output = config->output; u64 ena, run, val; double uval; - int cpu; + int idx, cpu; struct aggr_cpu_id id; - for (cpu = 0; cpu < evsel__nr_cpus(counter); cpu++) { - struct aggr_data ad = { .cpu_map_idx = cpu }; + perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) { + struct aggr_data ad = { .cpu_map_idx = idx }; if (!collect_data(config, counter, counter_cb, &ad)) return; @@ -1248,7 +1249,7 @@ static void print_percore(struct perf_stat_config *config, print_counter_aggrdata(config, counter, s, prefix, metric_only, - &first, -1); + &first, /*cpu=*/-1); } if (metric_only) -- cgit v1.2.3 From f9551b3f6249cfe8ea5b5f8716675ccf2f6ec737 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:35 -0800 Subject: perf script: Use for each cpu to aid readability Use perf_cpu_map__for_each_cpu() to help with readability. 
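The readability win is easiest to see in a standalone reduction of the loop. Toy data throughout; the per-thread values are laid out by map index here purely for illustration:

#include <stdio.h>

int main(void)
{
	const int cpus[] = { 0, 2, 3 };
	const int pids[] = { 1234, 1235 };
	const long val[2][3] = { { 5, 6, 7 }, { 8, 9, 10 } };	/* [thread][idx] */

	for (int thread = 0; thread < 2; thread++) {
		for (int idx = 0; idx < 3; idx++) {
			int cpu = cpus[idx];	/* the pair the iterator yields */

			printf("%3d %8d %15ld\n", cpu, pids[thread], val[thread][idx]);
		}
	}
	return 0;
}

The printed CPU is the iterator's own variable; the old form had to reach back through counter->core.cpus->map[cpu] to recover it.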
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-33-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index c9b3002ec254..f40319144856 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2115,8 +2115,7 @@ static struct scripting_ops *scripting_ops; static void __process_stat(struct evsel *counter, u64 tstamp) { int nthreads = perf_thread_map__nr(counter->core.threads); - int ncpus = evsel__nr_cpus(counter); - int cpu, thread; + int idx, cpu, thread; static int header_printed; if (counter->core.system_wide) @@ -2129,13 +2128,13 @@ static void __process_stat(struct evsel *counter, u64 tstamp) } for (thread = 0; thread < nthreads; thread++) { - for (cpu = 0; cpu < ncpus; cpu++) { + perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) { struct perf_counts_values *counts; counts = perf_counts(counter->counts, cpu, thread); printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n", - counter->core.cpus->map[cpu], + cpu, perf_thread_map__pid(counter->core.threads, thread), counts->val, counts->ena, -- cgit v1.2.3 From 80b82f3b65e94ba22d3f12a98f7ecc56cc14c903 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:36 -0800 Subject: libperf: Allow NULL in perf_cpu_map__idx() Return -1, i.e. not found, if NULL is passed. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-34-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/cpumap.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index 3c36a06771af..eacea3ab965a 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -270,8 +270,13 @@ bool perf_cpu_map__empty(const struct perf_cpu_map *map) int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu) { - int low = 0, high = cpus->nr; + int low, high; + if (!cpus) + return -1; + + low = 0; + high = cpus->nr; while (low < high) { int idx = (low + high) / 2, cpu_at_idx = cpus->map[idx]; -- cgit v1.2.3 From 472832d2c000b9611feaea66fe521055c3dbf17a Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:37 -0800 Subject: perf evlist: Refactor evlist__for_each_cpu() Previously evlist__for_each_cpu() needed to iterate over the evlist in an inner loop and call "skip" routines. Refactor this so that the iterator is smarter and the next function can update both the current CPU and evsel. 
By using a cpu map index, fix apparent off-by-1 in __run_perf_stat's call to perf_evsel__close_cpu(). Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-35-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 179 ++++++++++++++++++++++------------------------ tools/perf/util/evlist.c | 146 ++++++++++++++++++++----------------- tools/perf/util/evlist.h | 50 +++++++++++-- tools/perf/util/evsel.h | 1 - 4 files changed, 210 insertions(+), 166 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index a518fcf0b3f8..f84116c9e016 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -405,36 +405,33 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) static int read_affinity_counters(struct timespec *rs) { - struct evsel *counter; - struct affinity affinity; - int i, ncpus, cpu; + struct evlist_cpu_iterator evlist_cpu_itr; + struct affinity saved_affinity, *affinity; if (all_counters_use_bpf) return 0; - if (affinity__setup(&affinity) < 0) + if (!target__has_cpu(&target) || target__has_per_thread(&target)) + affinity = NULL; + else if (affinity__setup(&saved_affinity) < 0) return -1; + else + affinity = &saved_affinity; - ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); - if (!target__has_cpu(&target) || target__has_per_thread(&target)) - ncpus = 1; - evlist__for_each_cpu(evsel_list, i, cpu) { - if (i >= ncpus) - break; - affinity__set(&affinity, cpu); + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { + struct evsel *counter = evlist_cpu_itr.evsel; - evlist__for_each_entry(evsel_list, counter) { - if (evsel__cpu_iter_skip(counter, cpu)) - continue; - if (evsel__is_bpf(counter)) - continue; - if (!counter->err) { - counter->err = read_counter_cpu(counter, rs, - counter->cpu_iter - 1); - } + if (evsel__is_bpf(counter)) + continue; + + if (!counter->err) { + counter->err = read_counter_cpu(counter, rs, + evlist_cpu_itr.cpu_map_idx); } } - affinity__cleanup(&affinity); + if (affinity) + affinity__cleanup(&saved_affinity); + return 0; } @@ -788,8 +785,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) int status = 0; const bool forks = (argc > 0); bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; + struct evlist_cpu_iterator evlist_cpu_itr; struct affinity affinity; - int i, cpu, err; + int err; bool second_pass = false; if (forks) { @@ -813,102 +811,97 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) all_counters_use_bpf = false; } - evlist__for_each_cpu (evsel_list, i, cpu) { + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) { + counter = evlist_cpu_itr.evsel; + /* * bperf calls evsel__open_per_cpu() in bperf__load(), so * no need to call it again here. 
*/ if (target.use_bpf) break; - affinity__set(&affinity, cpu); - evlist__for_each_entry(evsel_list, counter) { - if (evsel__cpu_iter_skip(counter, cpu)) + if (counter->reset_group || counter->errored) + continue; + if (evsel__is_bpf(counter)) + continue; +try_again: + if (create_perf_stat_counter(counter, &stat_config, &target, + evlist_cpu_itr.cpu_map_idx) < 0) { + + /* + * Weak group failed. We cannot just undo this here + * because earlier CPUs might be in group mode, and the kernel + * doesn't support mixing group and non group reads. Defer + * it to later. + * Don't close here because we're in the wrong affinity. + */ + if ((errno == EINVAL || errno == EBADF) && + evsel__leader(counter) != counter && + counter->weak_group) { + evlist__reset_weak_group(evsel_list, counter, false); + assert(counter->reset_group); + second_pass = true; continue; - if (counter->reset_group || counter->errored) + } + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again; + case COUNTER_SKIP: continue; - if (evsel__is_bpf(counter)) + default: + break; + } + + } + counter->supported = true; + } + + if (second_pass) { + /* + * Now redo all the weak group after closing them, + * and also close errored counters. + */ + + /* First close errored or weak retry */ + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) { + counter = evlist_cpu_itr.evsel; + + if (!counter->reset_group && !counter->errored) continue; -try_again: + + perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); + } + /* Now reopen weak */ + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) { + counter = evlist_cpu_itr.evsel; + + if (!counter->reset_group && !counter->errored) + continue; + if (!counter->reset_group) + continue; +try_again_reset: + pr_debug2("reopening weak %s\n", evsel__name(counter)); if (create_perf_stat_counter(counter, &stat_config, &target, - counter->cpu_iter - 1) < 0) { - - /* - * Weak group failed. We cannot just undo this here - * because earlier CPUs might be in group mode, and the kernel - * doesn't support mixing group and non group reads. Defer - * it to later. - * Don't close here because we're in the wrong affinity. - */ - if ((errno == EINVAL || errno == EBADF) && - evsel__leader(counter) != counter && - counter->weak_group) { - evlist__reset_weak_group(evsel_list, counter, false); - assert(counter->reset_group); - second_pass = true; - continue; - } + evlist_cpu_itr.cpu_map_idx) < 0) { switch (stat_handle_error(counter)) { case COUNTER_FATAL: return -1; case COUNTER_RETRY: - goto try_again; + goto try_again_reset; case COUNTER_SKIP: continue; default: break; } - } counter->supported = true; } } - - if (second_pass) { - /* - * Now redo all the weak group after closing them, - * and also close errored counters. 
- */ - - evlist__for_each_cpu(evsel_list, i, cpu) { - affinity__set(&affinity, cpu); - /* First close errored or weak retry */ - evlist__for_each_entry(evsel_list, counter) { - if (!counter->reset_group && !counter->errored) - continue; - if (evsel__cpu_iter_skip_no_inc(counter, cpu)) - continue; - perf_evsel__close_cpu(&counter->core, counter->cpu_iter); - } - /* Now reopen weak */ - evlist__for_each_entry(evsel_list, counter) { - if (!counter->reset_group && !counter->errored) - continue; - if (evsel__cpu_iter_skip(counter, cpu)) - continue; - if (!counter->reset_group) - continue; -try_again_reset: - pr_debug2("reopening weak %s\n", evsel__name(counter)); - if (create_perf_stat_counter(counter, &stat_config, &target, - counter->cpu_iter - 1) < 0) { - - switch (stat_handle_error(counter)) { - case COUNTER_FATAL: - return -1; - case COUNTER_RETRY: - goto try_again_reset; - case COUNTER_SKIP: - continue; - default: - break; - } - } - counter->supported = true; - } - } - } affinity__cleanup(&affinity); evlist__for_each_entry(evsel_list, counter) { diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 5f92319ce258..39d294f6c321 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -342,36 +342,65 @@ static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel) return perf_thread_map__nr(evlist->core.threads); } -void evlist__cpu_iter_start(struct evlist *evlist) -{ - struct evsel *pos; - - /* - * Reset the per evsel cpu_iter. This is needed because - * each evsel's cpumap may have a different index space, - * and some operations need the index to modify - * the FD xyarray (e.g. open, close) - */ - evlist__for_each_entry(evlist, pos) - pos->cpu_iter = 0; -} +struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity) +{ + struct evlist_cpu_iterator itr = { + .container = evlist, + .evsel = evlist__first(evlist), + .cpu_map_idx = 0, + .evlist_cpu_map_idx = 0, + .evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus), + .cpu = -1, + .affinity = affinity, + }; -bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu) -{ - if (ev->cpu_iter >= ev->core.cpus->nr) - return true; - if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu) - return true; - return false; + if (itr.affinity) { + itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0); + affinity__set(itr.affinity, itr.cpu); + itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu); + /* + * If this CPU isn't in the evsel's cpu map then advance through + * the list. 
+ */ + if (itr.cpu_map_idx == -1) + evlist_cpu_iterator__next(&itr); + } + return itr; +} + +void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr) +{ + while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) { + evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel); + evlist_cpu_itr->cpu_map_idx = + perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, + evlist_cpu_itr->cpu); + if (evlist_cpu_itr->cpu_map_idx != -1) + return; + } + evlist_cpu_itr->evlist_cpu_map_idx++; + if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) { + evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container); + evlist_cpu_itr->cpu = + perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus, + evlist_cpu_itr->evlist_cpu_map_idx); + if (evlist_cpu_itr->affinity) + affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu); + evlist_cpu_itr->cpu_map_idx = + perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, + evlist_cpu_itr->cpu); + /* + * If this CPU isn't in the evsel's cpu map then advance through + * the list. + */ + if (evlist_cpu_itr->cpu_map_idx == -1) + evlist_cpu_iterator__next(evlist_cpu_itr); + } } -bool evsel__cpu_iter_skip(struct evsel *ev, int cpu) +bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr) { - if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) { - ev->cpu_iter++; - return false; - } - return true; + return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr; } static int evsel__strcmp(struct evsel *pos, char *evsel_name) @@ -400,31 +429,26 @@ static int evlist__is_enabled(struct evlist *evlist) static void __evlist__disable(struct evlist *evlist, char *evsel_name) { struct evsel *pos; + struct evlist_cpu_iterator evlist_cpu_itr; struct affinity affinity; - int cpu, i, imm = 0; bool has_imm = false; if (affinity__setup(&affinity) < 0) return; /* Disable 'immediate' events last */ - for (imm = 0; imm <= 1; imm++) { - evlist__for_each_cpu(evlist, i, cpu) { - affinity__set(&affinity, cpu); - - evlist__for_each_entry(evlist, pos) { - if (evsel__strcmp(pos, evsel_name)) - continue; - if (evsel__cpu_iter_skip(pos, cpu)) - continue; - if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd) - continue; - if (pos->immediate) - has_imm = true; - if (pos->immediate != imm) - continue; - evsel__disable_cpu(pos, pos->cpu_iter - 1); - } + for (int imm = 0; imm <= 1; imm++) { + evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) { + pos = evlist_cpu_itr.evsel; + if (evsel__strcmp(pos, evsel_name)) + continue; + if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd) + continue; + if (pos->immediate) + has_imm = true; + if (pos->immediate != imm) + continue; + evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx); } if (!has_imm) break; @@ -462,24 +486,19 @@ void evlist__disable_evsel(struct evlist *evlist, char *evsel_name) static void __evlist__enable(struct evlist *evlist, char *evsel_name) { struct evsel *pos; + struct evlist_cpu_iterator evlist_cpu_itr; struct affinity affinity; - int cpu, i; if (affinity__setup(&affinity) < 0) return; - evlist__for_each_cpu(evlist, i, cpu) { - affinity__set(&affinity, cpu); - - evlist__for_each_entry(evlist, pos) { - if (evsel__strcmp(pos, evsel_name)) - continue; - if (evsel__cpu_iter_skip(pos, cpu)) - continue; - if (!evsel__is_group_leader(pos) || !pos->core.fd) - continue; - evsel__enable_cpu(pos, pos->cpu_iter - 1); - } + evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) { + pos = evlist_cpu_itr.evsel; + if 
(evsel__strcmp(pos, evsel_name)) + continue; + if (!evsel__is_group_leader(pos) || !pos->core.fd) + continue; + evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx); } affinity__cleanup(&affinity); evlist__for_each_entry(evlist, pos) { @@ -1264,8 +1283,8 @@ void evlist__set_selected(struct evlist *evlist, struct evsel *evsel) void evlist__close(struct evlist *evlist) { struct evsel *evsel; + struct evlist_cpu_iterator evlist_cpu_itr; struct affinity affinity; - int cpu, i; /* * With perf record core.cpus is usually NULL. @@ -1279,15 +1298,12 @@ void evlist__close(struct evlist *evlist) if (affinity__setup(&affinity) < 0) return; - evlist__for_each_cpu(evlist, i, cpu) { - affinity__set(&affinity, cpu); - evlist__for_each_entry_reverse(evlist, evsel) { - if (evsel__cpu_iter_skip(evsel, cpu)) - continue; - perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1); - } + evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) { + perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core, + evlist_cpu_itr.cpu_map_idx); } + affinity__cleanup(&affinity); evlist__for_each_entry_reverse(evlist, evsel) { perf_evsel__free_fd(&evsel->core); diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 27594900a052..57828ebfcb61 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -327,17 +327,53 @@ void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel); #define evlist__for_each_entry_safe(evlist, tmp, evsel) \ __evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel) -#define evlist__for_each_cpu(evlist, index, cpu) \ - evlist__cpu_iter_start(evlist); \ - perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus) +/** Iterator state for evlist__for_each_cpu */ +struct evlist_cpu_iterator { + /** The list being iterated through. */ + struct evlist *container; + /** The current evsel of the iterator. */ + struct evsel *evsel; + /** The CPU map index corresponding to the evsel->core.cpus for the current CPU. */ + int cpu_map_idx; + /** + * The CPU map index corresponding to evlist->core.all_cpus for the + * current CPU. Distinct from cpu_map_idx as the evsel's cpu map may + * contain fewer entries. + */ + int evlist_cpu_map_idx; + /** The number of CPU map entries in evlist->core.all_cpus. */ + int evlist_cpu_map_nr; + /** The current CPU of the iterator. */ + int cpu; + /** If present, used to set the affinity when switching between CPUs. */ + struct affinity *affinity; +}; + +/** + * evlist__for_each_cpu - without affinity, iterate over the evlist. With + * affinity, iterate over all CPUs and then the evlist + * for each evsel on that CPU. When switching between + * CPUs the affinity is set to the CPU to avoid IPIs + * during syscalls. + * @evlist_cpu_itr: the iterator instance. + * @evlist: evlist instance to iterate. + * @affinity: NULL or used to set the affinity to the current CPU. + */ +#define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \ + for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \ + !evlist_cpu_iterator__end(&evlist_cpu_itr); \ + evlist_cpu_iterator__next(&evlist_cpu_itr)) + +/** Returns an iterator set to the first CPU/evsel of evlist. */ +struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity); +/** Move to next element in iterator, updating CPU, evsel and the affinity. */ +void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr); +/** Returns true when iterator is at the end of the CPUs and evlist. 
*/ +bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr); struct evsel *evlist__get_tracking_event(struct evlist *evlist); void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel); -void evlist__cpu_iter_start(struct evlist *evlist); -bool evsel__cpu_iter_skip(struct evsel *ev, int cpu); -bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu); - struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str); struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 99aa3363def7..7cb7c9c77ab0 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -121,7 +121,6 @@ struct evsel { bool errored; struct hashmap *per_pkg_mask; int err; - int cpu_iter; struct { evsel__sb_cb_t *cb; void *data; -- cgit v1.2.3 From 7ac0089d138f80dcd7ba8ca368a9b2bdfe780b16 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:38 -0800 Subject: perf evsel: Pass cpu not cpu map index to synthesize evsel__write_stat_event() was incorrectly passing a cpu map index rather than a CPU to perf_event__synthesize_stat(). Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-36-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f84116c9e016..ed993c20772f 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -327,10 +327,11 @@ static int write_stat_round_event(u64 tm, u64 type) #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) -static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, +static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread, struct perf_counts_values *count) { - struct perf_sample_id *sid = SID(counter, cpu, thread); + struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); + int cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, process_synthesized_event, NULL); -- cgit v1.2.3 From da8c94c065174099853a207d9716a49d339b265f Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:39 -0800 Subject: perf stat: Correct variable name for read counter Switch from cpu to cpu_map_idx to reduce confusion. 
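 To make the distinction concrete (an illustrative sketch, not part of this patch; headers elided): a cpu map index is a dense position within an evsel's cpu map, while a CPU is the logical CPU number the kernel sees, and on sparse maps the two diverge:

	/* Assumed example: a map holding CPUs 2, 4 and 7. */
	struct perf_cpu_map *map = perf_cpu_map__new("2,4,7");
	int cpu = perf_cpu_map__cpu(map, 1);	/* map index 1 -> CPU 4 */
	int idx = perf_cpu_map__idx(map, 7);	/* CPU 7 -> map index 2 */
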
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-37-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 24 ++++++++++++------------ tools/perf/util/evsel.c | 30 +++++++++++++++--------------- tools/perf/util/evsel.h | 16 ++++++++-------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index ed993c20772f..dfb8f7847e6c 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -337,25 +337,25 @@ static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 t process_synthesized_event, NULL); } -static int read_single_counter(struct evsel *counter, int cpu, +static int read_single_counter(struct evsel *counter, int cpu_map_idx, int thread, struct timespec *rs) { if (counter->tool_event == PERF_TOOL_DURATION_TIME) { u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; struct perf_counts_values *count = - perf_counts(counter->counts, cpu, thread); + perf_counts(counter->counts, cpu_map_idx, thread); count->ena = count->run = val; count->val = val; return 0; } - return evsel__read_counter(counter, cpu, thread); + return evsel__read_counter(counter, cpu_map_idx, thread); } /* * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */ -static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) +static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx) { int nthreads = perf_thread_map__nr(evsel_list->core.threads); int thread; @@ -369,24 +369,24 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) for (thread = 0; thread < nthreads; thread++) { struct perf_counts_values *count; - count = perf_counts(counter->counts, cpu, thread); + count = perf_counts(counter->counts, cpu_map_idx, thread); /* * The leader's group read loads data into its group members * (via evsel__read_counter()) and sets their count->loaded. 
*/ - if (!perf_counts__is_loaded(counter->counts, cpu, thread) && - read_single_counter(counter, cpu, thread, rs)) { + if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && + read_single_counter(counter, cpu_map_idx, thread, rs)) { counter->counts->scaled = -1; - perf_counts(counter->counts, cpu, thread)->ena = 0; - perf_counts(counter->counts, cpu, thread)->run = 0; + perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; + perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; return -1; } - perf_counts__set_loaded(counter->counts, cpu, thread, false); + perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); if (STAT_RECORD) { - if (evsel__write_stat_event(counter, cpu, thread, count)) { + if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) { pr_err("failed to write stat event\n"); return -1; } @@ -396,7 +396,7 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) fprintf(stat_config.output, "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", evsel__name(counter), - cpu, + perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx), count->val, count->ena, count->run); } } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 2de569a1a272..8f539a81b30b 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1476,11 +1476,11 @@ void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread, count->run = count->run - tmp.run; } -static int evsel__read_one(struct evsel *evsel, int cpu, int thread) +static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread) { - struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread); + struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread); - return perf_evsel__read(&evsel->core, cpu, thread, count); + return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count); } static void evsel__set_count(struct evsel *counter, int cpu, int thread, u64 val, u64 ena, u64 run) @@ -1530,7 +1530,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu, int thread, return 0; } -static int evsel__read_group(struct evsel *leader, int cpu, int thread) +static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread) { struct perf_stat_evsel *ps = leader->stats; u64 read_format = leader->core.attr.read_format; @@ -1551,42 +1551,42 @@ static int evsel__read_group(struct evsel *leader, int cpu, int thread) ps->group_data = data; } - if (FD(leader, cpu, thread) < 0) + if (FD(leader, cpu_map_idx, thread) < 0) return -EINVAL; - if (readn(FD(leader, cpu, thread), data, size) <= 0) + if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0) return -errno; - return evsel__process_group_data(leader, cpu, thread, data); + return evsel__process_group_data(leader, cpu_map_idx, thread, data); } -int evsel__read_counter(struct evsel *evsel, int cpu, int thread) +int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread) { u64 read_format = evsel->core.attr.read_format; if (read_format & PERF_FORMAT_GROUP) - return evsel__read_group(evsel, cpu, thread); + return evsel__read_group(evsel, cpu_map_idx, thread); - return evsel__read_one(evsel, cpu, thread); + return evsel__read_one(evsel, cpu_map_idx, thread); } -int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale) +int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale) { struct perf_counts_values count; size_t nv = scale ? 
3 : 1; - if (FD(evsel, cpu, thread) < 0) + if (FD(evsel, cpu_map_idx, thread) < 0) return -EINVAL; if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0) return -ENOMEM; - if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0) + if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0) return -errno; - evsel__compute_deltas(evsel, cpu, thread, &count); + evsel__compute_deltas(evsel, cpu_map_idx, thread, &count); perf_counts_values__scale(&count, scale, NULL); - *perf_counts(evsel->counts, cpu, thread) = count; + *perf_counts(evsel->counts, cpu_map_idx, thread) = count; return 0; } diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 7cb7c9c77ab0..c3db41282400 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -333,32 +333,32 @@ static inline bool evsel__match2(struct evsel *e1, struct evsel *e2) (e1->core.attr.config == e2->core.attr.config); } -int evsel__read_counter(struct evsel *evsel, int cpu, int thread); +int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread); -int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale); +int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale); /** * evsel__read_on_cpu - Read out the results on a CPU and thread * * @evsel - event selector to read value - * @cpu - CPU of interest + * @cpu_map_idx - CPU of interest * @thread - thread of interest */ -static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread) +static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread) { - return __evsel__read_on_cpu(evsel, cpu, thread, false); + return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, false); } /** * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled * * @evsel - event selector to read value - * @cpu - CPU of interest + * @cpu_map_idx - CPU of interest * @thread - thread of interest */ -static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu, int thread) +static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu_map_idx, int thread) { - return __evsel__read_on_cpu(evsel, cpu, thread, true); + return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, true); } int evsel__parse_sample(struct evsel *evsel, union perf_event *event, -- cgit v1.2.3 From 2daa08c4d9cd9d0845094d718920e5d105c11558 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:40 -0800 Subject: perf evsel: Rename CPU around get_group_fd CPU is really a cpu map index, change names to make code more intention revealing. 
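 Why the map index, not the CPU number, is the right key here (an illustrative sketch based on the evsel__match_other_cpu() change below): the per-evsel fd table is an xyarray indexed by (cpu map index, thread), and a hybrid group leader may have a different cpu map than a member, so an index is only transferable between evsels by going through the CPU number:

	/* evsel's map index -> CPU number -> leader's map index. */
	int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
	int leader_idx = perf_cpu_map__idx(leader->core.cpus, cpu);
	/* leader_idx == -1 means the leader has no entry for that CPU. */
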
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-38-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 8f539a81b30b..45338be3f501 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1591,27 +1591,27 @@ int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool } static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other, - int cpu) + int cpu_map_idx) { - int cpuid; + int cpu; - cpuid = perf_cpu_map__cpu(evsel->core.cpus, cpu); - return perf_cpu_map__idx(other->core.cpus, cpuid); + cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx); + return perf_cpu_map__idx(other->core.cpus, cpu); } -static int evsel__hybrid_group_cpu(struct evsel *evsel, int cpu) +static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx) { struct evsel *leader = evsel__leader(evsel); if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) || (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) { - return evsel__match_other_cpu(evsel, leader, cpu); + return evsel__match_other_cpu(evsel, leader, cpu_map_idx); } - return cpu; + return cpu_map_idx; } -static int get_group_fd(struct evsel *evsel, int cpu, int thread) +static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread) { struct evsel *leader = evsel__leader(evsel); int fd; @@ -1625,11 +1625,11 @@ static int get_group_fd(struct evsel *evsel, int cpu, int thread) */ BUG_ON(!leader->core.fd); - cpu = evsel__hybrid_group_cpu(evsel, cpu); - if (cpu == -1) + cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx); + if (cpu_map_idx == -1) return -1; - fd = FD(leader, cpu, thread); + fd = FD(leader, cpu_map_idx, thread); BUG_ON(fd == -1); return fd; -- cgit v1.2.3 From 1fa497d4c01d497e25131ccdd5def6f24dd1f330 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:41 -0800 Subject: perf evsel: Reduce scope of evsel__ignore_missing_thread Move to being static. 
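 A minimal generic sketch of the pattern (names hypothetical): a helper used by only one translation unit gets internal linkage, so the extern prototype is dropped from the header and the compiler can see every caller:

	/* foo.c -- clamp_nonneg() is file-local, hypothetical example. */
	static int clamp_nonneg(int v)
	{
		return v < 0 ? 0 : v;
	}

	int foo_api(int v)	/* the only symbol the header declares */
	{
		return clamp_nonneg(v);
	}
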
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-39-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 8 ++++---- tools/perf/util/evsel.h | 4 ---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 45338be3f501..97348d302156 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1666,10 +1666,10 @@ static int update_fds(struct evsel *evsel, return 0; } -bool evsel__ignore_missing_thread(struct evsel *evsel, - int nr_cpus, int cpu, - struct perf_thread_map *threads, - int thread, int err) +static bool evsel__ignore_missing_thread(struct evsel *evsel, + int nr_cpus, int cpu, + struct perf_thread_map *threads, + int thread, int err) { pid_t ignore_pid = perf_thread_map__pid(threads, thread); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index c3db41282400..84e597f6c395 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -301,10 +301,6 @@ bool evsel__detect_missing_features(struct evsel *evsel); enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX }; bool evsel__increase_rlimit(enum rlimit_action *set_rlimit); -bool evsel__ignore_missing_thread(struct evsel *evsel, - int nr_cpus, int cpu, - struct perf_thread_map *threads, - int thread, int err); bool evsel__precise_ip_fallback(struct evsel *evsel); struct perf_sample; -- cgit v1.2.3 From 6f844b1fdd3bc3a25995ff83edea32a73bfa72d9 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:42 -0800 Subject: perf evsel: Rename variable cpu to index Make naming less error prone. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-40-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 83 +++++++++++++++++++++++++------------------------ tools/perf/util/evsel.h | 6 ++-- tools/perf/util/stat.c | 4 +-- tools/perf/util/stat.h | 2 +- 4 files changed, 48 insertions(+), 47 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 97348d302156..796923c80ff6 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1372,9 +1372,9 @@ int evsel__append_addr_filter(struct evsel *evsel, const char *filter) } /* Caller has to clear disabled after going through all CPUs. 
*/ -int evsel__enable_cpu(struct evsel *evsel, int cpu) +int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx) { - return perf_evsel__enable_cpu(&evsel->core, cpu); + return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx); } int evsel__enable(struct evsel *evsel) @@ -1387,9 +1387,9 @@ int evsel__enable(struct evsel *evsel) } /* Caller has to set disabled after going through all CPUs. */ -int evsel__disable_cpu(struct evsel *evsel, int cpu) +int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx) { - return perf_evsel__disable_cpu(&evsel->core, cpu); + return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx); } int evsel__disable(struct evsel *evsel) @@ -1455,7 +1455,7 @@ void evsel__delete(struct evsel *evsel) free(evsel); } -void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread, +void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread, struct perf_counts_values *count) { struct perf_counts_values tmp; @@ -1463,12 +1463,12 @@ void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread, if (!evsel->prev_raw_counts) return; - if (cpu == -1) { + if (cpu_map_idx == -1) { tmp = evsel->prev_raw_counts->aggr; evsel->prev_raw_counts->aggr = *count; } else { - tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread); - *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count; + tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread); + *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count; } count->val = count->val - tmp.val; @@ -1483,20 +1483,21 @@ static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread) return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count); } -static void evsel__set_count(struct evsel *counter, int cpu, int thread, u64 val, u64 ena, u64 run) +static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread, + u64 val, u64 ena, u64 run) { struct perf_counts_values *count; - count = perf_counts(counter->counts, cpu, thread); + count = perf_counts(counter->counts, cpu_map_idx, thread); count->val = val; count->ena = ena; count->run = run; - perf_counts__set_loaded(counter->counts, cpu, thread, true); + perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true); } -static int evsel__process_group_data(struct evsel *leader, int cpu, int thread, u64 *data) +static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data) { u64 read_format = leader->core.attr.read_format; struct sample_read_value *v; @@ -1515,7 +1516,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu, int thread, v = (struct sample_read_value *) data; - evsel__set_count(leader, cpu, thread, v[0].value, ena, run); + evsel__set_count(leader, cpu_map_idx, thread, v[0].value, ena, run); for (i = 1; i < nr; i++) { struct evsel *counter; @@ -1524,7 +1525,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu, int thread, if (!counter) return -EINVAL; - evsel__set_count(counter, cpu, thread, v[i].value, ena, run); + evsel__set_count(counter, cpu_map_idx, thread, v[i].value, ena, run); } return 0; @@ -1643,16 +1644,16 @@ static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int } static int update_fds(struct evsel *evsel, - int nr_cpus, int cpu_idx, + int nr_cpus, int cpu_map_idx, int nr_threads, int thread_idx) { struct evsel *pos; - if (cpu_idx >= nr_cpus || thread_idx >= nr_threads) + if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads) return -EINVAL; evlist__for_each_entry(evsel->evlist, pos) { 
- nr_cpus = pos != evsel ? nr_cpus : cpu_idx; + nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx; evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx); @@ -1667,7 +1668,7 @@ static int update_fds(struct evsel *evsel, } static bool evsel__ignore_missing_thread(struct evsel *evsel, - int nr_cpus, int cpu, + int nr_cpus, int cpu_map_idx, struct perf_thread_map *threads, int thread, int err) { @@ -1692,7 +1693,7 @@ static bool evsel__ignore_missing_thread(struct evsel *evsel, * We should remove fd for missing_thread first * because thread_map__remove() will decrease threads->nr. */ - if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread)) + if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread)) return false; if (thread_map__remove(threads, thread)) @@ -1974,9 +1975,9 @@ bool evsel__increase_rlimit(enum rlimit_action *set_rlimit) static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads, - int start_cpu, int end_cpu) + int start_cpu_map_idx, int end_cpu_map_idx) { - int cpu, thread, nthreads; + int idx, thread, nthreads; int pid = -1, err, old_errno; enum rlimit_action set_rlimit = NO_CHANGE; @@ -2003,7 +2004,7 @@ fallback_missing_features: display_attr(&evsel->core.attr); - for (cpu = start_cpu; cpu < end_cpu; cpu++) { + for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) { for (thread = 0; thread < nthreads; thread++) { int fd, group_fd; @@ -2014,17 +2015,17 @@ retry_open: if (!evsel->cgrp && !evsel->core.system_wide) pid = perf_thread_map__pid(threads, thread); - group_fd = get_group_fd(evsel, cpu, thread); + group_fd = get_group_fd(evsel, idx, thread); test_attr__ready(); pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", - pid, cpus->map[cpu], group_fd, evsel->open_flags); + pid, cpus->map[idx], group_fd, evsel->open_flags); - fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu], + fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[idx], group_fd, evsel->open_flags); - FD(evsel, cpu, thread) = fd; + FD(evsel, idx, thread) = fd; if (fd < 0) { err = -errno; @@ -2034,10 +2035,10 @@ retry_open: goto try_fallback; } - bpf_counter__install_pe(evsel, cpu, fd); + bpf_counter__install_pe(evsel, idx, fd); if (unlikely(test_attr__enabled)) { - test_attr__open(&evsel->core.attr, pid, cpus->map[cpu], + test_attr__open(&evsel->core.attr, pid, cpus->map[idx], fd, group_fd, evsel->open_flags); } @@ -2078,7 +2079,7 @@ try_fallback: if (evsel__precise_ip_fallback(evsel)) goto retry_open; - if (evsel__ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) { + if (evsel__ignore_missing_thread(evsel, cpus->nr, idx, threads, thread, err)) { /* We just removed 1 thread, so lower the upper nthreads limit. 
*/ nthreads--; @@ -2093,7 +2094,7 @@ try_fallback: if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit)) goto retry_open; - if (err != -EINVAL || cpu > 0 || thread > 0) + if (err != -EINVAL || idx > 0 || thread > 0) goto out_close; if (evsel__detect_missing_features(evsel)) @@ -2105,12 +2106,12 @@ out_close: old_errno = errno; do { while (--thread >= 0) { - if (FD(evsel, cpu, thread) >= 0) - close(FD(evsel, cpu, thread)); - FD(evsel, cpu, thread) = -1; + if (FD(evsel, idx, thread) >= 0) + close(FD(evsel, idx, thread)); + FD(evsel, idx, thread) = -1; } thread = nthreads; - } while (--cpu >= 0); + } while (--idx >= 0); errno = old_errno; return err; } @@ -2127,13 +2128,13 @@ void evsel__close(struct evsel *evsel) perf_evsel__free_id(&evsel->core); } -int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu) +int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx) { - if (cpu == -1) + if (cpu_map_idx == -1) return evsel__open_cpu(evsel, cpus, NULL, 0, cpus ? cpus->nr : 1); - return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1); + return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1); } int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads) @@ -2958,15 +2959,15 @@ struct perf_env *evsel__env(struct evsel *evsel) static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist) { - int cpu, thread; + int cpu_map_idx, thread; - for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) { + for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) { for (thread = 0; thread < xyarray__max_y(evsel->core.fd); thread++) { - int fd = FD(evsel, cpu, thread); + int fd = FD(evsel, cpu_map_idx, thread); if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, - cpu, thread, fd) < 0) + cpu_map_idx, thread, fd) < 0) return -1; } } diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 84e597f6c395..5720ceebffac 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -284,12 +284,12 @@ void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr); int evsel__set_filter(struct evsel *evsel, const char *filter); int evsel__append_tp_filter(struct evsel *evsel, const char *filter); int evsel__append_addr_filter(struct evsel *evsel, const char *filter); -int evsel__enable_cpu(struct evsel *evsel, int cpu); +int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx); int evsel__enable(struct evsel *evsel); int evsel__disable(struct evsel *evsel); -int evsel__disable_cpu(struct evsel *evsel, int cpu); +int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx); -int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu); +int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx); int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads); int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads); diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 995cb5003133..f7f9757eba23 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -531,7 +531,7 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp) int create_perf_stat_counter(struct evsel *evsel, struct perf_stat_config *config, struct target *target, - int cpu) + int cpu_map_idx) { struct perf_event_attr *attr = &evsel->core.attr; struct evsel *leader = evsel__leader(evsel); @@ -585,7 +585,7 @@ int create_perf_stat_counter(struct evsel 
*evsel, } if (target__has_cpu(target) && !target__has_per_thread(target)) - return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu); + return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx); return evsel__open_per_thread(evsel, evsel->core.threads); } diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 32cf24186229..5e25d53e891b 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -248,7 +248,7 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp); int create_perf_stat_counter(struct evsel *evsel, struct perf_stat_config *config, struct target *target, - int cpu); + int cpu_map_idx); void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config, struct target *_target, struct timespec *ts, int argc, const char **argv); -- cgit v1.2.3 From aa11e55a39950c0151e12abd30c7223dfc6f6a2c Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:43 -0800 Subject: perf test: Use perf_cpu_map__for_each_cpu() Clean up variable naming to make cpu and index clearer. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-41-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/openat-syscall-all-cpus.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c index 544db0839b3b..ca0a50e92839 100644 --- a/tools/perf/tests/openat-syscall-all-cpus.c +++ b/tools/perf/tests/openat-syscall-all-cpus.c @@ -22,7 +22,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused, int subtest __maybe_unused) { - int err = -1, fd, cpu; + int err = -1, fd, idx, cpu; struct perf_cpu_map *cpus; struct evsel *evsel; unsigned int nr_openat_calls = 111, i; @@ -58,23 +58,23 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb goto out_evsel_delete; } - for (cpu = 0; cpu < cpus->nr; ++cpu) { - unsigned int ncalls = nr_openat_calls + cpu; + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { + unsigned int ncalls = nr_openat_calls + idx; /* * XXX eventually lift this restriction in a way that * keeps perf building on older glibc installations * without CPU_ALLOC. 
1024 cpus in 2010 still seems * a reasonable upper limit tho :-) */ - if (cpus->map[cpu] >= CPU_SETSIZE) { - pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); + if (cpu >= CPU_SETSIZE) { + pr_debug("Ignoring CPU %d\n", cpu); continue; } - CPU_SET(cpus->map[cpu], &cpu_set); + CPU_SET(cpu, &cpu_set); if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { pr_debug("sched_setaffinity() failed on CPU %d: %s ", - cpus->map[cpu], + cpu, str_error_r(errno, sbuf, sizeof(sbuf))); goto out_close_fd; } @@ -82,29 +82,29 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb fd = openat(0, "/etc/passwd", O_RDONLY); close(fd); } - CPU_CLR(cpus->map[cpu], &cpu_set); + CPU_CLR(cpu, &cpu_set); } evsel->core.cpus = perf_cpu_map__get(cpus); err = 0; - for (cpu = 0; cpu < cpus->nr; ++cpu) { + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { unsigned int expected; - if (cpus->map[cpu] >= CPU_SETSIZE) + if (cpu >= CPU_SETSIZE) continue; - if (evsel__read_on_cpu(evsel, cpu, 0) < 0) { + if (evsel__read_on_cpu(evsel, idx, 0) < 0) { pr_debug("evsel__read_on_cpu\n"); err = -1; break; } - expected = nr_openat_calls + cpu; - if (perf_counts(evsel->counts, cpu, 0)->val != expected) { + expected = nr_openat_calls + idx; + if (perf_counts(evsel->counts, idx, 0)->val != expected) { pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", - expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val); + expected, cpu, perf_counts(evsel->counts, idx, 0)->val); err = -1; } } -- cgit v1.2.3 From 379c224bef724cf52bd3d2364d29fc63f3e743d3 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:44 -0800 Subject: perf stat: Correct check_per_pkg() cpu Code was incorrectly using the cpu map index as the CPU. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-42-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index f7f9757eba23..86ab427e87fc 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -292,11 +292,12 @@ static bool pkg_id_equal(const void *__key1, const void *__key2, return *key1 == *key2; } -static int check_per_pkg(struct evsel *counter, - struct perf_counts_values *vals, int cpu, bool *skip) +static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals, + int cpu_map_idx, bool *skip) { struct hashmap *mask = counter->per_pkg_mask; struct perf_cpu_map *cpus = evsel__cpus(counter); + int cpu = perf_cpu_map__cpu(cpus, cpu_map_idx); int s, d, ret = 0; uint64_t *key; -- cgit v1.2.3 From 5b1af93dbc7e64ab1b872129cfe1f2318cc29c67 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:45 -0800 Subject: perf stat: Swap variable name cpu to index The use of CPU is error prone, switch to cpu_map_idx. 
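 The hazard being removed, as a sketch (per_cpu_state is a hypothetical array indexed by logical CPU number, but the conversion mirrors the check_per_pkg() fix above): a dense map index happens to equal the CPU number on a full 0..N-1 map, so the misuse only breaks on sparse maps:

	/* Wrong: idx is a position in the cpu map, not a CPU number. */
	bool hit = per_cpu_state[idx];
	/* Right: convert the map index to a CPU number first. */
	bool ok = per_cpu_state[perf_cpu_map__cpu(evsel__cpus(counter), idx)];
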
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-43-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-shadow.c | 308 +++++++++++++++++++++--------------------- tools/perf/util/stat.c | 16 +-- tools/perf/util/stat.h | 4 +- 3 files changed, 164 insertions(+), 164 deletions(-) diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 5c7308efa768..10af7804e482 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -32,7 +32,7 @@ struct saved_value { struct evsel *evsel; enum stat_type type; int ctx; - int cpu; + int cpu_map_idx; struct cgroup *cgrp; struct runtime_stat *stat; struct stats stats; @@ -47,8 +47,8 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry) rb_node); const struct saved_value *b = entry; - if (a->cpu != b->cpu) - return a->cpu - b->cpu; + if (a->cpu_map_idx != b->cpu_map_idx) + return a->cpu_map_idx - b->cpu_map_idx; /* * Previously the rbtree was used to link generic metrics. @@ -105,7 +105,7 @@ static void saved_value_delete(struct rblist *rblist __maybe_unused, } static struct saved_value *saved_value_lookup(struct evsel *evsel, - int cpu, + int cpu_map_idx, bool create, enum stat_type type, int ctx, @@ -115,7 +115,7 @@ static struct saved_value *saved_value_lookup(struct evsel *evsel, struct rblist *rblist; struct rb_node *nd; struct saved_value dm = { - .cpu = cpu, + .cpu_map_idx = cpu_map_idx, .evsel = evsel, .type = type, .ctx = ctx, @@ -213,10 +213,10 @@ struct runtime_stat_data { static void update_runtime_stat(struct runtime_stat *st, enum stat_type type, - int cpu, u64 count, + int cpu_map_idx, u64 count, struct runtime_stat_data *rsd) { - struct saved_value *v = saved_value_lookup(NULL, cpu, true, type, + struct saved_value *v = saved_value_lookup(NULL, cpu_map_idx, true, type, rsd->ctx, st, rsd->cgrp); if (v) @@ -229,7 +229,7 @@ static void update_runtime_stat(struct runtime_stat *st, * instruction rates, etc: */ void perf_stat__update_shadow_stats(struct evsel *counter, u64 count, - int cpu, struct runtime_stat *st) + int cpu_map_idx, struct runtime_stat *st) { u64 count_ns = count; struct saved_value *v; @@ -241,88 +241,88 @@ void perf_stat__update_shadow_stats(struct evsel *counter, u64 count, count *= counter->scale; if (evsel__is_clock(counter)) - update_runtime_stat(st, STAT_NSECS, cpu, count_ns, &rsd); + update_runtime_stat(st, STAT_NSECS, cpu_map_idx, count_ns, &rsd); else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) - update_runtime_stat(st, STAT_CYCLES, cpu, count, &rsd); + update_runtime_stat(st, STAT_CYCLES, cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) - update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu, count, &rsd); + update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TRANSACTION_START)) - update_runtime_stat(st, STAT_TRANSACTION, cpu, count, &rsd); + update_runtime_stat(st, STAT_TRANSACTION, cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, ELISION_START)) - update_runtime_stat(st, 
STAT_ELISION, cpu, count, &rsd); + update_runtime_stat(st, STAT_ELISION, cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS)) update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED)) update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED)) update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES)) update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES)) update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING)) update_runtime_stat(st, STAT_TOPDOWN_RETIRING, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC)) update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND)) update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND)) update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_HEAVY_OPS)) update_runtime_stat(st, STAT_TOPDOWN_HEAVY_OPS, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_BR_MISPREDICT)) update_runtime_stat(st, STAT_TOPDOWN_BR_MISPREDICT, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_LAT)) update_runtime_stat(st, STAT_TOPDOWN_FETCH_LAT, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_MEM_BOUND)) update_runtime_stat(st, STAT_TOPDOWN_MEM_BOUND, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) update_runtime_stat(st, STAT_STALLED_CYCLES_BACK, - cpu, count, &rsd); + cpu_map_idx, count, &rsd); else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) - update_runtime_stat(st, STAT_BRANCHES, cpu, count, &rsd); + update_runtime_stat(st, STAT_BRANCHES, cpu_map_idx, count, &rsd); else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) - update_runtime_stat(st, STAT_CACHEREFS, cpu, count, &rsd); + update_runtime_stat(st, STAT_CACHEREFS, cpu_map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) - update_runtime_stat(st, STAT_L1_DCACHE, cpu, count, &rsd); + update_runtime_stat(st, STAT_L1_DCACHE, cpu_map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) - update_runtime_stat(st, STAT_L1_ICACHE, cpu, count, &rsd); + update_runtime_stat(st, STAT_L1_ICACHE, cpu_map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL)) - update_runtime_stat(st, STAT_LL_CACHE, cpu, count, &rsd); + update_runtime_stat(st, STAT_LL_CACHE, cpu_map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) - update_runtime_stat(st, STAT_DTLB_CACHE, cpu, 
count, &rsd); + update_runtime_stat(st, STAT_DTLB_CACHE, cpu_map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) - update_runtime_stat(st, STAT_ITLB_CACHE, cpu, count, &rsd); + update_runtime_stat(st, STAT_ITLB_CACHE, cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, SMI_NUM)) - update_runtime_stat(st, STAT_SMI_NUM, cpu, count, &rsd); + update_runtime_stat(st, STAT_SMI_NUM, cpu_map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, APERF)) - update_runtime_stat(st, STAT_APERF, cpu, count, &rsd); + update_runtime_stat(st, STAT_APERF, cpu_map_idx, count, &rsd); if (counter->collect_stat) { - v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st, + v = saved_value_lookup(counter, cpu_map_idx, true, STAT_NONE, 0, st, rsd.cgrp); update_stats(&v->stats, count); if (counter->metric_leader) v->metric_total += count; } else if (counter->metric_leader) { v = saved_value_lookup(counter->metric_leader, - cpu, true, STAT_NONE, 0, st, rsd.cgrp); + cpu_map_idx, true, STAT_NONE, 0, st, rsd.cgrp); v->metric_total += count; v->metric_other++; } @@ -464,12 +464,12 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list) } static double runtime_stat_avg(struct runtime_stat *st, - enum stat_type type, int cpu, + enum stat_type type, int cpu_map_idx, struct runtime_stat_data *rsd) { struct saved_value *v; - v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp); + v = saved_value_lookup(NULL, cpu_map_idx, false, type, rsd->ctx, st, rsd->cgrp); if (!v) return 0.0; @@ -477,12 +477,12 @@ static double runtime_stat_avg(struct runtime_stat *st, } static double runtime_stat_n(struct runtime_stat *st, - enum stat_type type, int cpu, + enum stat_type type, int cpu_map_idx, struct runtime_stat_data *rsd) { struct saved_value *v; - v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp); + v = saved_value_lookup(NULL, cpu_map_idx, false, type, rsd->ctx, st, rsd->cgrp); if (!v) return 0.0; @@ -490,7 +490,7 @@ static double runtime_stat_n(struct runtime_stat *st, } static void print_stalled_cycles_frontend(struct perf_stat_config *config, - int cpu, double avg, + int cpu_map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -498,7 +498,7 @@ static void print_stalled_cycles_frontend(struct perf_stat_config *config, double total, ratio = 0.0; const char *color; - total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd); + total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd); if (total) ratio = avg / total * 100.0; @@ -513,7 +513,7 @@ static void print_stalled_cycles_frontend(struct perf_stat_config *config, } static void print_stalled_cycles_backend(struct perf_stat_config *config, - int cpu, double avg, + int cpu_map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -521,7 +521,7 @@ static void print_stalled_cycles_backend(struct perf_stat_config *config, double total, ratio = 0.0; const char *color; - total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd); + total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd); if (total) ratio = avg / total * 100.0; @@ -532,7 +532,7 @@ static void print_stalled_cycles_backend(struct perf_stat_config *config, } static void print_branch_misses(struct perf_stat_config *config, - int cpu, double avg, + int cpu_map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -540,7 +540,7 @@ static void 
print_branch_misses(struct perf_stat_config *config, double total, ratio = 0.0; const char *color; - total = runtime_stat_avg(st, STAT_BRANCHES, cpu, rsd); + total = runtime_stat_avg(st, STAT_BRANCHES, cpu_map_idx, rsd); if (total) ratio = avg / total * 100.0; @@ -551,7 +551,7 @@ static void print_branch_misses(struct perf_stat_config *config, } static void print_l1_dcache_misses(struct perf_stat_config *config, - int cpu, double avg, + int cpu_map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -559,7 +559,7 @@ static void print_l1_dcache_misses(struct perf_stat_config *config, double total, ratio = 0.0; const char *color; - total = runtime_stat_avg(st, STAT_L1_DCACHE, cpu, rsd); + total = runtime_stat_avg(st, STAT_L1_DCACHE, cpu_map_idx, rsd); if (total) ratio = avg / total * 100.0; @@ -570,7 +570,7 @@ static void print_l1_dcache_misses(struct perf_stat_config *config, } static void print_l1_icache_misses(struct perf_stat_config *config, - int cpu, double avg, + int cpu_map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -578,7 +578,7 @@ static void print_l1_icache_misses(struct perf_stat_config *config, double total, ratio = 0.0; const char *color; - total = runtime_stat_avg(st, STAT_L1_ICACHE, cpu, rsd); + total = runtime_stat_avg(st, STAT_L1_ICACHE, cpu_map_idx, rsd); if (total) ratio = avg / total * 100.0; @@ -588,7 +588,7 @@ static void print_l1_icache_misses(struct perf_stat_config *config, } static void print_dtlb_cache_misses(struct perf_stat_config *config, - int cpu, double avg, + int cpu_map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -596,7 +596,7 @@ static void print_dtlb_cache_misses(struct perf_stat_config *config, double total, ratio = 0.0; const char *color; - total = runtime_stat_avg(st, STAT_DTLB_CACHE, cpu, rsd); + total = runtime_stat_avg(st, STAT_DTLB_CACHE, cpu_map_idx, rsd); if (total) ratio = avg / total * 100.0; @@ -606,7 +606,7 @@ static void print_dtlb_cache_misses(struct perf_stat_config *config, } static void print_itlb_cache_misses(struct perf_stat_config *config, - int cpu, double avg, + int cpu_map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -614,7 +614,7 @@ static void print_itlb_cache_misses(struct perf_stat_config *config, double total, ratio = 0.0; const char *color; - total = runtime_stat_avg(st, STAT_ITLB_CACHE, cpu, rsd); + total = runtime_stat_avg(st, STAT_ITLB_CACHE, cpu_map_idx, rsd); if (total) ratio = avg / total * 100.0; @@ -624,7 +624,7 @@ static void print_itlb_cache_misses(struct perf_stat_config *config, } static void print_ll_cache_misses(struct perf_stat_config *config, - int cpu, double avg, + int cpu_map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -632,7 +632,7 @@ static void print_ll_cache_misses(struct perf_stat_config *config, double total, ratio = 0.0; const char *color; - total = runtime_stat_avg(st, STAT_LL_CACHE, cpu, rsd); + total = runtime_stat_avg(st, STAT_LL_CACHE, cpu_map_idx, rsd); if (total) ratio = avg / total * 100.0; @@ -690,61 +690,61 @@ static double sanitize_val(double x) return x; } -static double td_total_slots(int cpu, struct runtime_stat *st, +static double td_total_slots(int cpu_map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { - return runtime_stat_avg(st, 
STAT_TOPDOWN_TOTAL_SLOTS, cpu, rsd); + return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, cpu_map_idx, rsd); } -static double td_bad_spec(int cpu, struct runtime_stat *st, +static double td_bad_spec(int cpu_map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { double bad_spec = 0; double total_slots; double total; - total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu, rsd) - - runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu, rsd) + - runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu, rsd); + total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu_map_idx, rsd) - + runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu_map_idx, rsd) + + runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu_map_idx, rsd); - total_slots = td_total_slots(cpu, st, rsd); + total_slots = td_total_slots(cpu_map_idx, st, rsd); if (total_slots) bad_spec = total / total_slots; return sanitize_val(bad_spec); } -static double td_retiring(int cpu, struct runtime_stat *st, +static double td_retiring(int cpu_map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { double retiring = 0; - double total_slots = td_total_slots(cpu, st, rsd); + double total_slots = td_total_slots(cpu_map_idx, st, rsd); double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, - cpu, rsd); + cpu_map_idx, rsd); if (total_slots) retiring = ret_slots / total_slots; return retiring; } -static double td_fe_bound(int cpu, struct runtime_stat *st, +static double td_fe_bound(int cpu_map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { double fe_bound = 0; - double total_slots = td_total_slots(cpu, st, rsd); + double total_slots = td_total_slots(cpu_map_idx, st, rsd); double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES, - cpu, rsd); + cpu_map_idx, rsd); if (total_slots) fe_bound = fetch_bub / total_slots; return fe_bound; } -static double td_be_bound(int cpu, struct runtime_stat *st, +static double td_be_bound(int cpu_map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { - double sum = (td_fe_bound(cpu, st, rsd) + - td_bad_spec(cpu, st, rsd) + - td_retiring(cpu, st, rsd)); + double sum = (td_fe_bound(cpu_map_idx, st, rsd) + + td_bad_spec(cpu_map_idx, st, rsd) + + td_retiring(cpu_map_idx, st, rsd)); if (sum == 0) return 0; return sanitize_val(1.0 - sum); @@ -755,15 +755,15 @@ static double td_be_bound(int cpu, struct runtime_stat *st, * the ratios we need to recreate the sum. */ -static double td_metric_ratio(int cpu, enum stat_type type, +static double td_metric_ratio(int cpu_map_idx, enum stat_type type, struct runtime_stat *stat, struct runtime_stat_data *rsd) { - double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) + - runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) + - runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) + - runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd); - double d = runtime_stat_avg(stat, type, cpu, rsd); + double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu_map_idx, rsd) + + runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu_map_idx, rsd) + + runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu_map_idx, rsd) + + runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu_map_idx, rsd); + double d = runtime_stat_avg(stat, type, cpu_map_idx, rsd); if (sum) return d / sum; @@ -775,23 +775,23 @@ static double td_metric_ratio(int cpu, enum stat_type type, * We allow two missing. 
*/ -static bool full_td(int cpu, struct runtime_stat *stat, +static bool full_td(int cpu_map_idx, struct runtime_stat *stat, struct runtime_stat_data *rsd) { int c = 0; - if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) > 0) + if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu_map_idx, rsd) > 0) c++; - if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) > 0) + if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu_map_idx, rsd) > 0) c++; - if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) > 0) + if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu_map_idx, rsd) > 0) c++; - if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd) > 0) + if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu_map_idx, rsd) > 0) c++; return c >= 2; } -static void print_smi_cost(struct perf_stat_config *config, int cpu, +static void print_smi_cost(struct perf_stat_config *config, int cpu_map_idx, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -799,9 +799,9 @@ static void print_smi_cost(struct perf_stat_config *config, int cpu, double smi_num, aperf, cycles, cost = 0.0; const char *color = NULL; - smi_num = runtime_stat_avg(st, STAT_SMI_NUM, cpu, rsd); - aperf = runtime_stat_avg(st, STAT_APERF, cpu, rsd); - cycles = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd); + smi_num = runtime_stat_avg(st, STAT_SMI_NUM, cpu_map_idx, rsd); + aperf = runtime_stat_avg(st, STAT_APERF, cpu_map_idx, rsd); + cycles = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd); if ((cycles == 0) || (aperf == 0)) return; @@ -818,7 +818,7 @@ static void print_smi_cost(struct perf_stat_config *config, int cpu, static int prepare_metric(struct evsel **metric_events, struct metric_ref *metric_refs, struct expr_parse_ctx *pctx, - int cpu, + int cpu_map_idx, struct runtime_stat *st) { double scale; @@ -836,7 +836,7 @@ static int prepare_metric(struct evsel **metric_events, scale = 1e-9; source_count = 1; } else { - v = saved_value_lookup(metric_events[i], cpu, false, + v = saved_value_lookup(metric_events[i], cpu_map_idx, false, STAT_NONE, 0, st, metric_events[i]->cgrp); if (!v) @@ -874,7 +874,7 @@ static void generic_metric(struct perf_stat_config *config, const char *metric_name, const char *metric_unit, int runtime, - int cpu, + int cpu_map_idx, struct perf_stat_output_ctx *out, struct runtime_stat *st) { @@ -889,7 +889,7 @@ static void generic_metric(struct perf_stat_config *config, return; pctx->runtime = runtime; - i = prepare_metric(metric_events, metric_refs, pctx, cpu, st); + i = prepare_metric(metric_events, metric_refs, pctx, cpu_map_idx, st); if (i < 0) { expr__ctx_free(pctx); return; @@ -934,7 +934,7 @@ static void generic_metric(struct perf_stat_config *config, expr__ctx_free(pctx); } -double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st) +double test_generic_metric(struct metric_expr *mexp, int cpu_map_idx, struct runtime_stat *st) { struct expr_parse_ctx *pctx; double ratio = 0.0; @@ -943,7 +943,7 @@ double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_sta if (!pctx) return NAN; - if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, cpu, st) < 0) + if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, cpu_map_idx, st) < 0) goto out; if (expr__parse(&ratio, pctx, mexp->metric_expr)) @@ -956,7 +956,7 @@ out: void perf_stat__print_shadow_stats(struct perf_stat_config *config, struct evsel *evsel, - double avg, int cpu, + double avg, int cpu_map_idx, struct perf_stat_output_ctx 
*out, struct rblist *metric_events, struct runtime_stat *st) @@ -975,7 +975,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, if (config->iostat_run) { iostat_print_metric(config, evsel, out); } else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { - total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd); + total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd); if (total) { ratio = avg / total; @@ -985,11 +985,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0); } - total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu, &rsd); + total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu_map_idx, &rsd); total = max(total, runtime_stat_avg(st, STAT_STALLED_CYCLES_BACK, - cpu, &rsd)); + cpu_map_idx, &rsd)); if (total && avg) { out->new_line(config, ctxp); @@ -999,8 +999,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, ratio); } } else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) { - if (runtime_stat_n(st, STAT_BRANCHES, cpu, &rsd) != 0) - print_branch_misses(config, cpu, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_BRANCHES, cpu_map_idx, &rsd) != 0) + print_branch_misses(config, cpu_map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all branches", 0); } else if ( @@ -1009,8 +1009,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { - if (runtime_stat_n(st, STAT_L1_DCACHE, cpu, &rsd) != 0) - print_l1_dcache_misses(config, cpu, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_L1_DCACHE, cpu_map_idx, &rsd) != 0) + print_l1_dcache_misses(config, cpu_map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0); } else if ( @@ -1019,8 +1019,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { - if (runtime_stat_n(st, STAT_L1_ICACHE, cpu, &rsd) != 0) - print_l1_icache_misses(config, cpu, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_L1_ICACHE, cpu_map_idx, &rsd) != 0) + print_l1_icache_misses(config, cpu_map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0); } else if ( @@ -1029,8 +1029,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { - if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu, &rsd) != 0) - print_dtlb_cache_misses(config, cpu, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu_map_idx, &rsd) != 0) + print_dtlb_cache_misses(config, cpu_map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0); } else if ( @@ -1039,8 +1039,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { - if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu, &rsd) != 0) - print_itlb_cache_misses(config, cpu, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu_map_idx, &rsd) != 0) + print_itlb_cache_misses(config, cpu_map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0); } else if ( @@ -1049,27 +1049,27 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, ((PERF_COUNT_HW_CACHE_OP_READ) 
<< 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { - if (runtime_stat_n(st, STAT_LL_CACHE, cpu, &rsd) != 0) - print_ll_cache_misses(config, cpu, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_LL_CACHE, cpu_map_idx, &rsd) != 0) + print_ll_cache_misses(config, cpu_map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0); } else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) { - total = runtime_stat_avg(st, STAT_CACHEREFS, cpu, &rsd); + total = runtime_stat_avg(st, STAT_CACHEREFS, cpu_map_idx, &rsd); if (total) ratio = avg * 100 / total; - if (runtime_stat_n(st, STAT_CACHEREFS, cpu, &rsd) != 0) + if (runtime_stat_n(st, STAT_CACHEREFS, cpu_map_idx, &rsd) != 0) print_metric(config, ctxp, NULL, "%8.3f %%", "of all cache refs", ratio); else print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0); } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { - print_stalled_cycles_frontend(config, cpu, avg, out, st, &rsd); + print_stalled_cycles_frontend(config, cpu_map_idx, avg, out, st, &rsd); } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { - print_stalled_cycles_backend(config, cpu, avg, out, st, &rsd); + print_stalled_cycles_backend(config, cpu_map_idx, avg, out, st, &rsd); } else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { - total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd); + total = runtime_stat_avg(st, STAT_NSECS, cpu_map_idx, &rsd); if (total) { ratio = avg / total; @@ -1078,7 +1078,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, NULL, NULL, "Ghz", 0); } } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) { - total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd); + total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd); if (total) print_metric(config, ctxp, NULL, @@ -1088,8 +1088,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, NULL, NULL, "transactional cycles", 0); } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) { - total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd); - total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd); + total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd); + total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd); if (total2 < avg) total2 = avg; @@ -1099,19 +1099,19 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, else print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0); } else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) { - total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd); + total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd); if (avg) ratio = total / avg; - if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu, &rsd) != 0) + if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd) != 0) print_metric(config, ctxp, NULL, "%8.0f", "cycles / transaction", ratio); else print_metric(config, ctxp, NULL, NULL, "cycles / transaction", 0); } else if (perf_stat_evsel__is(evsel, ELISION_START)) { - total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd); + total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd); if (avg) ratio = total / avg; @@ -1124,28 +1124,28 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, else print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0); } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) { - double fe_bound = td_fe_bound(cpu, st, &rsd); + double fe_bound = td_fe_bound(cpu_map_idx, st, 
&rsd); if (fe_bound > 0.2) color = PERF_COLOR_RED; print_metric(config, ctxp, color, "%8.1f%%", "frontend bound", fe_bound * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) { - double retiring = td_retiring(cpu, st, &rsd); + double retiring = td_retiring(cpu_map_idx, st, &rsd); if (retiring > 0.7) color = PERF_COLOR_GREEN; print_metric(config, ctxp, color, "%8.1f%%", "retiring", retiring * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) { - double bad_spec = td_bad_spec(cpu, st, &rsd); + double bad_spec = td_bad_spec(cpu_map_idx, st, &rsd); if (bad_spec > 0.1) color = PERF_COLOR_RED; print_metric(config, ctxp, color, "%8.1f%%", "bad speculation", bad_spec * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) { - double be_bound = td_be_bound(cpu, st, &rsd); + double be_bound = td_be_bound(cpu_map_idx, st, &rsd); const char *name = "backend bound"; static int have_recovery_bubbles = -1; @@ -1158,14 +1158,14 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, if (be_bound > 0.2) color = PERF_COLOR_RED; - if (td_total_slots(cpu, st, &rsd) > 0) + if (td_total_slots(cpu_map_idx, st, &rsd) > 0) print_metric(config, ctxp, color, "%8.1f%%", name, be_bound * 100.); else print_metric(config, ctxp, NULL, NULL, name, 0); } else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) && - full_td(cpu, st, &rsd)) { - double retiring = td_metric_ratio(cpu, + full_td(cpu_map_idx, st, &rsd)) { + double retiring = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_RETIRING, st, &rsd); if (retiring > 0.7) @@ -1173,8 +1173,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.1f%%", "retiring", retiring * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) && - full_td(cpu, st, &rsd)) { - double fe_bound = td_metric_ratio(cpu, + full_td(cpu_map_idx, st, &rsd)) { + double fe_bound = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_FE_BOUND, st, &rsd); if (fe_bound > 0.2) @@ -1182,8 +1182,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.1f%%", "frontend bound", fe_bound * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) && - full_td(cpu, st, &rsd)) { - double be_bound = td_metric_ratio(cpu, + full_td(cpu_map_idx, st, &rsd)) { + double be_bound = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_BE_BOUND, st, &rsd); if (be_bound > 0.2) @@ -1191,8 +1191,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.1f%%", "backend bound", be_bound * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) && - full_td(cpu, st, &rsd)) { - double bad_spec = td_metric_ratio(cpu, + full_td(cpu_map_idx, st, &rsd)) { + double bad_spec = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_BAD_SPEC, st, &rsd); if (bad_spec > 0.1) @@ -1200,11 +1200,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.1f%%", "bad speculation", bad_spec * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) && - full_td(cpu, st, &rsd) && (config->topdown_level > 1)) { - double retiring = td_metric_ratio(cpu, + full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { + double retiring = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_RETIRING, st, &rsd); - double heavy_ops = td_metric_ratio(cpu, + double heavy_ops = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_HEAVY_OPS, st, &rsd); double light_ops = retiring - heavy_ops; @@ -1220,11 
+1220,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.1f%%", "light operations", light_ops * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) && - full_td(cpu, st, &rsd) && (config->topdown_level > 1)) { - double bad_spec = td_metric_ratio(cpu, + full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { + double bad_spec = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_BAD_SPEC, st, &rsd); - double br_mis = td_metric_ratio(cpu, + double br_mis = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_BR_MISPREDICT, st, &rsd); double m_clears = bad_spec - br_mis; @@ -1240,11 +1240,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.1f%%", "machine clears", m_clears * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) && - full_td(cpu, st, &rsd) && (config->topdown_level > 1)) { - double fe_bound = td_metric_ratio(cpu, + full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { + double fe_bound = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_FE_BOUND, st, &rsd); - double fetch_lat = td_metric_ratio(cpu, + double fetch_lat = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_FETCH_LAT, st, &rsd); double fetch_bw = fe_bound - fetch_lat; @@ -1260,11 +1260,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.1f%%", "fetch bandwidth", fetch_bw * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) && - full_td(cpu, st, &rsd) && (config->topdown_level > 1)) { - double be_bound = td_metric_ratio(cpu, + full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { + double be_bound = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_BE_BOUND, st, &rsd); - double mem_bound = td_metric_ratio(cpu, + double mem_bound = td_metric_ratio(cpu_map_idx, STAT_TOPDOWN_MEM_BOUND, st, &rsd); double core_bound = be_bound - mem_bound; @@ -1281,12 +1281,12 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, core_bound * 100.); } else if (evsel->metric_expr) { generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL, - evsel->name, evsel->metric_name, NULL, 1, cpu, out, st); - } else if (runtime_stat_n(st, STAT_NSECS, cpu, &rsd) != 0) { + evsel->name, evsel->metric_name, NULL, 1, cpu_map_idx, out, st); + } else if (runtime_stat_n(st, STAT_NSECS, cpu_map_idx, &rsd) != 0) { char unit = ' '; char unit_buf[10] = "/sec"; - total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd); + total = runtime_stat_avg(st, STAT_NSECS, cpu_map_idx, &rsd); if (total) ratio = convert_unit_double(1000000000.0 * avg / total, &unit); @@ -1294,7 +1294,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit); print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio); } else if (perf_stat_evsel__is(evsel, SMI_NUM)) { - print_smi_cost(config, cpu, out, st, &rsd); + print_smi_cost(config, cpu_map_idx, out, st, &rsd); } else { num = 0; } @@ -1307,7 +1307,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, out->new_line(config, ctxp); generic_metric(config, mexp->metric_expr, mexp->metric_events, mexp->metric_refs, evsel->name, mexp->metric_name, - mexp->metric_unit, mexp->runtime, cpu, out, st); + mexp->metric_unit, mexp->runtime, cpu_map_idx, out, st); } } if (num == 0) diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 86ab427e87fc..7dbd7c4f3c33 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ 
-356,14 +356,14 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals, static int process_counter_values(struct perf_stat_config *config, struct evsel *evsel, - int cpu, int thread, + int cpu_map_idx, int thread, struct perf_counts_values *count) { struct perf_counts_values *aggr = &evsel->counts->aggr; static struct perf_counts_values zero; bool skip = false; - if (check_per_pkg(evsel, count, cpu, &skip)) { + if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) { pr_err("failed to read per-pkg counter\n"); return -1; } @@ -379,11 +379,11 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel, case AGGR_NODE: case AGGR_NONE: if (!evsel->snapshot) - evsel__compute_deltas(evsel, cpu, thread, count); + evsel__compute_deltas(evsel, cpu_map_idx, thread, count); perf_counts_values__scale(count, config->scale, NULL); if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) { perf_stat__update_shadow_stats(evsel, count->val, - cpu, &rt_stat); + cpu_map_idx, &rt_stat); } if (config->aggr_mode == AGGR_THREAD) { @@ -412,15 +412,15 @@ static int process_counter_maps(struct perf_stat_config *config, { int nthreads = perf_thread_map__nr(counter->core.threads); int ncpus = evsel__nr_cpus(counter); - int cpu, thread; + int idx, thread; if (counter->core.system_wide) nthreads = 1; for (thread = 0; thread < nthreads; thread++) { - for (cpu = 0; cpu < ncpus; cpu++) { - if (process_counter_values(config, counter, cpu, thread, - perf_counts(counter->counts, cpu, thread))) + for (idx = 0; idx < ncpus; idx++) { + if (process_counter_values(config, counter, idx, thread, + perf_counts(counter->counts, idx, thread))) return -1; } } diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 5e25d53e891b..691c12fd8976 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -208,7 +208,7 @@ void perf_stat__init_shadow_stats(void); void perf_stat__reset_shadow_stats(void); void perf_stat__reset_shadow_per_stat(struct runtime_stat *st); void perf_stat__update_shadow_stats(struct evsel *counter, u64 count, - int cpu, struct runtime_stat *st); + int cpu_map_idx, struct runtime_stat *st); struct perf_stat_output_ctx { void *ctx; print_metric_t print_metric; @@ -253,5 +253,5 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf struct target *_target, struct timespec *ts, int argc, const char **argv); struct metric_expr; -double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st); +double test_generic_metric(struct metric_expr *mexp, int cpu_map_idx, struct runtime_stat *st); #endif -- cgit v1.2.3 From 91802e73f77146d69afa0be7eafc983ec84b2bb0 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:46 -0800 Subject: libperf: Sync evsel documentation cpu was renamed cpu_map_idx, for clarity. 
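The renamed parameter is the dense index into the evsel's CPU map, not the CPU number itself. A minimal sketch of the calling convention, assuming an evsel that has already been opened (the loop and printf are illustrative only, not part of the libperf API; a real caller would also need <stdio.h> and <inttypes.h>):

  struct perf_cpu_map *cpus = perf_evsel__cpus(evsel);
  struct perf_counts_values counts;
  int idx;

  for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
          /* idx is a map index; the CPU number can differ on sparse maps */
          if (perf_evsel__read(evsel, idx, 0, &counts) == 0)
                  printf("CPU %d: %" PRIu64 "\n",
                         perf_cpu_map__cpu(cpus, idx), counts.val);
  }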
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-44-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/Documentation/libperf.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt index faef9ba3a540..32c5051c24eb 100644 --- a/tools/lib/perf/Documentation/libperf.txt +++ b/tools/lib/perf/Documentation/libperf.txt @@ -136,16 +136,16 @@ SYNOPSIS int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads); void perf_evsel__close(struct perf_evsel *evsel); - void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu); + void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx); int perf_evsel__mmap(struct perf_evsel *evsel, int pages); void perf_evsel__munmap(struct perf_evsel *evsel); - void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread); - int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, + void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread); + int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread, struct perf_counts_values *count); int perf_evsel__enable(struct perf_evsel *evsel); - int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu); + int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx); int perf_evsel__disable(struct perf_evsel *evsel); - int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu); + int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx); struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel); struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel); struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel); -- cgit v1.2.3 From 7263f3498ba8b6e65c1d810ccafec64cd61a6dc1 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:47 -0800 Subject: perf bpf: Rename 'cpu' to 'cpu_map_idx' Synchronize the caller in evsel with the called function. Shorten 3 lines of code in bperf_read by using perf_cpu_map__for_each_cpu(). This code frequently uses variables named cpu as cpu map indices, which doesn't matter as all CPUs are in the CPU map. It is strange that in some cases the cpumap is used at all. Committer notes: Found when building with BUILD_BPF_SKEL=1: Remove unused 'num_cpu' variable in bperf__read(). 
Make 'j' an 'int' as it is used in perf_cpu_map__for_each_cpu() to compare against an 'int' Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-45-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/bpf_counter.c | 19 +++++++++---------- tools/perf/util/bpf_counter.h | 4 ++-- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c index c17d4a43ce06..80d1a3a31052 100644 --- a/tools/perf/util/bpf_counter.c +++ b/tools/perf/util/bpf_counter.c @@ -265,7 +265,7 @@ static int bpf_program_profiler__read(struct evsel *evsel) return 0; } -static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu, +static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx, int fd) { struct bpf_prog_profiler_bpf *skel; @@ -277,7 +277,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu, assert(skel != NULL); ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events), - &cpu, &fd, BPF_ANY); + &cpu_map_idx, &fd, BPF_ANY); if (ret) return ret; } @@ -566,12 +566,12 @@ out: return err; } -static int bperf__install_pe(struct evsel *evsel, int cpu, int fd) +static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd) { struct bperf_leader_bpf *skel = evsel->leader_skel; return bpf_map_update_elem(bpf_map__fd(skel->maps.events), - &cpu, &fd, BPF_ANY); + &cpu_map_idx, &fd, BPF_ANY); } /* @@ -608,7 +608,8 @@ static int bperf__read(struct evsel *evsel) __u32 num_cpu_bpf = cpu__max_cpu(); struct bpf_perf_event_value values[num_cpu_bpf]; int reading_map_fd, err = 0; - __u32 i, j, num_cpu; + __u32 i; + int j; bperf_sync_counters(evsel); reading_map_fd = bpf_map__fd(skel->maps.accum_readings); @@ -623,9 +624,7 @@ static int bperf__read(struct evsel *evsel) case BPERF_FILTER_GLOBAL: assert(i == 0); - num_cpu = all_cpu_map->nr; - for (j = 0; j < num_cpu; j++) { - cpu = all_cpu_map->map[j]; + perf_cpu_map__for_each_cpu(cpu, j, all_cpu_map) { perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter; perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled; perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running; @@ -757,11 +756,11 @@ static inline bool bpf_counter_skip(struct evsel *evsel) evsel->follower_skel == NULL; } -int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd) +int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd) { if (bpf_counter_skip(evsel)) return 0; - return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd); + return evsel->bpf_counter_ops->install_pe(evsel, cpu_map_idx, fd); } int bpf_counter__load(struct evsel *evsel, struct target *target) diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h index 65ebaa6694fb..4dbf26408b69 100644 --- a/tools/perf/util/bpf_counter.h +++ b/tools/perf/util/bpf_counter.h @@ -16,7 +16,7 @@ typedef int (*bpf_counter_evsel_op)(struct evsel *evsel); typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel, struct target *target); typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel, - 
int cpu, + int cpu_map_idx, int fd); struct bpf_counter_ops { @@ -40,7 +40,7 @@ int bpf_counter__enable(struct evsel *evsel); int bpf_counter__disable(struct evsel *evsel); int bpf_counter__read(struct evsel *evsel); void bpf_counter__destroy(struct evsel *evsel); -int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd); +int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd); #else /* HAVE_BPF_SKEL */ -- cgit v1.2.3 From 84d2f4f0375d4857f9f9e57a9ad75cbf0f34e108 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:48 -0800 Subject: perf c2c: Use more intention revealing iterator Use perf_cpu_map__for_each_cpu() in setup_nodes. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-46-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-c2c.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index b5c67ef73862..ad1fbeafc93d 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -2015,7 +2015,7 @@ static int setup_nodes(struct perf_session *session) { struct numa_node *n; unsigned long **nodes; - int node, cpu; + int node, cpu, idx; int *cpu2node; if (c2c.node_info > 2) @@ -2057,13 +2057,13 @@ static int setup_nodes(struct perf_session *session) if (perf_cpu_map__empty(map)) continue; - for (cpu = 0; cpu < map->nr; cpu++) { - set_bit(map->map[cpu], set); + perf_cpu_map__for_each_cpu(cpu, idx, map) { + set_bit(cpu, set); - if (WARN_ONCE(cpu2node[map->map[cpu]] != -1, "node/cpu topology bug")) + if (WARN_ONCE(cpu2node[cpu] != -1, "node/cpu topology bug")) return -EINVAL; - cpu2node[map->map[cpu]] = node; + cpu2node[cpu] = node; } } -- cgit v1.2.3 From b57af1b4017abff969425dffd6f59ddfdedce8cb Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:49 -0800 Subject: perf script: Fix flipped index and cpu perf_counts are accessed by the densely packed index. 
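The flip only bites when the CPU map is sparse: perf_cpu_map__for_each_cpu() yields both the CPU number and the dense map index, and perf_counts() storage is laid out by the index. A minimal sketch of the distinction, using a hypothetical map of "1,3":

  /* With a map of "1,3": idx 0 holds CPU 1, idx 1 holds CPU 3. */
  perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
          /* counts are packed densely, so index them by idx, not cpu */
          counts = perf_counts(counter->counts, idx, thread);
  }

Indexing by cpu, as the code did before this fix, reads the wrong slot (or past the end of the allocation) whenever the map does not start at 0 or has holes.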
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-47-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index f40319144856..bb43529618b3 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2131,7 +2131,7 @@ static void __process_stat(struct evsel *counter, u64 tstamp) perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) { struct perf_counts_values *counts; - counts = perf_counts(counter->counts, cpu, thread); + counts = perf_counts(counter->counts, idx, thread); printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n", cpu, -- cgit v1.2.3 From ce37ab3eb2490aba60ab1a622a4c6c6ee9a7cc66 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:50 -0800 Subject: perf stat: Correct first_shadow_cpu to return index perf_stat__update_shadow_stats() and perf_stat__print_shadow_stats() use a cpu map index rather than a CPU, but first_shadow_cpu is returning the wrong value for this. Change first_shadow_cpu to first_shadow_cpu_map_idx to make things agree. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-48-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-display.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 0f192360b6c6..ba95379efcfb 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -327,24 +327,23 @@ static void print_metric_header(struct perf_stat_config *config, fprintf(os->fh, "%*s ", config->metric_only_len, unit); } -static int first_shadow_cpu(struct perf_stat_config *config, - struct evsel *evsel, const struct aggr_cpu_id *id) +static int first_shadow_cpu_map_idx(struct perf_stat_config *config, + struct evsel *evsel, const struct aggr_cpu_id *id) { - struct perf_cpu_map *cpus; + struct perf_cpu_map *cpus = evsel__cpus(evsel); int cpu, idx; if (config->aggr_mode == AGGR_NONE) - return id->cpu; + return perf_cpu_map__idx(cpus, id->cpu); if (!config->aggr_get_id) return 0; - cpus = evsel__cpus(evsel); perf_cpu_map__for_each_cpu(cpu, idx, cpus) { struct aggr_cpu_id cpu_id = config->aggr_get_id(config, cpu); if (aggr_cpu_id__equal(&cpu_id, id)) - return cpu; + return idx; } return 0; } @@ -503,7 +502,7 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int } perf_stat__print_shadow_stats(config, counter, uval, - first_shadow_cpu(config, counter, &id), + 
first_shadow_cpu_map_idx(config, counter, &id), &out, &config->metric_events, st); if (!config->csv_output && !config->metric_only) { print_noise(config, counter, noise); @@ -532,7 +531,7 @@ static void aggr_update_shadow(struct perf_stat_config *config, val += perf_counts(counter->counts, idx, 0)->val; } perf_stat__update_shadow_stats(counter, val, - first_shadow_cpu(config, counter, &id), + first_shadow_cpu_map_idx(config, counter, &id), &rt_stat); } } -- cgit v1.2.3 From 6d18804b963b78dcd53851f11e9080408b3d85c2 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 4 Jan 2022 22:13:51 -0800 Subject: perf cpumap: Give CPUs their own type A common problem is confusing CPU map indices with the CPU; wrapping the CPU in a struct avoids this. This approach is similar to atomic_t. Committer notes: To make it build with BUILD_BPF_SKEL=1 these files needed the conversions to 'struct perf_cpu' usage: tools/perf/util/bpf_counter.c tools/perf/util/bpf_counter_cgroup.c tools/perf/util/bpf_ftrace.c Also perf_env__get_cpu() was removed back in "perf cpumap: Switch cpu_map__build_map to cpu function". Additionally these needed to be fixed for the ARM builds to complete: tools/perf/arch/arm/util/cs-etm.c tools/perf/arch/arm64/util/pmu.c Suggested-by: John Garry Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Riccardo Mancini Cc: Stephane Eranian Cc: Suzuki Poulouse Cc: Vineet Singh Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: zhengjun.xing@intel.com Link: https://lore.kernel.org/r/20220105061351.120843-49-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/cpumap.c | 103 ++++++++++++--------- tools/lib/perf/evlist.c | 4 +- tools/lib/perf/evsel.c | 9 +- tools/lib/perf/include/internal/cpumap.h | 9 +- tools/lib/perf/include/internal/evlist.h | 3 +- tools/lib/perf/include/internal/evsel.h | 4 +- tools/lib/perf/include/internal/mmap.h | 5 +- tools/lib/perf/include/perf/cpumap.h | 9 +- tools/lib/perf/mmap.c | 2 +- tools/perf/arch/arm/util/cs-etm.c | 35 ++++--- tools/perf/arch/arm64/util/pmu.c | 2 +- tools/perf/bench/epoll-ctl.c | 2 +- tools/perf/bench/epoll-wait.c | 2 +- tools/perf/bench/futex-hash.c | 2 +- tools/perf/bench/futex-lock-pi.c | 2 +- tools/perf/bench/futex-requeue.c | 2 +- tools/perf/bench/futex-wake-parallel.c | 2 +- tools/perf/bench/futex-wake.c | 2 +- tools/perf/builtin-c2c.c | 13 +-- tools/perf/builtin-ftrace.c | 2 +- tools/perf/builtin-kmem.c | 2 +- tools/perf/builtin-record.c | 2 +- tools/perf/builtin-sched.c | 65 +++++++------ tools/perf/builtin-script.c | 5 +- tools/perf/builtin-stat.c | 67 +++++++------- tools/perf/tests/attr.c | 6 +- tools/perf/tests/bitmap.c | 2 +- tools/perf/tests/cpumap.c | 6 +- tools/perf/tests/event_update.c | 6 +- tools/perf/tests/mem2node.c | 2 +- tools/perf/tests/mmap-basic.c | 4 +- tools/perf/tests/openat-syscall-all-cpus.c | 17 ++-- tools/perf/tests/stat.c | 3 +- tools/perf/tests/topology.c | 30 +++--- tools/perf/util/affinity.c | 2 +- tools/perf/util/auxtrace.c | 12 +-- tools/perf/util/auxtrace.h | 5 +- tools/perf/util/bpf_counter.c | 12 ++- tools/perf/util/bpf_counter_cgroup.c | 10 +- tools/perf/util/bpf_ftrace.c | 4 +- tools/perf/util/cpumap.c | 91 +++++++++--------- tools/perf/util/cpumap.h | 26 +++--- tools/perf/util/cputopo.c | 6 +- tools/perf/util/env.c | 29 +++--- 
tools/perf/util/env.h | 3 +- tools/perf/util/evlist.c | 8 +- tools/perf/util/evlist.h | 2 +- tools/perf/util/evsel.c | 6 +- tools/perf/util/expr.c | 2 +- tools/perf/util/header.c | 6 +- tools/perf/util/mmap.c | 19 ++-- tools/perf/util/mmap.h | 3 +- tools/perf/util/perf_api_probe.c | 15 +-- tools/perf/util/python.c | 4 +- tools/perf/util/record.c | 11 ++- .../util/scripting-engines/trace-event-python.c | 6 +- tools/perf/util/session.c | 10 +- tools/perf/util/stat-display.c | 34 ++++--- tools/perf/util/stat.c | 2 +- tools/perf/util/stat.h | 2 +- tools/perf/util/svghelper.c | 6 +- tools/perf/util/synthetic-events.c | 12 +-- tools/perf/util/synthetic-events.h | 3 +- tools/perf/util/util.h | 5 +- 64 files changed, 431 insertions(+), 356 deletions(-) diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index eacea3ab965a..ee66760f1e63 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -10,15 +10,24 @@ #include #include -struct perf_cpu_map *perf_cpu_map__dummy_new(void) +static struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus) { - struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int)); + struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus); if (cpus != NULL) { - cpus->nr = 1; - cpus->map[0] = -1; + cpus->nr = nr_cpus; refcount_set(&cpus->refcnt, 1); + } + return cpus; +} + +struct perf_cpu_map *perf_cpu_map__dummy_new(void) +{ + struct perf_cpu_map *cpus = perf_cpu_map__alloc(1); + + if (cpus) + cpus->map[0].cpu = -1; return cpus; } @@ -54,15 +63,12 @@ static struct perf_cpu_map *cpu_map__default_new(void) if (nr_cpus < 0) return NULL; - cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int)); + cpus = perf_cpu_map__alloc(nr_cpus); if (cpus != NULL) { int i; for (i = 0; i < nr_cpus; ++i) - cpus->map[i] = i; - - cpus->nr = nr_cpus; - refcount_set(&cpus->refcnt, 1); + cpus->map[i].cpu = i; } return cpus; @@ -73,31 +79,32 @@ struct perf_cpu_map *perf_cpu_map__default_new(void) return cpu_map__default_new(); } -static int cmp_int(const void *a, const void *b) + +static int cmp_cpu(const void *a, const void *b) { - return *(const int *)a - *(const int*)b; + const struct perf_cpu *cpu_a = a, *cpu_b = b; + + return cpu_a->cpu - cpu_b->cpu; } -static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) +static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus) { - size_t payload_size = nr_cpus * sizeof(int); - struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size); + size_t payload_size = nr_cpus * sizeof(struct perf_cpu); + struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus); int i, j; if (cpus != NULL) { memcpy(cpus->map, tmp_cpus, payload_size); - qsort(cpus->map, nr_cpus, sizeof(int), cmp_int); + qsort(cpus->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu); /* Remove dups */ j = 0; for (i = 0; i < nr_cpus; i++) { - if (i == 0 || cpus->map[i] != cpus->map[i - 1]) - cpus->map[j++] = cpus->map[i]; + if (i == 0 || cpus->map[i].cpu != cpus->map[i - 1].cpu) + cpus->map[j++].cpu = cpus->map[i].cpu; } cpus->nr = j; assert(j <= nr_cpus); - refcount_set(&cpus->refcnt, 1); } - return cpus; } @@ -105,7 +112,7 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file) { struct perf_cpu_map *cpus = NULL; int nr_cpus = 0; - int *tmp_cpus = NULL, *tmp; + struct perf_cpu *tmp_cpus = NULL, *tmp; int max_entries = 0; int n, cpu, prev; char sep; @@ -124,24 +131,24 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file) if (new_max >= max_entries) { max_entries = new_max + MAX_NR_CPUS / 2; - 
tmp = realloc(tmp_cpus, max_entries * sizeof(int)); + tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu)); if (tmp == NULL) goto out_free_tmp; tmp_cpus = tmp; } while (++prev < cpu) - tmp_cpus[nr_cpus++] = prev; + tmp_cpus[nr_cpus++].cpu = prev; } if (nr_cpus == max_entries) { max_entries += MAX_NR_CPUS; - tmp = realloc(tmp_cpus, max_entries * sizeof(int)); + tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu)); if (tmp == NULL) goto out_free_tmp; tmp_cpus = tmp; } - tmp_cpus[nr_cpus++] = cpu; + tmp_cpus[nr_cpus++].cpu = cpu; if (n == 2 && sep == '-') prev = cpu; else @@ -179,7 +186,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) unsigned long start_cpu, end_cpu = 0; char *p = NULL; int i, nr_cpus = 0; - int *tmp_cpus = NULL, *tmp; + struct perf_cpu *tmp_cpus = NULL, *tmp; int max_entries = 0; if (!cpu_list) @@ -220,17 +227,17 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) for (; start_cpu <= end_cpu; start_cpu++) { /* check for duplicates */ for (i = 0; i < nr_cpus; i++) - if (tmp_cpus[i] == (int)start_cpu) + if (tmp_cpus[i].cpu == (int)start_cpu) goto invalid; if (nr_cpus == max_entries) { max_entries += MAX_NR_CPUS; - tmp = realloc(tmp_cpus, max_entries * sizeof(int)); + tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu)); if (tmp == NULL) goto invalid; tmp_cpus = tmp; } - tmp_cpus[nr_cpus++] = (int)start_cpu; + tmp_cpus[nr_cpus++].cpu = (int)start_cpu; } if (*p) ++p; @@ -250,12 +257,16 @@ out: return cpus; } -int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx) +struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx) { + struct perf_cpu result = { + .cpu = -1 + }; + if (cpus && idx < cpus->nr) return cpus->map[idx]; - return -1; + return result; } int perf_cpu_map__nr(const struct perf_cpu_map *cpus) @@ -265,10 +276,10 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus) bool perf_cpu_map__empty(const struct perf_cpu_map *map) { - return map ? map->map[0] == -1 : true; + return map ? map->map[0].cpu == -1 : true; } -int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu) +int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu) { int low, high; @@ -278,13 +289,13 @@ int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu) low = 0; high = cpus->nr; while (low < high) { - int idx = (low + high) / 2, - cpu_at_idx = cpus->map[idx]; + int idx = (low + high) / 2; + struct perf_cpu cpu_at_idx = cpus->map[idx]; - if (cpu_at_idx == cpu) + if (cpu_at_idx.cpu == cpu.cpu) return idx; - if (cpu_at_idx > cpu) + if (cpu_at_idx.cpu > cpu.cpu) high = idx; else low = idx + 1; @@ -293,15 +304,19 @@ int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu) return -1; } -bool perf_cpu_map__has(const struct perf_cpu_map *cpus, int cpu) +bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu) { return perf_cpu_map__idx(cpus, cpu) != -1; } -int perf_cpu_map__max(struct perf_cpu_map *map) +struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map) { + struct perf_cpu result = { + .cpu = -1 + }; + // cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well. - return map->nr > 0 ? map->map[map->nr - 1] : -1; + return map->nr > 0 ? 
map->map[map->nr - 1] : result; } /* @@ -315,7 +330,7 @@ int perf_cpu_map__max(struct perf_cpu_map *map) struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, struct perf_cpu_map *other) { - int *tmp_cpus; + struct perf_cpu *tmp_cpus; int tmp_len; int i, j, k; struct perf_cpu_map *merged; @@ -329,19 +344,19 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, if (!other) return orig; if (orig->nr == other->nr && - !memcmp(orig->map, other->map, orig->nr * sizeof(int))) + !memcmp(orig->map, other->map, orig->nr * sizeof(struct perf_cpu))) return orig; tmp_len = orig->nr + other->nr; - tmp_cpus = malloc(tmp_len * sizeof(int)); + tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu)); if (!tmp_cpus) return NULL; /* Standard merge algorithm from wikipedia */ i = j = k = 0; while (i < orig->nr && j < other->nr) { - if (orig->map[i] <= other->map[j]) { - if (orig->map[i] == other->map[j]) + if (orig->map[i].cpu <= other->map[j].cpu) { + if (orig->map[i].cpu == other->map[j].cpu) j++; tmp_cpus[k++] = orig->map[i++]; } else diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c index 245acbc53bd3..9a770bfdc804 100644 --- a/tools/lib/perf/evlist.c +++ b/tools/lib/perf/evlist.c @@ -407,7 +407,7 @@ perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx) static int perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp, - int output, int cpu) + int output, struct perf_cpu cpu) { return perf_mmap__mmap(map, mp, output, cpu); } @@ -426,7 +426,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, int idx, struct perf_mmap_param *mp, int cpu_idx, int thread, int *_output, int *_output_overwrite) { - int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx); + struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx); struct perf_evsel *evsel; int revent; diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index f1e1665ef4bd..7ea86a44eae5 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -78,10 +78,10 @@ static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthre static int sys_perf_event_open(struct perf_event_attr *attr, - pid_t pid, int cpu, int group_fd, + pid_t pid, struct perf_cpu cpu, int group_fd, unsigned long flags) { - return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); + return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags); } static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd) @@ -113,7 +113,8 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, i int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads) { - int cpu, idx, thread, err = 0; + struct perf_cpu cpu; + int idx, thread, err = 0; if (cpus == NULL) { static struct perf_cpu_map *empty_cpu_map; @@ -252,7 +253,7 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages) for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { int *fd = FD(evsel, idx, thread); struct perf_mmap *map; - int cpu = perf_cpu_map__cpu(evsel->cpus, idx); + struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx); if (fd == NULL || *fd < 0) continue; diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h index 71a31ed738c9..581f9ffb4237 100644 --- a/tools/lib/perf/include/internal/cpumap.h +++ b/tools/lib/perf/include/internal/cpumap.h @@ -4,6 +4,11 @@ #include +/** A wrapper around a CPU 
to avoid confusion with the perf_cpu_map's map's indices. */ +struct perf_cpu { + int cpu; +}; + /** * A sized, reference counted, sorted array of integers representing CPU * numbers. This is commonly used to capture which CPUs a PMU is associated @@ -16,13 +21,13 @@ struct perf_cpu_map { /** Length of the map array. */ int nr; /** The CPU values. */ - int map[]; + struct perf_cpu map[]; }; #ifndef MAX_NR_CPUS #define MAX_NR_CPUS 2048 #endif -int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu); +int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu); #endif /* __LIBPERF_INTERNAL_CPUMAP_H */ diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h index 6f74269a3ad4..4cefade540bd 100644 --- a/tools/lib/perf/include/internal/evlist.h +++ b/tools/lib/perf/include/internal/evlist.h @@ -4,6 +4,7 @@ #include #include +#include #include #define PERF_EVLIST__HLIST_BITS 8 @@ -36,7 +37,7 @@ typedef void typedef struct perf_mmap* (*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int); typedef int -(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int); +(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, struct perf_cpu); struct perf_evlist_mmap_ops { perf_evlist_mmap__cb_idx_t idx; diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h index 1f3eacbad2e8..cfc9ebd7968e 100644 --- a/tools/lib/perf/include/internal/evsel.h +++ b/tools/lib/perf/include/internal/evsel.h @@ -6,8 +6,8 @@ #include #include #include +#include -struct perf_cpu_map; struct perf_thread_map; struct xyarray; @@ -27,7 +27,7 @@ struct perf_sample_id { * queue number. */ int idx; - int cpu; + struct perf_cpu cpu; pid_t tid; /* Holds total ID period value for PERF_SAMPLE_READ processing. 
*/ diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h index 5e3422f40ed5..5a062af8e9d8 100644 --- a/tools/lib/perf/include/internal/mmap.h +++ b/tools/lib/perf/include/internal/mmap.h @@ -6,6 +6,7 @@ #include #include #include +#include /* perf sample has 16 bits size limit */ #define PERF_SAMPLE_MAX_SIZE (1 << 16) @@ -24,7 +25,7 @@ struct perf_mmap { void *base; int mask; int fd; - int cpu; + struct perf_cpu cpu; refcount_t refcnt; u64 prev; u64 start; @@ -46,7 +47,7 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map); void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev, bool overwrite, libperf_unmap_cb_t unmap_cb); int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp, - int fd, int cpu); + int fd, struct perf_cpu cpu); void perf_mmap__munmap(struct perf_mmap *map); void perf_mmap__get(struct perf_mmap *map); void perf_mmap__put(struct perf_mmap *map); diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h index 3f1c0afa3ccd..15b8faafd615 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -3,11 +3,10 @@ #define __LIBPERF_CPUMAP_H #include +#include #include #include -struct perf_cpu_map; - LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void); LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void); LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); @@ -16,11 +15,11 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, struct perf_cpu_map *other); LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); -LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); +LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map); -LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map); -LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu); +LIBPERF_API struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map); +LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu); #define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \ for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \ diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c index aaa457904008..f7ee07cb5818 100644 --- a/tools/lib/perf/mmap.c +++ b/tools/lib/perf/mmap.c @@ -32,7 +32,7 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map) } int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp, - int fd, int cpu) + int fd, struct perf_cpu cpu) { map->prev = 0; map->mask = mp->mask; diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index 129c0272d65b..2e8b2c4365a0 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -203,9 +203,11 @@ static int cs_etm_set_option(struct auxtrace_record *itr, struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL); /* Set option of each CPU we have */ - for (i = 0; i < cpu__max_cpu(); i++) { - if (!perf_cpu_map__has(event_cpus, i) || - !perf_cpu_map__has(online_cpus, i)) + for (i = 0; i < cpu__max_cpu().cpu; i++) { + struct perf_cpu cpu = { .cpu = i, }; + + if (!perf_cpu_map__has(event_cpus, cpu) || + !perf_cpu_map__has(online_cpus, cpu)) continue; if (option & BIT(ETM_OPT_CTXTID)) 
{ @@ -522,9 +524,11 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, /* cpu map is not empty, we have specific CPUs to work with */ if (!perf_cpu_map__empty(event_cpus)) { - for (i = 0; i < cpu__max_cpu(); i++) { - if (!perf_cpu_map__has(event_cpus, i) || - !perf_cpu_map__has(online_cpus, i)) + for (i = 0; i < cpu__max_cpu().cpu; i++) { + struct perf_cpu cpu = { .cpu = i, }; + + if (!perf_cpu_map__has(event_cpus, cpu) || + !perf_cpu_map__has(online_cpus, cpu)) continue; if (cs_etm_is_ete(itr, i)) @@ -536,8 +540,10 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, } } else { /* get configuration for all CPUs in the system */ - for (i = 0; i < cpu__max_cpu(); i++) { - if (!perf_cpu_map__has(online_cpus, i)) + for (i = 0; i < cpu__max_cpu().cpu; i++) { + struct perf_cpu cpu = { .cpu = i, }; + + if (!perf_cpu_map__has(online_cpus, cpu)) continue; if (cs_etm_is_ete(itr, i)) @@ -722,8 +728,10 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, } else { /* Make sure all specified CPUs are online */ for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) { - if (perf_cpu_map__has(event_cpus, i) && - !perf_cpu_map__has(online_cpus, i)) + struct perf_cpu cpu = { .cpu = i, }; + + if (perf_cpu_map__has(event_cpus, cpu) && + !perf_cpu_map__has(online_cpus, cpu)) return -EINVAL; } @@ -743,9 +751,12 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, offset = CS_ETM_SNAPSHOT + 1; - for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++) - if (perf_cpu_map__has(cpu_map, i)) + for (i = 0; i < cpu__max_cpu().cpu && offset < priv_size; i++) { + struct perf_cpu cpu = { .cpu = i, }; + + if (perf_cpu_map__has(cpu_map, cpu)) cs_etm_get_metadata(i, &offset, itr, info); + } perf_cpu_map__put(online_cpus); diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c index d3a18f9c85f6..79124bba713e 100644 --- a/tools/perf/arch/arm64/util/pmu.c +++ b/tools/perf/arch/arm64/util/pmu.c @@ -15,7 +15,7 @@ const struct pmu_events_map *pmu_events_map__find(void) * The cpumap should cover all CPUs. Otherwise, some CPUs may * not support some events or have different event IDs. 
*/ - if (pmu->cpus->nr != cpu__max_cpu()) + if (pmu->cpus->nr != cpu__max_cpu().cpu) return NULL; return perf_pmu__find_map(pmu); diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c index ddaca75c3bc0..1a17ec83d3c4 100644 --- a/tools/perf/bench/epoll-ctl.c +++ b/tools/perf/bench/epoll-ctl.c @@ -253,7 +253,7 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu) if (!noaffinity) { CPU_ZERO(&cpuset); - CPU_SET(cpu->map[i % cpu->nr], &cpuset); + CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset); ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset); if (ret) diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c index 79d13dbc0a47..0d1dd8879197 100644 --- a/tools/perf/bench/epoll-wait.c +++ b/tools/perf/bench/epoll-wait.c @@ -342,7 +342,7 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu) if (!noaffinity) { CPU_ZERO(&cpuset); - CPU_SET(cpu->map[i % cpu->nr], &cpuset); + CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset); ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset); if (ret) diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c index fcdea3e44937..9627b6ab8670 100644 --- a/tools/perf/bench/futex-hash.c +++ b/tools/perf/bench/futex-hash.c @@ -177,7 +177,7 @@ int bench_futex_hash(int argc, const char **argv) goto errmem; CPU_ZERO(&cpuset); - CPU_SET(cpu->map[i % cpu->nr], &cpuset); + CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset); ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset); if (ret) diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c index 137890f78e17..a512a320df74 100644 --- a/tools/perf/bench/futex-lock-pi.c +++ b/tools/perf/bench/futex-lock-pi.c @@ -136,7 +136,7 @@ static void create_threads(struct worker *w, pthread_attr_t thread_attr, worker[i].futex = &global_futex; CPU_ZERO(&cpuset); - CPU_SET(cpu->map[i % cpu->nr], &cpuset); + CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset); if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset)) err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c index f7a5ffebb940..aca47ce8b1e7 100644 --- a/tools/perf/bench/futex-requeue.c +++ b/tools/perf/bench/futex-requeue.c @@ -131,7 +131,7 @@ static void block_threads(pthread_t *w, /* create and block all threads */ for (i = 0; i < params.nthreads; i++) { CPU_ZERO(&cpuset); - CPU_SET(cpu->map[i % cpu->nr], &cpuset); + CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset); if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset)) err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c index 0983f40b4b40..888ee6037945 100644 --- a/tools/perf/bench/futex-wake-parallel.c +++ b/tools/perf/bench/futex-wake-parallel.c @@ -152,7 +152,7 @@ static void block_threads(pthread_t *w, pthread_attr_t thread_attr, /* create and block all threads */ for (i = 0; i < params.nthreads; i++) { CPU_ZERO(&cpuset); - CPU_SET(cpu->map[i % cpu->nr], &cpuset); + CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset); if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset)) err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); diff --git a/tools/perf/bench/futex-wake.c 
b/tools/perf/bench/futex-wake.c index 2226a475e782..aa82db51c0ab 100644 --- a/tools/perf/bench/futex-wake.c +++ b/tools/perf/bench/futex-wake.c @@ -105,7 +105,7 @@ static void block_threads(pthread_t *w, /* create and block all threads */ for (i = 0; i < params.nthreads; i++) { CPU_ZERO(&cpuset); - CPU_SET(cpu->map[i % cpu->nr], &cpuset); + CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset); if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset)) err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index ad1fbeafc93d..77dd4afacca4 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -2015,7 +2015,8 @@ static int setup_nodes(struct perf_session *session) { struct numa_node *n; unsigned long **nodes; - int node, cpu, idx; + int node, idx; + struct perf_cpu cpu; int *cpu2node; if (c2c.node_info > 2) @@ -2038,8 +2039,8 @@ static int setup_nodes(struct perf_session *session) if (!cpu2node) return -ENOMEM; - for (cpu = 0; cpu < c2c.cpus_cnt; cpu++) - cpu2node[cpu] = -1; + for (idx = 0; idx < c2c.cpus_cnt; idx++) + cpu2node[idx] = -1; c2c.cpu2node = cpu2node; @@ -2058,12 +2059,12 @@ static int setup_nodes(struct perf_session *session) continue; perf_cpu_map__for_each_cpu(cpu, idx, map) { - set_bit(cpu, set); + set_bit(cpu.cpu, set); - if (WARN_ONCE(cpu2node[cpu] != -1, "node/cpu topology bug")) + if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug")) return -EINVAL; - cpu2node[cpu] = node; + cpu2node[cpu.cpu] = node; } } diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index f16c39a37a52..71452599f87d 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -281,7 +281,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap) int ret; int last_cpu; - last_cpu = perf_cpu_map__cpu(cpumap, cpumap->nr - 1); + last_cpu = perf_cpu_map__cpu(cpumap, cpumap->nr - 1).cpu; mask_size = last_cpu / 4 + 2; /* one more byte for EOS */ mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */ diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index da03a341c63c..99d7ff9a8eff 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -192,7 +192,7 @@ static int evsel__process_alloc_node_event(struct evsel *evsel, struct perf_samp int ret = evsel__process_alloc_event(evsel, sample); if (!ret) { - int node1 = cpu__get_node(sample->cpu), + int node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu}), node2 = evsel__intval(evsel, sample, "node"); if (node1 != node2) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 6ac2160913ea..0a63295d30f0 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -2796,7 +2796,7 @@ int cmd_record(int argc, const char **argv) symbol__init(NULL); if (rec->opts.affinity != PERF_AFFINITY_SYS) { - rec->affinity_mask.nbits = cpu__max_cpu(); + rec->affinity_mask.nbits = cpu__max_cpu().cpu; rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits); if (!rec->affinity_mask.bits) { pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 9da1da4749c9..72d446de9c60 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -167,7 +167,7 @@ struct trace_sched_handler { struct perf_sched_map { DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS); - int *comp_cpus; + struct perf_cpu *comp_cpus; bool comp; struct 
perf_thread_map *color_pids; const char *color_pids_str; @@ -191,7 +191,7 @@ struct perf_sched { * Track the current task - that way we can know whether there's any * weird events, such as a task being switched away that is not current. */ - int max_cpu; + struct perf_cpu max_cpu; u32 curr_pid[MAX_CPUS]; struct thread *curr_thread[MAX_CPUS]; char next_shortname1; @@ -1535,28 +1535,31 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, int new_shortname; u64 timestamp0, timestamp = sample->time; s64 delta; - int i, this_cpu = sample->cpu; + int i; + struct perf_cpu this_cpu = { + .cpu = sample->cpu, + }; int cpus_nr; bool new_cpu = false; const char *color = PERF_COLOR_NORMAL; char stimestamp[32]; - BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); + BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0); - if (this_cpu > sched->max_cpu) + if (this_cpu.cpu > sched->max_cpu.cpu) sched->max_cpu = this_cpu; if (sched->map.comp) { cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); - if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) { + if (!test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) { sched->map.comp_cpus[cpus_nr++] = this_cpu; new_cpu = true; } } else - cpus_nr = sched->max_cpu; + cpus_nr = sched->max_cpu.cpu; - timestamp0 = sched->cpu_last_switched[this_cpu]; - sched->cpu_last_switched[this_cpu] = timestamp; + timestamp0 = sched->cpu_last_switched[this_cpu.cpu]; + sched->cpu_last_switched[this_cpu.cpu] = timestamp; if (timestamp0) delta = timestamp - timestamp0; else @@ -1577,7 +1580,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, return -1; } - sched->curr_thread[this_cpu] = thread__get(sched_in); + sched->curr_thread[this_cpu.cpu] = thread__get(sched_in); printf(" "); @@ -1608,8 +1611,10 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, } for (i = 0; i < cpus_nr; i++) { - int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i; - struct thread *curr_thread = sched->curr_thread[cpu]; + struct perf_cpu cpu = { + .cpu = sched->map.comp ? 
sched->map.comp_cpus[i].cpu : i, + }; + struct thread *curr_thread = sched->curr_thread[cpu.cpu]; struct thread_runtime *curr_tr; const char *pid_color = color; const char *cpu_color = color; @@ -1623,13 +1628,13 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu)) cpu_color = COLOR_CPUS; - if (cpu != this_cpu) + if (cpu.cpu != this_cpu.cpu) color_fprintf(stdout, color, " "); else color_fprintf(stdout, cpu_color, "*"); - if (sched->curr_thread[cpu]) { - curr_tr = thread__get_runtime(sched->curr_thread[cpu]); + if (sched->curr_thread[cpu.cpu]) { + curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]); if (curr_tr == NULL) { thread__put(sched_in); return -1; @@ -1929,7 +1934,7 @@ static char *timehist_get_commstr(struct thread *thread) static void timehist_header(struct perf_sched *sched) { - u32 ncpus = sched->max_cpu + 1; + u32 ncpus = sched->max_cpu.cpu + 1; u32 i, j; printf("%15s %6s ", "time", "cpu"); @@ -2008,7 +2013,7 @@ static void timehist_print_sample(struct perf_sched *sched, struct thread_runtime *tr = thread__priv(thread); const char *next_comm = evsel__strval(evsel, sample, "next_comm"); const u32 next_pid = evsel__intval(evsel, sample, "next_pid"); - u32 max_cpus = sched->max_cpu + 1; + u32 max_cpus = sched->max_cpu.cpu + 1; char tstr[64]; char nstr[30]; u64 wait_time; @@ -2389,7 +2394,7 @@ static void timehist_print_wakeup_event(struct perf_sched *sched, timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); printf("%15s [%04d] ", tstr, sample->cpu); if (sched->show_cpu_visual) - printf(" %*s ", sched->max_cpu + 1, ""); + printf(" %*s ", sched->max_cpu.cpu + 1, ""); printf(" %-*s ", comm_width, timehist_get_commstr(thread)); @@ -2449,13 +2454,13 @@ static void timehist_print_migration_event(struct perf_sched *sched, { struct thread *thread; char tstr[64]; - u32 max_cpus = sched->max_cpu + 1; + u32 max_cpus; u32 ocpu, dcpu; if (sched->summary_only) return; - max_cpus = sched->max_cpu + 1; + max_cpus = sched->max_cpu.cpu + 1; ocpu = evsel__intval(evsel, sample, "orig_cpu"); dcpu = evsel__intval(evsel, sample, "dest_cpu"); @@ -2918,7 +2923,7 @@ static void timehist_print_summary(struct perf_sched *sched, printf(" Total scheduling time (msec): "); print_sched_time(hist_time, 2); - printf(" (x %d)\n", sched->max_cpu); + printf(" (x %d)\n", sched->max_cpu.cpu); } typedef int (*sched_handler)(struct perf_tool *tool, @@ -2935,9 +2940,11 @@ static int perf_timehist__process_sample(struct perf_tool *tool, { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); int err = 0; - int this_cpu = sample->cpu; + struct perf_cpu this_cpu = { + .cpu = sample->cpu, + }; - if (this_cpu > sched->max_cpu) + if (this_cpu.cpu > sched->max_cpu.cpu) sched->max_cpu = this_cpu; if (evsel->handler != NULL) { @@ -3054,10 +3061,10 @@ static int perf_sched__timehist(struct perf_sched *sched) goto out; /* pre-allocate struct for per-CPU idle stats */ - sched->max_cpu = session->header.env.nr_cpus_online; - if (sched->max_cpu == 0) - sched->max_cpu = 4; - if (init_idle_threads(sched->max_cpu)) + sched->max_cpu.cpu = session->header.env.nr_cpus_online; + if (sched->max_cpu.cpu == 0) + sched->max_cpu.cpu = 4; + if (init_idle_threads(sched->max_cpu.cpu)) goto out; /* summary_only implies summary option, but don't overwrite summary if set */ @@ -3209,10 +3216,10 @@ static int setup_map_cpus(struct perf_sched *sched) { struct perf_cpu_map *map; - sched->max_cpu = 
sysconf(_SC_NPROCESSORS_CONF); + sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF); if (sched->map.comp) { - sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int)); + sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int)); if (!sched->map.comp_cpus) return -1; } diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index bb43529618b3..ecd4f99a6c14 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2115,7 +2115,8 @@ static struct scripting_ops *scripting_ops; static void __process_stat(struct evsel *counter, u64 tstamp) { int nthreads = perf_thread_map__nr(counter->core.threads); - int idx, cpu, thread; + int idx, thread; + struct perf_cpu cpu; static int header_printed; if (counter->core.system_wide) @@ -2134,7 +2135,7 @@ static void __process_stat(struct evsel *counter, u64 tstamp) counts = perf_counts(counter->counts, idx, thread); printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n", - cpu, + cpu.cpu, perf_thread_map__pid(counter->core.threads, thread), counts->val, counts->ena, diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index dfb8f7847e6c..973ade18b72a 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -234,7 +234,7 @@ static bool cpus_map_matched(struct evsel *a, struct evsel *b) return false; for (int i = 0; i < a->core.cpus->nr; i++) { - if (a->core.cpus->map[i] != b->core.cpus->map[i]) + if (a->core.cpus->map[i].cpu != b->core.cpus->map[i].cpu) return false; } @@ -331,7 +331,7 @@ static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 t struct perf_counts_values *count) { struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); - int cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); + struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, process_synthesized_event, NULL); @@ -396,7 +396,8 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_ fprintf(stat_config.output, "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", evsel__name(counter), - perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx), + perf_cpu_map__cpu(evsel__cpus(counter), + cpu_map_idx).cpu, count->val, count->ena, count->run); } } @@ -1328,61 +1329,61 @@ static const char *const aggr_mode__string[] = { }; static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, - int cpu) + struct perf_cpu cpu) { return aggr_cpu_id__socket(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, - int cpu) + struct perf_cpu cpu) { return aggr_cpu_id__die(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, - int cpu) + struct perf_cpu cpu) { return aggr_cpu_id__core(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, - int cpu) + struct perf_cpu cpu) { return aggr_cpu_id__node(cpu, /*data=*/NULL); } static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, - aggr_get_id_t get_id, int cpu) + aggr_get_id_t get_id, struct perf_cpu cpu) { struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu])) - config->cpus_aggr_map->map[cpu] = get_id(config, cpu); + if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) + config->cpus_aggr_map->map[cpu.cpu] = 
get_id(config, cpu); - id = config->cpus_aggr_map->map[cpu]; + id = config->cpus_aggr_map->map[cpu.cpu]; return id; } static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, - int cpu) + struct perf_cpu cpu) { return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); } static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, - int cpu) + struct perf_cpu cpu) { return perf_stat__get_aggr(config, perf_stat__get_die, cpu); } static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, - int cpu) + struct perf_cpu cpu) { return perf_stat__get_aggr(config, perf_stat__get_core, cpu); } static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, - int cpu) + struct perf_cpu cpu) { return perf_stat__get_aggr(config, perf_stat__get_node, cpu); } @@ -1467,7 +1468,7 @@ static int perf_stat_init_aggr_mode(void) * taking the highest cpu number to be the size of * the aggregation translate cpumap. */ - nr = perf_cpu_map__max(evsel_list->core.cpus); + nr = perf_cpu_map__max(evsel_list->core.cpus).cpu; stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); return stat_config.cpus_aggr_map ? 0 : -ENOMEM; } @@ -1495,55 +1496,55 @@ static void perf_stat__exit_aggr_mode(void) stat_config.cpus_aggr_map = NULL; } -static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(int cpu, void *data) +static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (cpu != -1) - id.socket = env->cpu[cpu].socket_id; + if (cpu.cpu != -1) + id.socket = env->cpu[cpu.cpu].socket_id; return id; } -static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(int cpu, void *data) +static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (cpu != -1) { + if (cpu.cpu != -1) { /* * die_id is relative to socket, so start * with the socket ID and then add die to * make a unique ID. */ - id.socket = env->cpu[cpu].socket_id; - id.die = env->cpu[cpu].die_id; + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; } return id; } -static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(int cpu, void *data) +static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (cpu != -1) { + if (cpu.cpu != -1) { /* * core_id is relative to socket and die, * we need a global id. 
So we set * socket, die id and core id */ - id.socket = env->cpu[cpu].socket_id; - id.die = env->cpu[cpu].die_id; - id.core = env->cpu[cpu].core_id; + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; + id.core = env->cpu[cpu.cpu].core_id; } return id; } -static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(int cpu, void *data) +static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct aggr_cpu_id id = aggr_cpu_id__empty(); @@ -1552,24 +1553,24 @@ static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(int cpu, void *data) } static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, - int cpu) + struct perf_cpu cpu) { return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); } static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, - int cpu) + struct perf_cpu cpu) { return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); } static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, - int cpu) + struct perf_cpu cpu) { return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); } static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, - int cpu) + struct perf_cpu cpu) { return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); } diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index 0f73e300f207..56fba08a3037 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -65,7 +65,7 @@ do { \ #define WRITE_ASS(field, fmt) __WRITE_ASS(field, fmt, attr->field) -static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu, +static int store_event(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu, int fd, int group_fd, unsigned long flags) { FILE *file; @@ -93,7 +93,7 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu, /* syscall arguments */ __WRITE_ASS(fd, "d", fd); __WRITE_ASS(group_fd, "d", group_fd); - __WRITE_ASS(cpu, "d", cpu); + __WRITE_ASS(cpu, "d", cpu.cpu); __WRITE_ASS(pid, "d", pid); __WRITE_ASS(flags, "lu", flags); @@ -144,7 +144,7 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu, return 0; } -void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, +void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu, int fd, int group_fd, unsigned long flags) { int errno_saved = errno; diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c index 384856347236..0bf399c49849 100644 --- a/tools/perf/tests/bitmap.c +++ b/tools/perf/tests/bitmap.c @@ -18,7 +18,7 @@ static unsigned long *get_bitmap(const char *str, int nbits) if (map && bm) { for (i = 0; i < map->nr; i++) - set_bit(map->map[i], bm); + set_bit(map->map[i].cpu, bm); } if (map) diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c index 89a155092f85..84e87e31f119 100644 --- a/tools/perf/tests/cpumap.c +++ b/tools/perf/tests/cpumap.c @@ -38,7 +38,7 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused, TEST_ASSERT_VAL("wrong nr", map->nr == 20); for (i = 0; i < 20; i++) { - TEST_ASSERT_VAL("wrong cpu", map->map[i] == i); + TEST_ASSERT_VAL("wrong cpu", map->map[i].cpu == i); } perf_cpu_map__put(map); @@ -67,8 +67,8 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused, map = cpu_map__new_data(data); TEST_ASSERT_VAL("wrong nr", map->nr == 2); 
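	/*
	 * Illustration only, not part of the patch: this test uses a sparse
	 * map on purpose, so the map's indices and the CPU numbers they hold
	 * diverge:
	 *
	 *	idx          0    1
	 *	map->map[]   1  256
	 *
	 * i.e. map->map[1].cpu == 256. With plain ints, a bare "1" could mean
	 * either the index or the CPU; the struct perf_cpu wrapper makes the
	 * assertions below unambiguous about which one they test.
	 */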
- TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1); - TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256); + TEST_ASSERT_VAL("wrong cpu", map->map[0].cpu == 1); + TEST_ASSERT_VAL("wrong cpu", map->map[1].cpu == 256); TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1); perf_cpu_map__put(map); return 0; diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c index d01532d40acb..16b6d6f47f38 100644 --- a/tools/perf/tests/event_update.c +++ b/tools/perf/tests/event_update.c @@ -76,9 +76,9 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused, TEST_ASSERT_VAL("wrong id", ev->id == 123); TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__CPUS); TEST_ASSERT_VAL("wrong cpus", map->nr == 3); - TEST_ASSERT_VAL("wrong cpus", map->map[0] == 1); - TEST_ASSERT_VAL("wrong cpus", map->map[1] == 2); - TEST_ASSERT_VAL("wrong cpus", map->map[2] == 3); + TEST_ASSERT_VAL("wrong cpus", map->map[0].cpu == 1); + TEST_ASSERT_VAL("wrong cpus", map->map[1].cpu == 2); + TEST_ASSERT_VAL("wrong cpus", map->map[2].cpu == 3); perf_cpu_map__put(map); return 0; } diff --git a/tools/perf/tests/mem2node.c b/tools/perf/tests/mem2node.c index b17b86391383..f4a4aba33f76 100644 --- a/tools/perf/tests/mem2node.c +++ b/tools/perf/tests/mem2node.c @@ -31,7 +31,7 @@ static unsigned long *get_bitmap(const char *str, int nbits) if (map && bm) { for (i = 0; i < map->nr; i++) { - set_bit(map->map[i], bm); + set_bit(map->map[i].cpu, bm); } } diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index 90b2feda31ac..0ad62914b4d7 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -59,11 +59,11 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest } CPU_ZERO(&cpu_set); - CPU_SET(cpus->map[0], &cpu_set); + CPU_SET(cpus->map[0].cpu, &cpu_set); sched_setaffinity(0, sizeof(cpu_set), &cpu_set); if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { pr_debug("sched_setaffinity() failed on CPU %d: %s ", - cpus->map[0], str_error_r(errno, sbuf, sizeof(sbuf))); + cpus->map[0].cpu, str_error_r(errno, sbuf, sizeof(sbuf))); goto out_free_cpus; } diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c index ca0a50e92839..1ab362323d25 100644 --- a/tools/perf/tests/openat-syscall-all-cpus.c +++ b/tools/perf/tests/openat-syscall-all-cpus.c @@ -22,7 +22,8 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused, int subtest __maybe_unused) { - int err = -1, fd, idx, cpu; + int err = -1, fd, idx; + struct perf_cpu cpu; struct perf_cpu_map *cpus; struct evsel *evsel; unsigned int nr_openat_calls = 111, i; @@ -66,15 +67,15 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb * without CPU_ALLOC. 
1024 cpus in 2010 still seems * a reasonable upper limit tho :-) */ - if (cpu >= CPU_SETSIZE) { - pr_debug("Ignoring CPU %d\n", cpu); + if (cpu.cpu >= CPU_SETSIZE) { + pr_debug("Ignoring CPU %d\n", cpu.cpu); continue; } - CPU_SET(cpu, &cpu_set); + CPU_SET(cpu.cpu, &cpu_set); if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { pr_debug("sched_setaffinity() failed on CPU %d: %s ", - cpu, + cpu.cpu, str_error_r(errno, sbuf, sizeof(sbuf))); goto out_close_fd; } @@ -82,7 +83,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb fd = openat(0, "/etc/passwd", O_RDONLY); close(fd); } - CPU_CLR(cpu, &cpu_set); + CPU_CLR(cpu.cpu, &cpu_set); } evsel->core.cpus = perf_cpu_map__get(cpus); @@ -92,7 +93,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb perf_cpu_map__for_each_cpu(cpu, idx, cpus) { unsigned int expected; - if (cpu >= CPU_SETSIZE) + if (cpu.cpu >= CPU_SETSIZE) continue; if (evsel__read_on_cpu(evsel, idx, 0) < 0) { @@ -104,7 +105,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb expected = nr_openat_calls + idx; if (perf_counts(evsel->counts, idx, 0)->val != expected) { pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", - expected, cpu, perf_counts(evsel->counts, idx, 0)->val); + expected, cpu.cpu, perf_counts(evsel->counts, idx, 0)->val); err = -1; } } diff --git a/tools/perf/tests/stat.c b/tools/perf/tests/stat.c index 2eb096b5e6da..500974040fe3 100644 --- a/tools/perf/tests/stat.c +++ b/tools/perf/tests/stat.c @@ -87,7 +87,8 @@ static int test__synthesize_stat(struct test_suite *test __maybe_unused, int sub count.run = 300; TEST_ASSERT_VAL("failed to synthesize stat_config", - !perf_event__synthesize_stat(NULL, 1, 2, 3, &count, process_stat_event, NULL)); + !perf_event__synthesize_stat(NULL, (struct perf_cpu){.cpu = 1}, 2, 3, + &count, process_stat_event, NULL)); return 0; } diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 33e4cb81265c..c4ef0c7002f1 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -112,7 +112,9 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) TEST_ASSERT_VAL("Session header CPU map not set", session->header.env.cpu); for (i = 0; i < session->header.env.nr_cpus_avail; i++) { - if (!perf_cpu_map__has(map, i)) + struct perf_cpu cpu = { .cpu = i }; + + if (!perf_cpu_map__has(map, cpu)) continue; pr_debug("CPU %d, core %d, socket %d\n", i, session->header.env.cpu[i].core_id, @@ -122,15 +124,15 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) // Test that CPU ID contains socket, die, core and CPU for (i = 0; i < map->nr; i++) { id = aggr_cpu_id__cpu(perf_cpu_map__cpu(map, i), NULL); - TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match", map->map[i] == id.cpu); + TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match", map->map[i].cpu == id.cpu.cpu); TEST_ASSERT_VAL("Cpu map - Core ID doesn't match", - session->header.env.cpu[map->map[i]].core_id == id.core); + session->header.env.cpu[map->map[i].cpu].core_id == id.core); TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match", - session->header.env.cpu[map->map[i]].socket_id == id.socket); + session->header.env.cpu[map->map[i].cpu].socket_id == id.socket); TEST_ASSERT_VAL("Cpu map - Die ID doesn't match", - session->header.env.cpu[map->map[i]].die_id == id.die); + session->header.env.cpu[map->map[i].cpu].die_id == id.die); TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1); 
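	/*
	 * Illustration only, not part of the patch: each aggregation level
	 * fills in its own prefix of aggr_cpu_id and leaves the rest at -1,
	 * which is what these assertions pin down:
	 *
	 *	level    socket  die  core  cpu.cpu
	 *	cpu         set  set   set      set
	 *	core        set  set   set       -1
	 *	die         set  set    -1       -1
	 *	socket      set   -1    -1       -1
	 */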
TEST_ASSERT_VAL("Cpu map - Thread is set", id.thread == -1); } @@ -139,13 +141,13 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) for (i = 0; i < map->nr; i++) { id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Core map - Core ID doesn't match", - session->header.env.cpu[map->map[i]].core_id == id.core); + session->header.env.cpu[map->map[i].cpu].core_id == id.core); TEST_ASSERT_VAL("Core map - Socket ID doesn't match", - session->header.env.cpu[map->map[i]].socket_id == id.socket); + session->header.env.cpu[map->map[i].cpu].socket_id == id.socket); TEST_ASSERT_VAL("Core map - Die ID doesn't match", - session->header.env.cpu[map->map[i]].die_id == id.die); + session->header.env.cpu[map->map[i].cpu].die_id == id.die); TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1); TEST_ASSERT_VAL("Core map - Thread is set", id.thread == -1); } @@ -154,14 +156,14 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) for (i = 0; i < map->nr; i++) { id = aggr_cpu_id__die(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Die map - Socket ID doesn't match", - session->header.env.cpu[map->map[i]].socket_id == id.socket); + session->header.env.cpu[map->map[i].cpu].socket_id == id.socket); TEST_ASSERT_VAL("Die map - Die ID doesn't match", - session->header.env.cpu[map->map[i]].die_id == id.die); + session->header.env.cpu[map->map[i].cpu].die_id == id.die); TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1); TEST_ASSERT_VAL("Die map - Core is set", id.core == -1); - TEST_ASSERT_VAL("Die map - CPU is set", id.cpu == -1); + TEST_ASSERT_VAL("Die map - CPU is set", id.cpu.cpu == -1); TEST_ASSERT_VAL("Die map - Thread is set", id.thread == -1); } @@ -169,12 +171,12 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) for (i = 0; i < map->nr; i++) { id = aggr_cpu_id__socket(perf_cpu_map__cpu(map, i), NULL); TEST_ASSERT_VAL("Socket map - Socket ID doesn't match", - session->header.env.cpu[map->map[i]].socket_id == id.socket); + session->header.env.cpu[map->map[i].cpu].socket_id == id.socket); TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1); TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1); TEST_ASSERT_VAL("Socket map - Core is set", id.core == -1); - TEST_ASSERT_VAL("Socket map - CPU is set", id.cpu == -1); + TEST_ASSERT_VAL("Socket map - CPU is set", id.cpu.cpu == -1); TEST_ASSERT_VAL("Socket map - Thread is set", id.thread == -1); } @@ -186,7 +188,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1); TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1); TEST_ASSERT_VAL("Node map - Core is set", id.core == -1); - TEST_ASSERT_VAL("Node map - CPU is set", id.cpu == -1); + TEST_ASSERT_VAL("Node map - CPU is set", id.cpu.cpu == -1); TEST_ASSERT_VAL("Node map - Thread is set", id.thread == -1); } perf_session__delete(session); diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c index 7b12bd7a3080..f1e30d566db3 100644 --- a/tools/perf/util/affinity.c +++ b/tools/perf/util/affinity.c @@ -11,7 +11,7 @@ static int get_cpu_set_size(void) { - int sz = cpu__max_cpu() + 8 - 1; + int sz = cpu__max_cpu().cpu + 8 - 1; /* * sched_getaffinity doesn't like masks smaller than the kernel. * Hopefully that's big enough. 
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index c679394b898d..5632efc44738 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -123,7 +123,7 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, mm->prev = 0; mm->idx = mp->idx; mm->tid = mp->tid; - mm->cpu = mp->cpu; + mm->cpu = mp->cpu.cpu; if (!mp->len) { mm->base = NULL; @@ -180,7 +180,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp, else mp->tid = -1; } else { - mp->cpu = -1; + mp->cpu.cpu = -1; mp->tid = perf_thread_map__pid(evlist->core.threads, idx); } } @@ -292,7 +292,7 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues, if (!queue->set) { queue->set = true; queue->tid = buffer->tid; - queue->cpu = buffer->cpu; + queue->cpu = buffer->cpu.cpu; } buffer->buffer_nr = queues->next_buffer_nr++; @@ -339,11 +339,11 @@ static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues, return 0; } -static bool filter_cpu(struct perf_session *session, int cpu) +static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu) { unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap; - return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap); + return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap); } static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues, @@ -399,7 +399,7 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues, struct auxtrace_buffer buffer = { .pid = -1, .tid = event->auxtrace.tid, - .cpu = event->auxtrace.cpu, + .cpu = { event->auxtrace.cpu }, .data_offset = data_offset, .offset = event->auxtrace.offset, .reference = event->auxtrace.reference, diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index bbf0d78c6401..19910b9011f3 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -240,7 +241,7 @@ struct auxtrace_buffer { size_t size; pid_t pid; pid_t tid; - int cpu; + struct perf_cpu cpu; void *data; off_t data_offset; void *mmap_addr; @@ -350,7 +351,7 @@ struct auxtrace_mmap_params { int prot; int idx; pid_t tid; - int cpu; + struct perf_cpu cpu; }; /** diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c index 80d1a3a31052..328479df5e16 100644 --- a/tools/perf/util/bpf_counter.c +++ b/tools/perf/util/bpf_counter.c @@ -540,7 +540,7 @@ static int bperf__load(struct evsel *evsel, struct target *target) filter_type == BPERF_FILTER_TGID) key = evsel->core.threads->map[i].pid; else if (filter_type == BPERF_FILTER_CPU) - key = evsel->core.cpus->map[i]; + key = evsel->core.cpus->map[i].cpu; else break; @@ -584,7 +584,7 @@ static int bperf_sync_counters(struct evsel *evsel) num_cpu = all_cpu_map->nr; for (i = 0; i < num_cpu; i++) { - cpu = all_cpu_map->map[i]; + cpu = all_cpu_map->map[i].cpu; bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu); } return 0; @@ -605,7 +605,7 @@ static int bperf__disable(struct evsel *evsel) static int bperf__read(struct evsel *evsel) { struct bperf_follower_bpf *skel = evsel->follower_skel; - __u32 num_cpu_bpf = cpu__max_cpu(); + __u32 num_cpu_bpf = cpu__max_cpu().cpu; struct bpf_perf_event_value values[num_cpu_bpf]; int reading_map_fd, err = 0; __u32 i; @@ -615,6 +615,7 @@ static int bperf__read(struct evsel *evsel) reading_map_fd = bpf_map__fd(skel->maps.accum_readings); for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) { + struct perf_cpu entry; __u32 cpu; err = 
bpf_map_lookup_elem(reading_map_fd, &i, values); @@ -624,14 +625,15 @@ static int bperf__read(struct evsel *evsel) case BPERF_FILTER_GLOBAL: assert(i == 0); - perf_cpu_map__for_each_cpu(cpu, j, all_cpu_map) { + perf_cpu_map__for_each_cpu(entry, j, all_cpu_map) { + cpu = entry.cpu; perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter; perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled; perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running; } break; case BPERF_FILTER_CPU: - cpu = evsel->core.cpus->map[i]; + cpu = evsel->core.cpus->map[i].cpu; perf_counts(evsel->counts, i, 0)->val = values[cpu].counter; perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled; perf_counts(evsel->counts, i, 0)->run = values[cpu].running; diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c index cbc6c2bca488..631e34a0b66f 100644 --- a/tools/perf/util/bpf_counter_cgroup.c +++ b/tools/perf/util/bpf_counter_cgroup.c @@ -48,7 +48,7 @@ static int bperf_load_program(struct evlist *evlist) struct cgroup *cgrp, *leader_cgrp; __u32 i, cpu; __u32 nr_cpus = evlist->core.all_cpus->nr; - int total_cpus = cpu__max_cpu(); + int total_cpus = cpu__max_cpu().cpu; int map_size, map_fd; int prog_fd, err; @@ -125,7 +125,7 @@ static int bperf_load_program(struct evlist *evlist) for (cpu = 0; cpu < nr_cpus; cpu++) { int fd = FD(evsel, cpu); __u32 idx = evsel->core.idx * total_cpus + - evlist->core.all_cpus->map[cpu]; + evlist->core.all_cpus->map[cpu].cpu; err = bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY); @@ -212,7 +212,7 @@ static int bperf_cgrp__sync_counters(struct evlist *evlist) int prog_fd = bpf_program__fd(skel->progs.trigger_read); for (i = 0; i < nr_cpus; i++) { - cpu = evlist->core.all_cpus->map[i]; + cpu = evlist->core.all_cpus->map[i].cpu; bperf_trigger_reading(prog_fd, cpu); } @@ -245,7 +245,7 @@ static int bperf_cgrp__read(struct evsel *evsel) { struct evlist *evlist = evsel->evlist; int i, cpu, nr_cpus = evlist->core.all_cpus->nr; - int total_cpus = cpu__max_cpu(); + int total_cpus = cpu__max_cpu().cpu; struct perf_counts_values *counts; struct bpf_perf_event_value *values; int reading_map_fd, err = 0; @@ -272,7 +272,7 @@ static int bperf_cgrp__read(struct evsel *evsel) } for (i = 0; i < nr_cpus; i++) { - cpu = evlist->core.all_cpus->map[i]; + cpu = evlist->core.all_cpus->map[i].cpu; counts = perf_counts(evsel->counts, i, 0); counts->val = values[cpu].counter; diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c index 28dc4c60c788..d756cc66eef3 100644 --- a/tools/perf/util/bpf_ftrace.c +++ b/tools/perf/util/bpf_ftrace.c @@ -63,7 +63,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace) fd = bpf_map__fd(skel->maps.cpu_filter); for (i = 0; i < ncpus; i++) { - cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i); + cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu; bpf_map_update_elem(fd, &cpu, &val, BPF_ANY); } } @@ -122,7 +122,7 @@ int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused, int i, fd, err; u32 idx; u64 *hist; - int ncpus = cpu__max_cpu(); + int ncpus = cpu__max_cpu().cpu; fd = bpf_map__fd(skel->maps.latency); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 48ce583af0ec..12b2243222b0 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -13,8 +13,8 @@ #include #include -static int max_cpu_num; -static int max_present_cpu_num; +static struct perf_cpu max_cpu_num; +static struct perf_cpu max_present_cpu_num; static int max_node_num; 
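/*
 * Illustration only, not part of the patch: holders of a "max CPU" now carry
 * the wrapper and unwrap ->cpu only where an array bound or loop limit is
 * genuinely an integer, e.g.:
 *
 *	struct perf_cpu max = cpu__max_cpu();
 *	int *cpunode = calloc(max.cpu, sizeof(int));	// a bound, not an id
 *
 *	if (cpunode)
 *		for (int i = 0; i < max.cpu; i++)
 *			cpunode[i] = -1;
 */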
/** * The numa node X as read from /sys/devices/system/node/nodeX indexed by the @@ -37,9 +37,9 @@ static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus) * otherwise it would become 65535. */ if (cpus->cpu[i] == (u16) -1) - map->map[i] = -1; + map->map[i].cpu = -1; else - map->map[i] = (int) cpus->cpu[i]; + map->map[i].cpu = (int) cpus->cpu[i]; } } @@ -58,7 +58,7 @@ static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map int cpu, i = 0; for_each_set_bit(cpu, mask->mask, nbits) - map->map[i++] = cpu; + map->map[i++].cpu = cpu; } return map; @@ -91,7 +91,7 @@ struct perf_cpu_map *perf_cpu_map__empty_new(int nr) cpus->nr = nr; for (i = 0; i < nr; i++) - cpus->map[i] = -1; + cpus->map[i].cpu = -1; refcount_set(&cpus->refcnt, 1); } @@ -126,13 +126,13 @@ static int cpu__get_topology_int(int cpu, const char *name, int *value) return sysfs__read_int(path, value); } -int cpu__get_socket_id(int cpu) +int cpu__get_socket_id(struct perf_cpu cpu) { - int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value); + int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value); return ret ?: value; } -struct aggr_cpu_id aggr_cpu_id__socket(int cpu, void *data __maybe_unused) +struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused) { struct aggr_cpu_id id = aggr_cpu_id__empty(); @@ -161,7 +161,8 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus, aggr_cpu_id_get_t get_id, void *data) { - int cpu, idx; + int idx; + struct perf_cpu cpu; struct cpu_aggr_map *c = cpu_aggr_map__empty_new(cpus->nr); if (!c) @@ -201,14 +202,14 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus, } -int cpu__get_die_id(int cpu) +int cpu__get_die_id(struct perf_cpu cpu) { - int value, ret = cpu__get_topology_int(cpu, "die_id", &value); + int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value); return ret ?: value; } -struct aggr_cpu_id aggr_cpu_id__die(int cpu, void *data) +struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data) { struct aggr_cpu_id id; int die; @@ -231,13 +232,13 @@ struct aggr_cpu_id aggr_cpu_id__die(int cpu, void *data) return id; } -int cpu__get_core_id(int cpu) +int cpu__get_core_id(struct perf_cpu cpu) { - int value, ret = cpu__get_topology_int(cpu, "core_id", &value); + int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value); return ret ?: value; } -struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data) +struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data) { struct aggr_cpu_id id; int core = cpu__get_core_id(cpu); @@ -256,7 +257,7 @@ struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data) } -struct aggr_cpu_id aggr_cpu_id__cpu(int cpu, void *data) +struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data) { struct aggr_cpu_id id; @@ -270,7 +271,7 @@ struct aggr_cpu_id aggr_cpu_id__cpu(int cpu, void *data) } -struct aggr_cpu_id aggr_cpu_id__node(int cpu, void *data __maybe_unused) +struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused) { struct aggr_cpu_id id = aggr_cpu_id__empty(); @@ -318,8 +319,8 @@ static void set_max_cpu_num(void) int ret = -1; /* set up default */ - max_cpu_num = 4096; - max_present_cpu_num = 4096; + max_cpu_num.cpu = 4096; + max_present_cpu_num.cpu = 4096; mnt = sysfs__mountpoint(); if (!mnt) @@ -332,7 +333,7 @@ static void set_max_cpu_num(void) goto out; } - ret = get_max_num(path, &max_cpu_num); + ret = get_max_num(path, 
&max_cpu_num.cpu); if (ret) goto out; @@ -343,11 +344,11 @@ static void set_max_cpu_num(void) goto out; } - ret = get_max_num(path, &max_present_cpu_num); + ret = get_max_num(path, &max_present_cpu_num.cpu); out: if (ret) - pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num); + pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu); } /* Determine highest possible node in the system for sparse allocation */ @@ -386,31 +387,31 @@ int cpu__max_node(void) return max_node_num; } -int cpu__max_cpu(void) +struct perf_cpu cpu__max_cpu(void) { - if (unlikely(!max_cpu_num)) + if (unlikely(!max_cpu_num.cpu)) set_max_cpu_num(); return max_cpu_num; } -int cpu__max_present_cpu(void) +struct perf_cpu cpu__max_present_cpu(void) { - if (unlikely(!max_present_cpu_num)) + if (unlikely(!max_present_cpu_num.cpu)) set_max_cpu_num(); return max_present_cpu_num; } -int cpu__get_node(int cpu) +int cpu__get_node(struct perf_cpu cpu) { if (unlikely(cpunode_map == NULL)) { pr_debug("cpu_map not initialized\n"); return -1; } - return cpunode_map[cpu]; + return cpunode_map[cpu.cpu]; } static int init_cpunode_map(void) @@ -420,13 +421,13 @@ static int init_cpunode_map(void) set_max_cpu_num(); set_max_node_num(); - cpunode_map = calloc(max_cpu_num, sizeof(int)); + cpunode_map = calloc(max_cpu_num.cpu, sizeof(int)); if (!cpunode_map) { pr_err("%s: calloc failed\n", __func__); return -1; } - for (i = 0; i < max_cpu_num; i++) + for (i = 0; i < max_cpu_num.cpu; i++) cpunode_map[i] = -1; return 0; @@ -487,35 +488,37 @@ int cpu__setup_cpunode_map(void) size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size) { - int i, cpu, start = -1; + int i, start = -1; bool first = true; size_t ret = 0; #define COMMA first ? "" : "," for (i = 0; i < map->nr + 1; i++) { + struct perf_cpu cpu = { .cpu = INT_MAX }; bool last = i == map->nr; - cpu = last ? 
INT_MAX : map->map[i]; + if (!last) + cpu = map->map[i]; if (start == -1) { start = i; if (last) { ret += snprintf(buf + ret, size - ret, "%s%d", COMMA, - map->map[i]); + map->map[i].cpu); } - } else if (((i - start) != (cpu - map->map[start])) || last) { + } else if (((i - start) != (cpu.cpu - map->map[start].cpu)) || last) { int end = i - 1; if (start == end) { ret += snprintf(buf + ret, size - ret, "%s%d", COMMA, - map->map[start]); + map->map[start].cpu); } else { ret += snprintf(buf + ret, size - ret, "%s%d-%d", COMMA, - map->map[start], map->map[end]); + map->map[start].cpu, map->map[end].cpu); } first = false; start = i; @@ -542,23 +545,23 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size) int i, cpu; char *ptr = buf; unsigned char *bitmap; - int last_cpu = perf_cpu_map__cpu(map, map->nr - 1); + struct perf_cpu last_cpu = perf_cpu_map__cpu(map, map->nr - 1); if (buf == NULL) return 0; - bitmap = zalloc(last_cpu / 8 + 1); + bitmap = zalloc(last_cpu.cpu / 8 + 1); if (bitmap == NULL) { buf[0] = '\0'; return 0; } for (i = 0; i < map->nr; i++) { - cpu = perf_cpu_map__cpu(map, i); + cpu = perf_cpu_map__cpu(map, i).cpu; bitmap[cpu / 8] |= 1 << (cpu % 8); } - for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) { + for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) { unsigned char bits = bitmap[cpu / 8]; if (cpu % 8) @@ -594,7 +597,7 @@ bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b a->socket == b->socket && a->die == b->die && a->core == b->core && - a->cpu == b->cpu; + a->cpu.cpu == b->cpu.cpu; } bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a) @@ -604,7 +607,7 @@ bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a) a->socket == -1 && a->die == -1 && a->core == -1 && - a->cpu == -1; + a->cpu.cpu == -1; } struct aggr_cpu_id aggr_cpu_id__empty(void) @@ -615,7 +618,7 @@ struct aggr_cpu_id aggr_cpu_id__empty(void) .socket = -1, .die = -1, .core = -1, - .cpu = -1 + .cpu = (struct perf_cpu){ .cpu = -1 }, }; return ret; } diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index b98cd1739677..afc15027d678 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -23,7 +23,7 @@ struct aggr_cpu_id { /** The core id as read from /sys/devices/system/cpu/cpuX/topology/core_id. */ int core; /** CPU aggregation, note there is one CPU for each SMT thread. */ - int cpu; + struct perf_cpu cpu; }; /** A collection of aggr_cpu_id values, the "built" version is sorted and uniqued. */ @@ -48,28 +48,28 @@ const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */ int cpu__setup_cpunode_map(void); int cpu__max_node(void); -int cpu__max_cpu(void); -int cpu__max_present_cpu(void); +struct perf_cpu cpu__max_cpu(void); +struct perf_cpu cpu__max_present_cpu(void); /** * cpu__get_node - Returns the numa node X as read from * /sys/devices/system/node/nodeX for the given CPU. */ -int cpu__get_node(int cpu); +int cpu__get_node(struct perf_cpu cpu); /** * cpu__get_socket_id - Returns the socket number as read from * /sys/devices/system/cpu/cpuX/topology/physical_package_id for the given CPU. */ -int cpu__get_socket_id(int cpu); +int cpu__get_socket_id(struct perf_cpu cpu); /** * cpu__get_die_id - Returns the die id as read from * /sys/devices/system/cpu/cpuX/topology/die_id for the given CPU. */ -int cpu__get_die_id(int cpu); +int cpu__get_die_id(struct perf_cpu cpu); /** * cpu__get_core_id - Returns the core id as read from * /sys/devices/system/cpu/cpuX/topology/core_id for the given CPU. 
*/ -int cpu__get_core_id(int cpu); +int cpu__get_core_id(struct perf_cpu cpu); /** * cpu_aggr_map__empty_new - Create a cpu_aggr_map of size nr with every entry @@ -77,7 +77,7 @@ int cpu__get_core_id(int cpu); */ struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr); -typedef struct aggr_cpu_id (*aggr_cpu_id_get_t)(int cpu, void *data); +typedef struct aggr_cpu_id (*aggr_cpu_id_get_t)(struct perf_cpu cpu, void *data); /** * cpu_aggr_map__new - Create a cpu_aggr_map with an aggr_cpu_id for each cpu in @@ -98,29 +98,29 @@ struct aggr_cpu_id aggr_cpu_id__empty(void); * the socket for cpu. The function signature is compatible with * aggr_cpu_id_get_t. */ -struct aggr_cpu_id aggr_cpu_id__socket(int cpu, void *data); +struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data); /** * aggr_cpu_id__die - Create an aggr_cpu_id with the die and socket populated * with the die and socket for cpu. The function signature is compatible with * aggr_cpu_id_get_t. */ -struct aggr_cpu_id aggr_cpu_id__die(int cpu, void *data); +struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data); /** * aggr_cpu_id__core - Create an aggr_cpu_id with the core, die and socket * populated with the core, die and socket for cpu. The function signature is * compatible with aggr_cpu_id_get_t. */ -struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data); +struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data); /** * aggr_cpu_id__core - Create an aggr_cpu_id with the cpu, core, die and socket * populated with the cpu, core, die and socket for cpu. The function signature * is compatible with aggr_cpu_id_get_t. */ -struct aggr_cpu_id aggr_cpu_id__cpu(int cpu, void *data); +struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data); /** * aggr_cpu_id__node - Create an aggr_cpu_id with the numa node populated for * cpu. The function signature is compatible with aggr_cpu_id_get_t. 
*/ -struct aggr_cpu_id aggr_cpu_id__node(int cpu, void *data); +struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data); #endif /* __PERF_CPUMAP_H */ diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c index 8affb37d90e7..84ca106a3246 100644 --- a/tools/perf/util/cputopo.c +++ b/tools/perf/util/cputopo.c @@ -187,7 +187,7 @@ struct cpu_topology *cpu_topology__new(void) struct perf_cpu_map *map; bool has_die = has_die_topology(); - ncpus = cpu__max_present_cpu(); + ncpus = cpu__max_present_cpu().cpu; /* build online CPU map */ map = perf_cpu_map__new(NULL); @@ -218,7 +218,7 @@ struct cpu_topology *cpu_topology__new(void) tp->core_cpus_list = addr; for (i = 0; i < nr; i++) { - if (!perf_cpu_map__has(map, i)) + if (!perf_cpu_map__has(map, (struct perf_cpu){ .cpu = i })) continue; ret = build_cpu_topology(tp, i); @@ -333,7 +333,7 @@ struct numa_topology *numa_topology__new(void) tp->nr = nr; for (i = 0; i < nr; i++) { - if (load_numa_node(&tp->nodes[i], node_map->map[i])) { + if (load_numa_node(&tp->nodes[i], node_map->map[i].cpu)) { numa_topology__delete(tp); tp = NULL; break; diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index fd12c0dcaefb..579e44c59914 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -285,13 +285,13 @@ out_enomem: int perf_env__read_cpu_topology_map(struct perf_env *env) { - int cpu, nr_cpus; + int idx, nr_cpus; if (env->cpu != NULL) return 0; if (env->nr_cpus_avail == 0) - env->nr_cpus_avail = cpu__max_present_cpu(); + env->nr_cpus_avail = cpu__max_present_cpu().cpu; nr_cpus = env->nr_cpus_avail; if (nr_cpus == -1) @@ -301,10 +301,12 @@ int perf_env__read_cpu_topology_map(struct perf_env *env) if (env->cpu == NULL) return -ENOMEM; - for (cpu = 0; cpu < nr_cpus; ++cpu) { - env->cpu[cpu].core_id = cpu__get_core_id(cpu); - env->cpu[cpu].socket_id = cpu__get_socket_id(cpu); - env->cpu[cpu].die_id = cpu__get_die_id(cpu); + for (idx = 0; idx < nr_cpus; ++idx) { + struct perf_cpu cpu = { .cpu = idx }; + + env->cpu[idx].core_id = cpu__get_core_id(cpu); + env->cpu[idx].socket_id = cpu__get_socket_id(cpu); + env->cpu[idx].die_id = cpu__get_die_id(cpu); } env->nr_cpus_avail = nr_cpus; @@ -381,7 +383,7 @@ static int perf_env__read_arch(struct perf_env *env) static int perf_env__read_nr_cpus_avail(struct perf_env *env) { if (env->nr_cpus_avail == 0) - env->nr_cpus_avail = cpu__max_present_cpu(); + env->nr_cpus_avail = cpu__max_present_cpu().cpu; return env->nr_cpus_avail ? 0 : -ENOENT; } @@ -487,7 +489,7 @@ const char *perf_env__pmu_mappings(struct perf_env *env) return env->pmu_mappings; } -int perf_env__numa_node(struct perf_env *env, int cpu) +int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu) { if (!env->nr_numa_map) { struct numa_node *nn; @@ -495,7 +497,7 @@ int perf_env__numa_node(struct perf_env *env, int cpu) for (i = 0; i < env->nr_numa_nodes; i++) { nn = &env->numa_nodes[i]; - nr = max(nr, perf_cpu_map__max(nn->map)); + nr = max(nr, perf_cpu_map__max(nn->map).cpu); } nr++; @@ -514,13 +516,14 @@ int perf_env__numa_node(struct perf_env *env, int cpu) env->nr_numa_map = nr; for (i = 0; i < env->nr_numa_nodes; i++) { - int tmp, j; + struct perf_cpu tmp; + int j; nn = &env->numa_nodes[i]; - perf_cpu_map__for_each_cpu(j, tmp, nn->map) - env->numa_map[j] = i; + perf_cpu_map__for_each_cpu(tmp, j, nn->map) + env->numa_map[tmp.cpu] = i; } } - return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1; + return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? 
env->numa_map[cpu.cpu] : -1; } diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 163e5ec503a2..a3541f98e1fc 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -4,6 +4,7 @@ #include #include +#include "cpumap.h" #include "rwsem.h" struct perf_cpu_map; @@ -170,5 +171,5 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env, bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node); struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id); -int perf_env__numa_node(struct perf_env *env, int cpu); +int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu); #endif /* __PERF_ENV_H */ diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 39d294f6c321..11eb95b2106b 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -350,13 +350,13 @@ struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affin .cpu_map_idx = 0, .evlist_cpu_map_idx = 0, .evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus), - .cpu = -1, + .cpu = (struct perf_cpu){ .cpu = -1}, .affinity = affinity, }; if (itr.affinity) { itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0); - affinity__set(itr.affinity, itr.cpu); + affinity__set(itr.affinity, itr.cpu.cpu); itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu); /* * If this CPU isn't in the evsel's cpu map then advance through @@ -385,7 +385,7 @@ void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr) perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus, evlist_cpu_itr->evlist_cpu_map_idx); if (evlist_cpu_itr->affinity) - affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu); + affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu); evlist_cpu_itr->cpu_map_idx = perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, evlist_cpu_itr->cpu); @@ -819,7 +819,7 @@ perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx) static int perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp, - int output, int cpu) + int output, struct perf_cpu cpu) { struct mmap *map = container_of(_map, struct mmap, core); struct mmap_params *mp = container_of(_mp, struct mmap_params, core); diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 57828ebfcb61..64cba56fbc74 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -344,7 +344,7 @@ struct evlist_cpu_iterator { /** The number of CPU map entries in evlist->core.all_cpus. */ int evlist_cpu_map_nr; /** The current CPU of the iterator. */ - int cpu; + struct perf_cpu cpu; /** If present, used to set the affinity when switching between CPUs. 
*/ struct affinity *affinity; }; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 796923c80ff6..7660e0bf3b50 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1594,7 +1594,7 @@ int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other, int cpu_map_idx) { - int cpu; + struct perf_cpu cpu; cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx); return perf_cpu_map__idx(other->core.cpus, cpu); @@ -2020,9 +2020,9 @@ retry_open: test_attr__ready(); pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", - pid, cpus->map[idx], group_fd, evsel->open_flags); + pid, cpus->map[idx].cpu, group_fd, evsel->open_flags); - fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[idx], + fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[idx].cpu, group_fd, evsel->open_flags); FD(evsel, idx, thread) = fd; diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c index 666b59baeb70..e808738493e2 100644 --- a/tools/perf/util/expr.c +++ b/tools/perf/util/expr.c @@ -410,7 +410,7 @@ double expr__get_literal(const char *literal) return smt_on() > 0 ? 1.0 : 0.0; if (!strcmp("#num_cpus", literal)) - return cpu__max_present_cpu(); + return cpu__max_present_cpu().cpu; /* * Assume that topology strings are consistent, such as CPUs "0-1" diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e3c1a532d059..6da12e522edc 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -472,7 +472,7 @@ static int write_nrcpus(struct feat_fd *ff, u32 nrc, nra; int ret; - nrc = cpu__max_present_cpu(); + nrc = cpu__max_present_cpu().cpu; nr = sysconf(_SC_NPROCESSORS_ONLN); if (nr < 0) @@ -1163,7 +1163,7 @@ static int build_caches(struct cpu_cache_level caches[], u32 *cntp) u32 nr, cpu; u16 level; - nr = cpu__max_cpu(); + nr = cpu__max_cpu().cpu; for (cpu = 0; cpu < nr; cpu++) { for (level = 0; level < MAX_CACHE_LVL; level++) { @@ -1195,7 +1195,7 @@ static int build_caches(struct cpu_cache_level caches[], u32 *cntp) static int write_cache(struct feat_fd *ff, struct evlist *evlist __maybe_unused) { - u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL; + u32 max_caches = cpu__max_cpu().cpu * MAX_CACHE_LVL; struct cpu_cache_level caches[max_caches]; u32 cnt = 0, i, version = 1; int ret; diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c index 23ecdba9e670..12261ed8c15b 100644 --- a/tools/perf/util/mmap.c +++ b/tools/perf/util/mmap.c @@ -94,7 +94,7 @@ static void perf_mmap__aio_free(struct mmap *map, int idx) } } -static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity) +static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity) { void *data; size_t mmap_len; @@ -138,7 +138,7 @@ static void perf_mmap__aio_free(struct mmap *map, int idx) } static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused, - int cpu __maybe_unused, int affinity __maybe_unused) + struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused) { return 0; } @@ -240,7 +240,8 @@ void mmap__munmap(struct mmap *map) static void build_node_mask(int node, struct mmap_cpu_mask *mask) { - int c, cpu, nr_cpus; + int idx, nr_cpus; + struct perf_cpu cpu; const struct perf_cpu_map *cpu_map = NULL; cpu_map = cpu_map__online(); @@ -248,16 +249,16 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask) return; nr_cpus = perf_cpu_map__nr(cpu_map); - for (c = 0; c < nr_cpus; 
c++) { - cpu = cpu_map->map[c]; /* map c index to online cpu index */ + for (idx = 0; idx < nr_cpus; idx++) { + cpu = cpu_map->map[idx]; /* map c index to online cpu index */ if (cpu__get_node(cpu) == node) - set_bit(cpu, mask->bits); + set_bit(cpu.cpu, mask->bits); } } static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp) { - map->affinity_mask.nbits = cpu__max_cpu(); + map->affinity_mask.nbits = cpu__max_cpu().cpu; map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits); if (!map->affinity_mask.bits) return -1; @@ -265,12 +266,12 @@ static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params * if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask); else if (mp->affinity == PERF_AFFINITY_CPU) - set_bit(map->core.cpu, map->affinity_mask.bits); + set_bit(map->core.cpu.cpu, map->affinity_mask.bits); return 0; } -int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu) +int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu) { if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) { pr_debug2("failed to mmap perf event ring buffer, error %d\n", diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h index 8e259b9610f8..83f6bd4d4082 100644 --- a/tools/perf/util/mmap.h +++ b/tools/perf/util/mmap.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include // for cpu_set_t #ifdef HAVE_AIO_SUPPORT @@ -52,7 +53,7 @@ struct mmap_params { struct auxtrace_mmap_params auxtrace_mp; }; -int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu); +int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu); void mmap__munmap(struct mmap *map); union perf_event *perf_mmap__read_forward(struct mmap *map); diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c index 020411682a3c..734d006d9a8c 100644 --- a/tools/perf/util/perf_api_probe.c +++ b/tools/perf/util/perf_api_probe.c @@ -11,7 +11,7 @@ typedef void (*setup_probe_fn_t)(struct evsel *evsel); -static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) +static int perf_do_probe_api(setup_probe_fn_t fn, struct perf_cpu cpu, const char *str) { struct evlist *evlist; struct evsel *evsel; @@ -29,7 +29,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) evsel = evlist__first(evlist); while (1) { - fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags); + fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags); if (fd < 0) { if (pid == -1 && errno == EACCES) { pid = 0; @@ -43,7 +43,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) fn(evsel); - fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags); + fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags); if (fd < 0) { if (errno == EINVAL) err = -EINVAL; @@ -61,7 +61,8 @@ static bool perf_probe_api(setup_probe_fn_t fn) { const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL}; struct perf_cpu_map *cpus; - int cpu, ret, i = 0; + struct perf_cpu cpu; + int ret, i = 0; cpus = perf_cpu_map__new(NULL); if (!cpus) @@ -136,15 +137,17 @@ bool perf_can_record_cpu_wide(void) .exclude_kernel = 1, }; struct perf_cpu_map *cpus; - int cpu, fd; + struct perf_cpu cpu; + int fd; cpus = perf_cpu_map__new(NULL); if (!cpus) return false; + cpu = cpus->map[0]; perf_cpu_map__put(cpus); - fd = sys_perf_event_open(&attr, -1, cpu, -1, 0); 
+ fd = sys_perf_event_open(&attr, -1, cpu.cpu, -1, 0); if (fd < 0) return false; close(fd); diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 7f782a31bda3..95fb53899bcd 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -1057,7 +1057,7 @@ static struct mmap *get_md(struct evlist *evlist, int cpu) for (i = 0; i < evlist->core.nr_mmaps; i++) { struct mmap *md = &evlist->mmap[i]; - if (md->core.cpu == cpu) + if (md->core.cpu.cpu == cpu) return md; } @@ -1443,7 +1443,7 @@ error: * Dummy, to avoid dragging all the test_attr infrastructure in the python * binding. */ -void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, +void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu, int fd, int group_fd, unsigned long flags) { } diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c index bff669b615ee..20461f174991 100644 --- a/tools/perf/util/record.c +++ b/tools/perf/util/record.c @@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call if (opts->group) evlist__set_leader(evlist); - if (evlist->core.cpus->map[0] < 0) + if (evlist->core.cpus->map[0].cpu < 0) opts->no_inherit = true; use_comm_exec = perf_can_comm_exec(); @@ -229,7 +229,8 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str) { struct evlist *temp_evlist; struct evsel *evsel; - int err, fd, cpu; + int err, fd; + struct perf_cpu cpu = { .cpu = 0 }; bool ret = false; pid_t pid = -1; @@ -246,14 +247,16 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str) if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) { struct perf_cpu_map *cpus = perf_cpu_map__new(NULL); - cpu = cpus ? cpus->map[0] : 0; + if (cpus) + cpu = cpus->map[0]; + perf_cpu_map__put(cpus); } else { cpu = evlist->core.cpus->map[0]; } while (1) { - fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, + fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, perf_event_open_cloexec_flag()); if (fd < 0) { if (pid == -1 && errno == EACCES) { diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 0445bee9290f..bd95d60018a9 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -1555,7 +1555,7 @@ static void get_handler_name(char *str, size_t size, } static void -process_stat(struct evsel *counter, int cpu, int thread, u64 tstamp, +process_stat(struct evsel *counter, struct perf_cpu cpu, int thread, u64 tstamp, struct perf_counts_values *count) { PyObject *handler, *t; @@ -1575,7 +1575,7 @@ process_stat(struct evsel *counter, int cpu, int thread, u64 tstamp, return; } - PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu)); + PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu.cpu)); PyTuple_SetItem(t, n++, _PyLong_FromLong(thread)); tuple_set_u64(t, n++, tstamp); @@ -1599,7 +1599,7 @@ static void python_process_stat(struct perf_stat_config *config, int cpu, thread; if (config->aggr_mode == AGGR_GLOBAL) { - process_stat(counter, -1, -1, tstamp, + process_stat(counter, (struct perf_cpu){ .cpu = -1 }, -1, tstamp, &counter->counts->aggr); return; } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index e1a273048681..f19348dddd55 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -2538,15 +2538,15 @@ int perf_session__cpu_bitmap(struct perf_session *session, } for (i = 0; i < map->nr; i++) { - int cpu = map->map[i]; + 
struct perf_cpu cpu = map->map[i]; - if (cpu >= nr_cpus) { + if (cpu.cpu >= nr_cpus) { pr_err("Requested CPU %d too large. " - "Consider raising MAX_NR_CPUS\n", cpu); + "Consider raising MAX_NR_CPUS\n", cpu.cpu); goto out_delete_map; } - set_bit(cpu, cpu_bitmap); + set_bit(cpu.cpu, cpu_bitmap); } err = 0; @@ -2598,7 +2598,7 @@ int perf_event__process_id_index(struct perf_session *session, if (!sid) return -ENOENT; sid->idx = e->idx; - sid->cpu = e->cpu; + sid->cpu.cpu = e->cpu; sid->tid = e->tid; } return 0; diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index ba95379efcfb..5db83e51ceef 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -121,10 +121,10 @@ static void aggr_printout(struct perf_stat_config *config, id.die, config->csv_output ? 0 : -3, id.core, config->csv_sep); - } else if (id.cpu > -1) { + } else if (id.cpu.cpu > -1) { fprintf(config->output, "CPU%*d%s", config->csv_output ? 0 : -7, - id.cpu, config->csv_sep); + id.cpu.cpu, config->csv_sep); } break; case AGGR_THREAD: @@ -331,7 +331,8 @@ static int first_shadow_cpu_map_idx(struct perf_stat_config *config, struct evsel *evsel, const struct aggr_cpu_id *id) { struct perf_cpu_map *cpus = evsel__cpus(evsel); - int cpu, idx; + struct perf_cpu cpu; + int idx; if (config->aggr_mode == AGGR_NONE) return perf_cpu_map__idx(cpus, id->cpu); @@ -513,7 +514,8 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int static void aggr_update_shadow(struct perf_stat_config *config, struct evlist *evlist) { - int cpu, idx, s; + int idx, s; + struct perf_cpu cpu; struct aggr_cpu_id s2, id; u64 val; struct evsel *counter; @@ -633,7 +635,8 @@ static void aggr_cb(struct perf_stat_config *config, struct evsel *counter, void *data, bool first) { struct aggr_data *ad = data; - int idx, cpu; + int idx; + struct perf_cpu cpu; struct perf_cpu_map *cpus; struct aggr_cpu_id s2; @@ -666,7 +669,7 @@ static void aggr_cb(struct perf_stat_config *config, static void print_counter_aggrdata(struct perf_stat_config *config, struct evsel *counter, int s, char *prefix, bool metric_only, - bool *first, int cpu) + bool *first, struct perf_cpu cpu) { struct aggr_data ad; FILE *output = config->output; @@ -696,7 +699,7 @@ static void print_counter_aggrdata(struct perf_stat_config *config, fprintf(output, "%s", prefix); uval = val * counter->scale; - if (cpu != -1) + if (cpu.cpu != -1) id = aggr_cpu_id__cpu(cpu, /*data=*/NULL); printout(config, id, nr, counter, uval, @@ -731,8 +734,8 @@ static void print_aggr(struct perf_stat_config *config, first = true; evlist__for_each_entry(evlist, counter) { print_counter_aggrdata(config, counter, s, - prefix, metric_only, - &first, /*cpu=*/-1); + prefix, metric_only, + &first, (struct perf_cpu){ .cpu = -1 }); } if (metric_only) fputc('\n', output); @@ -893,7 +896,8 @@ static void print_counter(struct perf_stat_config *config, FILE *output = config->output; u64 ena, run, val; double uval; - int idx, cpu; + int idx; + struct perf_cpu cpu; struct aggr_cpu_id id; perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) { @@ -921,7 +925,8 @@ static void print_no_aggr_metric(struct perf_stat_config *config, struct evlist *evlist, char *prefix) { - int all_idx, cpu; + int all_idx; + struct perf_cpu cpu; perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.cpus) { struct evsel *counter; @@ -1211,7 +1216,8 @@ static void print_percore_thread(struct perf_stat_config *config, struct aggr_cpu_id s2, id; struct perf_cpu_map *cpus; bool first = true; - 
int idx, cpu; + int idx; + struct perf_cpu cpu; cpus = evsel__cpus(counter); perf_cpu_map__for_each_cpu(cpu, idx, cpus) { @@ -1247,8 +1253,8 @@ static void print_percore(struct perf_stat_config *config, fprintf(output, "%s", prefix); print_counter_aggrdata(config, counter, s, - prefix, metric_only, - &first, /*cpu=*/-1); + prefix, metric_only, + &first, (struct perf_cpu){ .cpu = -1 }); } if (metric_only) diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 7dbd7c4f3c33..ee6f03481215 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -297,7 +297,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals, { struct hashmap *mask = counter->per_pkg_mask; struct perf_cpu_map *cpus = evsel__cpus(counter); - int cpu = perf_cpu_map__cpu(cpus, cpu_map_idx); + struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx); int s, d, ret = 0; uint64_t *key; diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 691c12fd8976..335d19cc3063 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -108,7 +108,7 @@ struct runtime_stat { struct rblist value_list; }; -typedef struct aggr_cpu_id (*aggr_get_id_t)(struct perf_stat_config *config, int cpu); +typedef struct aggr_cpu_id (*aggr_get_id_t)(struct perf_stat_config *config, struct perf_cpu cpu); struct perf_stat_config { enum aggr_mode aggr_mode; diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index 96f941e01681..4c9f211249db 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c @@ -728,7 +728,7 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus) int i; int ret = 0; struct perf_cpu_map *m; - int c; + struct perf_cpu c; m = perf_cpu_map__new(s); if (!m) @@ -736,12 +736,12 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus) for (i = 0; i < m->nr; i++) { c = m->map[i]; - if (c >= nr_cpus) { + if (c.cpu >= nr_cpus) { ret = -1; break; } - set_bit(c, cpumask_bits(b)); + set_bit(c.cpu, cpumask_bits(b)); } perf_cpu_map__put(m); diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c index 198982109f0f..c9ba8050cc2b 100644 --- a/tools/perf/util/synthetic-events.c +++ b/tools/perf/util/synthetic-events.c @@ -1191,7 +1191,7 @@ static void synthesize_cpus(struct cpu_map_entries *cpus, cpus->nr = map->nr; for (i = 0; i < map->nr; i++) - cpus->cpu[i] = map->map[i]; + cpus->cpu[i] = map->map[i].cpu; } static void synthesize_mask(struct perf_record_record_cpu_map *mask, @@ -1203,7 +1203,7 @@ static void synthesize_mask(struct perf_record_record_cpu_map *mask, mask->long_size = sizeof(long); for (i = 0; i < map->nr; i++) - set_bit(map->map[i], mask->mask); + set_bit(map->map[i].cpu, mask->mask); } static size_t cpus_size(struct perf_cpu_map *map) @@ -1219,7 +1219,7 @@ static size_t mask_size(struct perf_cpu_map *map, int *max) for (i = 0; i < map->nr; i++) { /* bit position of the cpu is + 1 */ - int bit = map->map[i] + 1; + int bit = map->map[i].cpu + 1; if (bit > *max) *max = bit; @@ -1354,7 +1354,7 @@ int perf_event__synthesize_stat_config(struct perf_tool *tool, } int perf_event__synthesize_stat(struct perf_tool *tool, - u32 cpu, u32 thread, u64 id, + struct perf_cpu cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine) @@ -1366,7 +1366,7 @@ int perf_event__synthesize_stat(struct perf_tool *tool, event.header.misc = 0; event.id = id; - event.cpu = cpu; + event.cpu = cpu.cpu; event.thread = thread; event.val = count->val; event.ena = 
count->ena; @@ -1763,7 +1763,7 @@ int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_ } e->idx = sid->idx; - e->cpu = sid->cpu; + e->cpu = sid->cpu.cpu; e->tid = sid->tid; } } diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h index c931433bacbf..78a0450db164 100644 --- a/tools/perf/util/synthetic-events.h +++ b/tools/perf/util/synthetic-events.h @@ -6,6 +6,7 @@ #include // pid_t #include #include +#include struct auxtrace_record; struct dso; @@ -63,7 +64,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo int perf_event__synthesize_stat_config(struct perf_tool *tool, struct perf_stat_config *config, perf_event__handler_t process, struct machine *machine); int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process, bool attrs); int perf_event__synthesize_stat_round(struct perf_tool *tool, u64 time, u64 type, perf_event__handler_t process, struct machine *machine); -int perf_event__synthesize_stat(struct perf_tool *tool, u32 cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine); +int perf_event__synthesize_stat(struct perf_tool *tool, struct perf_cpu cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine); int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine); int perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data); int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data, unsigned int nr_threads_synthesize); diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 9f0d36ba77f2..9443c29afa52 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -11,6 +11,9 @@ #include #include #include +#ifndef __cplusplus +#include +#endif /* General helper functions */ void usage(const char *err) __noreturn; @@ -66,6 +69,6 @@ extern bool test_attr__enabled; void test_attr__ready(void); void test_attr__init(void); struct perf_event_attr; -void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, +void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu, int fd, int group_fd, unsigned long flags); #endif /* GIT_COMPAT_UTIL_H */ -- cgit v1.2.3 From 0ce05781f4905fcfbbb489519e36be71c7b0bbcc Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 23 Nov 2021 16:12:30 -0800 Subject: perf tools: Fix SMT fallback with large core counts strtoull can only read a 64-bit bitmap. On an AMD EPYC core_cpus may look like: 00000000,00000000,00000000,00000001,00000000,00000000,00000000,00000001 and so the sibling wasn't spotted. Fix by writing a simple hweight string parser. 
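The idea behind the fix, as a minimal standalone sketch (not the perf code itself, which follows below; __builtin_popcount() assumes GCC/Clang): walk the comma-separated hex mask one digit at a time, so masks wider than the 64 bits strtoull() can represent are still counted correctly:

	#include <ctype.h>
	#include <stdio.h>

	/* Count set bits in a sysfs cpumask string; strtoull() would stop
	 * at the first ',' and can only hold 64 bits. */
	static int mask_hweight(const char *s)
	{
		int bits = 0;

		for (; *s; s++) {
			if (*s == ',')
				continue;
			if (!isxdigit((unsigned char)*s))
				break;	/* e.g. a trailing newline */
			bits += __builtin_popcount(isdigit((unsigned char)*s) ?
						   *s - '0' :
						   tolower((unsigned char)*s) - 'a' + 10);
		}
		return bits;
	}

	int main(void)
	{
		const char *sib = "00000000,00000000,00000000,00000001,"
				  "00000000,00000000,00000000,00000001";

		printf("%d\n", mask_hweight(sib));	/* prints 2 */
		return 0;
	}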
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Konstantin Khlebnikov Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211124001231.3277836-3-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/smt.c | 68 +++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 58 insertions(+), 10 deletions(-) diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c index 34f1b1b1176c..2636be65305a 100644 --- a/tools/perf/util/smt.c +++ b/tools/perf/util/smt.c @@ -5,6 +5,56 @@ #include "api/fs/fs.h" #include "smt.h" +/** + * hweight_str - Returns the number of bits set in str. Stops at first non-hex + * or ',' character. + */ +static int hweight_str(char *str) +{ + int result = 0; + + while (*str) { + switch (*str++) { + case '0': + case ',': + break; + case '1': + case '2': + case '4': + case '8': + result++; + break; + case '3': + case '5': + case '6': + case '9': + case 'a': + case 'A': + case 'c': + case 'C': + result += 2; + break; + case '7': + case 'b': + case 'B': + case 'd': + case 'D': + case 'e': + case 'E': + result += 3; + break; + case 'f': + case 'F': + result += 4; + break; + default: + goto done; + } + } +done: + return result; +} + int smt_on(void) { static bool cached; @@ -15,9 +65,12 @@ int smt_on(void) if (cached) return cached_result; - if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0) - goto done; + if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0) { + cached = true; + return cached_result; + } + cached_result = 0; ncpu = sysconf(_SC_NPROCESSORS_CONF); for (cpu = 0; cpu < ncpu; cpu++) { unsigned long long siblings; @@ -35,18 +88,13 @@ int smt_on(void) continue; } /* Entry is hex, but does not have 0x, so need custom parser */ - siblings = strtoull(str, NULL, 16); + siblings = hweight_str(str); free(str); - if (hweight64(siblings) > 1) { + if (siblings > 1) { cached_result = 1; - cached = true; break; } } - if (!cached) { - cached_result = 0; -done: - cached = true; - } + cached = true; return cached_result; } -- cgit v1.2.3 From 6dd8646939a770e4ec0220c1c19d6af25c5877b7 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 23 Nov 2021 16:12:31 -0800 Subject: perf tools: Probe non-deprecated sysfs path 1st Following Documentation/ABI/stable/sysfs-devices-system-cpu the /sys/devices/system/cpu/cpuX/topology/core_cpus is deprecated in favor of thread_siblings, so probe thread_siblings before falling back on core_cpus. 
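The probe order, as a rough standalone sketch (fopen() here stands in for the sysfs__read_str() helper the patch actually uses; paths as in the patch):

	#include <stdio.h>

	/* Try the preferred topology file first, fall back on the
	 * alternative name; returns NULL if the CPU has neither. */
	static FILE *open_siblings(int cpu)
	{
		char fn[256];
		FILE *f;

		snprintf(fn, sizeof(fn),
			 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu);
		f = fopen(fn, "r");
		if (!f) {
			snprintf(fn, sizeof(fn),
				 "/sys/devices/system/cpu/cpu%d/topology/core_cpus", cpu);
			f = fopen(fn, "r");
		}
		return f;
	}

	int main(void)
	{
		char buf[256];
		FILE *f = open_siblings(0);

		if (f && fgets(buf, sizeof(buf), f))
			printf("cpu0 siblings: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}

Either file name yields the same sibling bitmap; only the probe order changes.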
Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Konstantin Khlebnikov Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Stephane Eranian Link: https://lore.kernel.org/r/20211124001231.3277836-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/smt.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c index 2636be65305a..2b0a36ebf27a 100644 --- a/tools/perf/util/smt.c +++ b/tools/perf/util/smt.c @@ -79,11 +79,10 @@ int smt_on(void) char fn[256]; snprintf(fn, sizeof fn, - "devices/system/cpu/cpu%d/topology/core_cpus", cpu); + "devices/system/cpu/cpu%d/topology/thread_siblings", cpu); if (sysfs__read_str(fn, &str, &strlen) < 0) { snprintf(fn, sizeof fn, - "devices/system/cpu/cpu%d/topology/thread_siblings", - cpu); + "devices/system/cpu/cpu%d/topology/core_cpus", cpu); if (sysfs__read_str(fn, &str, &strlen) < 0) continue; } -- cgit v1.2.3 From f56ef30a31d388663e78b9be687d67748c9b7297 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 23 Nov 2021 16:12:28 -0800 Subject: perf expr: Add debug logging for literals Useful for diagnosing problems with metrics. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Konstantin Khlebnikov Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20211124001231.3277836-1-irogers@google.com [ Fixed up perf_cpu conflict, i.e. we need to append ".cpu" to cpu__max_present_cpu() result ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/expr.c | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c index e808738493e2..c94fb9bef919 100644 --- a/tools/perf/util/expr.c +++ b/tools/perf/util/expr.c @@ -405,12 +405,17 @@ double expr_id_data__source_count(const struct expr_id_data *data) double expr__get_literal(const char *literal) { static struct cpu_topology *topology; + double result = NAN; - if (!strcmp("#smt_on", literal)) - return smt_on() > 0 ? 1.0 : 0.0; + if (!strcmp("#smt_on", literal)) { + result = smt_on() > 0 ? 
1.0 : 0.0; + goto out; + } - if (!strcmp("#num_cpus", literal)) - return cpu__max_present_cpu().cpu; + if (!strcmp("#num_cpus", literal)) { + result = cpu__max_present_cpu().cpu; + goto out; + } /* * Assume that topology strings are consistent, such as CPUs "0-1" @@ -422,16 +427,24 @@ double expr__get_literal(const char *literal) topology = cpu_topology__new(); if (!topology) { pr_err("Error creating CPU topology"); - return NAN; + goto out; } } - if (!strcmp("#num_packages", literal)) - return topology->package_cpus_lists; - if (!strcmp("#num_dies", literal)) - return topology->die_cpus_lists; - if (!strcmp("#num_cores", literal)) - return topology->core_cpus_lists; + if (!strcmp("#num_packages", literal)) { + result = topology->package_cpus_lists; + goto out; + } + if (!strcmp("#num_dies", literal)) { + result = topology->die_cpus_lists; + goto out; + } + if (!strcmp("#num_cores", literal)) { + result = topology->core_cpus_lists; + goto out; + } pr_err("Unrecognized literal '%s'", literal); - return NAN; +out: + pr_debug2("literal: %s = %f\n", literal, result); + return result; } -- cgit v1.2.3 From c0dd94558d0e473aa92254e1c48a47900c911e69 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 25 Nov 2021 23:13:05 -0800 Subject: perf pmu-events: Don't lower case MetricExpr This patch changes MetricExpr to be written out in the same case as the source JSON. This enables events in metrics to use modifiers like 'G' which currently yield parse errors when made lower case. To keep tests passing, the literal #smt_on is compared in a case-insensitive way - #SMT_on is present in at least SkylakeX metrics. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20211126071305.3733878-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/jevents.c | 2 -- tools/perf/util/expr.c | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index 2e7c4153875b..1a57c3f81dd4 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -672,8 +672,6 @@ static int json_events(const char *fn, addfield(map, &je.metric_constraint, "", "", val); } else if (json_streq(map, field, "MetricExpr")) { addfield(map, &je.metric_expr, "", "", val); - for (s = je.metric_expr; *s; s++) - *s = tolower(*s); } else if (json_streq(map, field, "ArchStdEvent")) { addfield(map, &arch_std, "", "", val); for (s = arch_std; *s; s++) diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c index c94fb9bef919..675f318ce7c1 100644 --- a/tools/perf/util/expr.c +++ b/tools/perf/util/expr.c @@ -407,7 +407,7 @@ double expr__get_literal(const char *literal) static struct cpu_topology *topology; double result = NAN; - if (!strcmp("#smt_on", literal)) { + if (!strcasecmp("#smt_on", literal)) { result = smt_on() > 0 ?
1.0 : 0.0; goto out; } -- cgit v1.2.3 From 35cb8c713a496e8c114eed5e2a5a30b359876df2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 9 May 2021 10:19:37 -0300 Subject: tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy' To bring in the change made in this cset: f94909ceb1ed4bfd ("x86: Prepare asm files for straight-line-speculation") It silences these perf tools build warnings, no change in the tools: Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S' diff -u tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S Warning: Kernel ABI header at 'tools/arch/x86/lib/memset_64.S' differs from latest version at 'arch/x86/lib/memset_64.S' diff -u tools/arch/x86/lib/memset_64.S arch/x86/lib/memset_64.S The code generated was checked before and after using 'objdump -d /tmp/build/perf/bench/mem-memcpy-x86-64-asm.o', no changes. Cc: Borislav Petkov Cc: Peter Zijlstra Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/x86/lib/memcpy_64.S | 12 ++++++------ tools/arch/x86/lib/memset_64.S | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S index 1cc9da6e29c7..59cf2343f3d9 100644 --- a/tools/arch/x86/lib/memcpy_64.S +++ b/tools/arch/x86/lib/memcpy_64.S @@ -39,7 +39,7 @@ SYM_FUNC_START_WEAK(memcpy) rep movsq movl %edx, %ecx rep movsb - ret + RET SYM_FUNC_END(memcpy) SYM_FUNC_END_ALIAS(__memcpy) EXPORT_SYMBOL(memcpy) @@ -53,7 +53,7 @@ SYM_FUNC_START_LOCAL(memcpy_erms) movq %rdi, %rax movq %rdx, %rcx rep movsb - ret + RET SYM_FUNC_END(memcpy_erms) SYM_FUNC_START_LOCAL(memcpy_orig) @@ -137,7 +137,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movq %r9, 1*8(%rdi) movq %r10, -2*8(%rdi, %rdx) movq %r11, -1*8(%rdi, %rdx) - retq + RET .p2align 4 .Lless_16bytes: cmpl $8, %edx @@ -149,7 +149,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movq -1*8(%rsi, %rdx), %r9 movq %r8, 0*8(%rdi) movq %r9, -1*8(%rdi, %rdx) - retq + RET .p2align 4 .Lless_8bytes: cmpl $4, %edx @@ -162,7 +162,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movl -4(%rsi, %rdx), %r8d movl %ecx, (%rdi) movl %r8d, -4(%rdi, %rdx) - retq + RET .p2align 4 .Lless_3bytes: subl $1, %edx @@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movb %cl, (%rdi) .Lend: - retq + RET SYM_FUNC_END(memcpy_orig) .popsection diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S index 9827ae267f96..d624f2bc42f1 100644 --- a/tools/arch/x86/lib/memset_64.S +++ b/tools/arch/x86/lib/memset_64.S @@ -40,7 +40,7 @@ SYM_FUNC_START(__memset) movl %edx,%ecx rep stosb movq %r9,%rax - ret + RET SYM_FUNC_END(__memset) SYM_FUNC_END_ALIAS(memset) EXPORT_SYMBOL(memset) @@ -63,7 +63,7 @@ SYM_FUNC_START_LOCAL(memset_erms) movq %rdx,%rcx rep stosb movq %r9,%rax - ret + RET SYM_FUNC_END(memset_erms) SYM_FUNC_START_LOCAL(memset_orig) @@ -125,7 +125,7 @@ SYM_FUNC_START_LOCAL(memset_orig) .Lende: movq %r10,%rax - ret + RET .Lbad_alignment: cmpq $7,%rdx -- cgit v1.2.3 From f1dcda0f79548c04f585108e2e165cb4fec951e8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 3 May 2021 11:48:26 -0300 Subject: tools headers UAPI: Update tools's copy of drm.h header Picking the changes from: 43d5ac7d07023cd1 ("drm: document DRM_IOCTL_MODE_GETFB2") It is just a comment, so no changes and silences these perf build warnings: Warning: Kernel ABI header at 'tools/include/uapi/drm/drm.h' differs from latest version at 'include/uapi/drm/drm.h' diff -u tools/include/uapi/drm/drm.h 
include/uapi/drm/drm.h Cc: Simon Ser Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/drm/drm.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index 3b810b53ba8b..642808520d92 100644 --- a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h @@ -1096,6 +1096,24 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer) #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array) +/** + * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata. + * + * This queries metadata about a framebuffer. User-space fills + * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the + * struct as the output. + * + * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles + * will be filled with GEM buffer handles. Planes are valid until one has a + * zero handle -- this can be used to compute the number of planes. + * + * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid + * until one has a zero &drm_mode_fb_cmd2.pitches. + * + * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set + * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the + * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier. + */ #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) /* -- cgit v1.2.3 From 486e5ed88827dabd295cd55f368d513ee8c30eb1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 1 Jul 2021 13:39:15 -0300 Subject: tools headers cpufeatures: Sync with the kernel sources To pick the changes from: d341db8f48ea4331 ("x86/cpufeatures: Add AMD Collaborative Processor Performance Control feature flag") This only causes these perf files to be rebuilt: CC /tmp/build/perf/bench/mem-memcpy-x86-64-asm.o CC /tmp/build/perf/bench/mem-memset-x86-64-asm.o And addresses this perf build warning: Warning: Kernel ABI header at 'tools/arch/x86/include/asm/cpufeatures.h' differs from latest version at 'arch/x86/include/asm/cpufeatures.h' diff -u tools/arch/x86/include/asm/cpufeatures.h arch/x86/include/asm/cpufeatures.h Cc: Huang Rui Cc: Rafael J. Wysocki Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index d5b5f2ab87a0..18de5f76f198 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -315,6 +315,7 @@ #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware.
*/ +#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ -- cgit v1.2.3 From e652ab64e5846d3fe5ac2c0405d55d79ecc52c36 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 7 Aug 2020 08:45:47 -0300 Subject: tools arch x86: Sync the msr-index.h copy with the kernel sources To pick up the changes in: 89aa94b4a218339b ("x86/msr: Add AMD CPPC MSR definitions") Addressing these tools/perf build warnings: diff -u tools/arch/x86/include/asm/msr-index.h arch/x86/include/asm/msr-index.h Warning: Kernel ABI header at 'tools/arch/x86/include/asm/msr-index.h' differs from latest version at 'arch/x86/include/asm/msr-index.h' That makes the beautification scripts to pick some new entries: $ tools/perf/trace/beauty/tracepoints/x86_msr.sh > before $ cp arch/x86/include/asm/msr-index.h tools/arch/x86/include/asm/msr-index.h $ tools/perf/trace/beauty/tracepoints/x86_msr.sh > after $ diff -u before after --- before 2022-01-13 10:59:51.743416890 -0300 +++ after 2022-01-13 11:00:00.776644178 -0300 @@ -303,6 +303,11 @@ [0xc0010299 - x86_AMD_V_KVM_MSRs_offset] = "AMD_RAPL_POWER_UNIT", [0xc001029a - x86_AMD_V_KVM_MSRs_offset] = "AMD_CORE_ENERGY_STATUS", [0xc001029b - x86_AMD_V_KVM_MSRs_offset] = "AMD_PKG_ENERGY_STATUS", + [0xc00102b0 - x86_AMD_V_KVM_MSRs_offset] = "AMD_CPPC_CAP1", + [0xc00102b1 - x86_AMD_V_KVM_MSRs_offset] = "AMD_CPPC_ENABLE", + [0xc00102b2 - x86_AMD_V_KVM_MSRs_offset] = "AMD_CPPC_CAP2", + [0xc00102b3 - x86_AMD_V_KVM_MSRs_offset] = "AMD_CPPC_REQ", + [0xc00102b4 - x86_AMD_V_KVM_MSRs_offset] = "AMD_CPPC_STATUS", [0xc00102f0 - x86_AMD_V_KVM_MSRs_offset] = "AMD_PPIN_CTL", [0xc00102f1 - x86_AMD_V_KVM_MSRs_offset] = "AMD_PPIN", }; $ And this gets rebuilt: CC /tmp/build/perf/trace/beauty/tracepoints/x86_msr.o INSTALL trace_plugins LD /tmp/build/perf/trace/beauty/tracepoints/perf-in.o LD /tmp/build/perf/trace/beauty/perf-in.o LD /tmp/build/perf/perf-in.o LINK /tmp/build/perf/perf Now one can trace systemwide asking to see backtraces to where those MSRs are being read/written with: # perf trace -e msr:*_msr/max-stack=32/ --filter="msr>=AMD_CPPC_CAP1 && msr<=AMD_CPPC_STATUS" ^C# If we use -v (verbose mode) we can see what it does behind the scenes: # perf trace -v -e msr:*_msr/max-stack=32/ --filter="msr>=AMD_CPPC_CAP1 && msr<=AMD_CPPC_STATUS" New filter for msr:read_msr: (msr>=0xc00102b0 && msr<=0xc00102b4) && (common_pid != 2612102 && common_pid != 3841) New filter for msr:write_msr: (msr>=0xc00102b0 && msr<=0xc00102b4) && (common_pid != 2612102 && common_pid != 3841) ^C# Example with a frequent msr: # perf trace -v -e msr:*_msr/max-stack=32/ --filter="msr==IA32_SPEC_CTRL" --max-events 2 Using CPUID AuthenticAMD-25-21-0 0x48 New filter for msr:read_msr: (msr==0x48) && (common_pid != 2612129 && common_pid != 3841) 0x48 New filter for msr:write_msr: (msr==0x48) && (common_pid != 2612129 && common_pid != 3841) mmap size 528384B Looking at the vmlinux_path (8 entries long) symsrc__init: build id mismatch for vmlinux. 
Using /proc/kcore for kernel data Using /proc/kallsyms for symbols 0.000 Timer/2525383 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6) do_trace_write_msr ([kernel.kallsyms]) do_trace_write_msr ([kernel.kallsyms]) __switch_to_xtra ([kernel.kallsyms]) __switch_to ([kernel.kallsyms]) __schedule ([kernel.kallsyms]) schedule ([kernel.kallsyms]) futex_wait_queue_me ([kernel.kallsyms]) futex_wait ([kernel.kallsyms]) do_futex ([kernel.kallsyms]) __x64_sys_futex ([kernel.kallsyms]) do_syscall_64 ([kernel.kallsyms]) entry_SYSCALL_64_after_hwframe ([kernel.kallsyms]) __futex_abstimed_wait_common64 (/usr/lib64/libpthread-2.33.so) 0.030 :0/0 msr:write_msr(msr: IA32_SPEC_CTRL, val: 2) do_trace_write_msr ([kernel.kallsyms]) do_trace_write_msr ([kernel.kallsyms]) __switch_to_xtra ([kernel.kallsyms]) __switch_to ([kernel.kallsyms]) __schedule ([kernel.kallsyms]) schedule_idle ([kernel.kallsyms]) do_idle ([kernel.kallsyms]) cpu_startup_entry ([kernel.kallsyms]) secondary_startup_64_no_verify ([kernel.kallsyms]) # Acked-by: Huang Rui Acked-by: Rafael J. Wysocki Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/all/YeA2PAvHV+uHRhLj@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/x86/include/asm/msr-index.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 01e2650b9585..3faf0f97edb1 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -486,6 +486,23 @@ #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f +/* AMD Collaborative Processor Performance Control MSRs */ +#define MSR_AMD_CPPC_CAP1 0xc00102b0 +#define MSR_AMD_CPPC_ENABLE 0xc00102b1 +#define MSR_AMD_CPPC_CAP2 0xc00102b2 +#define MSR_AMD_CPPC_REQ 0xc00102b3 +#define MSR_AMD_CPPC_STATUS 0xc00102b4 + +#define AMD_CPPC_LOWEST_PERF(x) (((x) >> 0) & 0xff) +#define AMD_CPPC_LOWNONLIN_PERF(x) (((x) >> 8) & 0xff) +#define AMD_CPPC_NOMINAL_PERF(x) (((x) >> 16) & 0xff) +#define AMD_CPPC_HIGHEST_PERF(x) (((x) >> 24) & 0xff) + +#define AMD_CPPC_MAX_PERF(x) (((x) & 0xff) << 0) +#define AMD_CPPC_MIN_PERF(x) (((x) & 0xff) << 8) +#define AMD_CPPC_DES_PERF(x) (((x) & 0xff) << 16) +#define AMD_CPPC_ENERGY_PERF_PREF(x) (((x) & 0xff) << 24) + /* Fam 17h MSRs */ #define MSR_F17H_IRPERF 0xc00000e9 -- cgit v1.2.3 From 46f57d2410150985f81da7cbbb5fdcda01d02ac2 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 13 Jan 2022 22:48:22 -0800 Subject: perf arm: Fix off-by-one directory path Relative path include works in the regular build due to -I paths but may fail in other situations. 
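The off-by-one is easiest to see as path arithmetic (directories inferred from the tree layout; the -I explanation is an assumption about why the old form used to compile):

	/* arm64-frame-pointer-unwind-support.c lives in tools/perf/util/, so:
	 *
	 *   "../arch/..."    -> tools/perf/arch/arm64/include/uapi/asm/perf_regs.h
	 *   "../../arch/..." -> tools/arch/arm64/include/uapi/asm/perf_regs.h
	 *
	 * Only the second path exists in the tree; the first form still built
	 * in the regular build because a -I search directory (e.g.
	 * tools/include) happened to resolve "../arch" into tools/arch.
	 */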
Fixes: 83869019c74cc2d0 ("perf arch: Support register names from all archs") Reviewed-by: German Gomez Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Alexandre Truong Cc: Athira Jajeev Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20220114064822.1806019-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/arm64-frame-pointer-unwind-support.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/arm64-frame-pointer-unwind-support.c b/tools/perf/util/arm64-frame-pointer-unwind-support.c index 4f5ecf51ed38..2242a885fbd7 100644 --- a/tools/perf/util/arm64-frame-pointer-unwind-support.c +++ b/tools/perf/util/arm64-frame-pointer-unwind-support.c @@ -6,7 +6,7 @@ #include "unwind.h" #define perf_event_arm_regs perf_event_arm64_regs -#include "../arch/arm64/include/uapi/asm/perf_regs.h" +#include "../../arch/arm64/include/uapi/asm/perf_regs.h" #undef perf_event_arm_regs struct entries { -- cgit v1.2.3 From 99fc11bb5b6f19d2c3671d6cf38571cb3dedb472 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 13 Jan 2022 22:51:05 -0800 Subject: libperf tests: Update a use of the new cpumap API Fixes a build breakage. Fixes: 6d18804b963b78dc ("perf cpumap: Give CPUs their own type") Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: colin ian king Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Shunsuke Nakamura Link: http://lore.kernel.org/lkml/20220114065105.1806542-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/tests/test-evlist.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c index e7afff12c35a..b3479dfa9a1c 100644 --- a/tools/lib/perf/tests/test-evlist.c +++ b/tools/lib/perf/tests/test-evlist.c @@ -334,7 +334,8 @@ static int test_mmap_cpus(void) }; cpu_set_t saved_mask; char path[PATH_MAX]; - int id, err, cpu, tmp; + int id, err, tmp; + struct perf_cpu cpu; union perf_event *event; int count = 0; @@ -377,7 +378,7 @@ static int test_mmap_cpus(void) cpu_set_t mask; CPU_ZERO(&mask); - CPU_SET(cpu, &mask); + CPU_SET(cpu.cpu, &mask); err = sched_setaffinity(0, sizeof(mask), &mask); __T("sched_setaffinity failed", err == 0); -- cgit v1.2.3 From e000ea0beffb5497425054b151369fe37a792ece Mon Sep 17 00:00:00 2001 From: José Expósito Date: Wed, 8 Dec 2021 18:11:13 +0100 Subject: perf metricgroup: Fix use after free in metric__new() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We shouldn't free() something that will be used in the next line, fix it. Fixes: b85a4d61d3022608 ("perf metric: Allow modifiers on metrics") Addresses-Coverity-ID: 1494000 Signed-off-by: José Expósito Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ian Rogers Cc: Jiri Olsa Cc: John Garry Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20211208171113.22089-1-jose.exposito89@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/metricgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 51c99cb08abf..8826c555f780 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -209,8 +209,8 @@ static struct metric *metric__new(const struct pmu_event *pe, m->metric_name = pe->metric_name; m->modifier = modifier ? 
strdup(modifier) : NULL; if (modifier && !m->modifier) { - free(m); expr__ctx_free(m->pctx); + free(m); return NULL; } m->metric_expr = pe->metric_expr; -- cgit v1.2.3 From a6e62743621ea29bea461774c0bcc68e5de59068 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Wed, 24 Nov 2021 10:03:43 +0100 Subject: perf cputopo: Fix CPU topology reading on s/390 Commit fdf1e29b6118c18f ("perf expr: Add metric literals for topology.") fails on s390: # ./perf test -Fv 7 ... # FAILED tests/expr.c:173 #num_dies >= #num_packages ---- end ---- Simple expression parser: FAILED! # Investigating this issue leads to these functions: build_cpu_topology() +--> has_die_topology(void) { struct utsname uts; if (uname(&uts) < 0) return false; if (strncmp(uts.machine, "x86_64", 6)) return false; .... } which always returns false on s390. The caller build_cpu_topology() checks has_die_topology() return value. On false, the struct cpu_topology::die_cpu_list is not constructed and has zero entries. This leads to the failing comparison: #num_dies >= #num_packages. s390 of course has a positive number of packages. Fix this by adding s390 to the architectures that support the CPU die list. Output after: # ./perf test -Fv 7 7: Simple expression parser : --- start --- division by zero syntax error ---- end ---- Simple expression parser: Ok # Fixes: fdf1e29b6118c18f ("perf expr: Add metric literals for topology.") Reviewed-by: Ian Rogers Signed-off-by: Thomas Richter Cc: Heiko Carstens Cc: Ian Rogers Cc: Sumanth Korikkar Cc: Sven Schnelle Cc: Vasily Gorbik Link: https://lore.kernel.org/r/20211124090343.9436-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cputopo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c index 84ca106a3246..e20b835a1194 100644 --- a/tools/perf/util/cputopo.c +++ b/tools/perf/util/cputopo.c @@ -165,7 +165,8 @@ static bool has_die_topology(void) if (uname(&uts) < 0) return false; - if (strncmp(uts.machine, "x86_64", 6)) + if (strncmp(uts.machine, "x86_64", 6) && + strncmp(uts.machine, "s390x", 5)) return false; scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT, -- cgit v1.2.3 From d3e2bb4359f70c8b1d09a6f8e2f57240aab0da3f Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Fri, 14 Jan 2022 22:28:52 -0800 Subject: perf metric: Fix metric_leader Multiple events may have a metric_leader to aggregate into. This happens for uncore events where, for example, uncore_imc is expanded into uncore_imc_0, uncore_imc_1, etc. Such events all have the same metric_id and should aggregate into the first event. The change introducing metric_ids had a bug where the metric_id was compared to itself, creating an always-true condition. Correct this by comparing the event in the metric_evlist with the metric_leader.
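The always-true condition, distilled into a standalone example (structure and names simplified, not the actual perf types):

	#include <stdio.h>
	#include <string.h>

	struct ev {
		const char *metric_id;
		struct ev *metric_leader;
	};

	int main(void)
	{
		struct ev leader = { "uncore_imc", NULL };
		struct ev evs[] = { { "uncore_imc", NULL }, { "other", NULL } };
		unsigned int i;

		for (i = 0; i < sizeof(evs) / sizeof(evs[0]); i++) {
			/* The buggy form compared the leader's id with itself,
			 *   strcmp(leader.metric_id, leader.metric_id) == 0,
			 * which is always true, so "other" got a leader too.
			 * The fix compares each candidate against the leader: */
			if (!strcmp(evs[i].metric_id, leader.metric_id))
				evs[i].metric_leader = &leader;
		}

		printf("%d %d\n", evs[0].metric_leader != NULL,
		       evs[1].metric_leader != NULL);	/* prints 1 0 */
		return 0;
	}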
Fixes: ec5c5b3d2c21b3f3 ("perf metric: Encode and use metric-id as qualifier") Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: John Garry Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20220115062852.1959424-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/metricgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 8826c555f780..d8492e339521 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -314,7 +314,7 @@ static int setup_metric_events(struct hashmap *ids, */ metric_id = evsel__metric_id(ev); evlist__for_each_entry_continue(metric_evlist, ev) { - if (!strcmp(evsel__metric_id(metric_events[i]), metric_id)) + if (!strcmp(evsel__metric_id(ev), metric_id)) ev->metric_leader = metric_events[i]; } } -- cgit v1.2.3 From 37be585807cb9a810f8395c39c4ee7bdbdc7b0dc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 15 Jan 2022 17:11:10 -0300 Subject: perf cpumap: Add is_dummy() method Needed to check if a cpu_map is dummy, i.e. not a cpu map at all, for pid monitoring scenarios. This probably needs to move to libperf, but since perf itself is the first and so far only user, leave it at tools/perf/util/. Acked-by: Andi Kleen Acked-by: Ian Rogers Cc: Adrian Hunter Cc: Jiri Olsa Cc: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index afc15027d678..0d3c2006a15d 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -2,6 +2,7 @@ #ifndef __PERF_CPUMAP_H #define __PERF_CPUMAP_H +#include #include #include #include @@ -50,6 +51,15 @@ int cpu__setup_cpunode_map(void); int cpu__max_node(void); struct perf_cpu cpu__max_cpu(void); struct perf_cpu cpu__max_present_cpu(void); + +/** + * cpu_map__is_dummy - Events associated with a pid, rather than a CPU, use a single dummy map with an entry of -1. + */ +static inline bool cpu_map__is_dummy(struct perf_cpu_map *cpus) +{ + return cpus->nr == 1 && cpus->map[0].cpu == -1; +} + /** * cpu__get_node - Returns the numa node X as read from * /sys/devices/system/node/nodeX for the given CPU. -- cgit v1.2.3 From 2eea0b56b0d6ace0172550477220a25d633ec5b9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 15 Jan 2022 17:15:09 -0300 Subject: perf evlist: No need to do any affinity setup when profiling pids The cpumap is dummy, so no need to go on figuring out affinity. This way we reduce the setup time for simple scenarios like: $ perf stat sleep 1 Acked-by: Andi Kleen Acked-by: Ian Rogers Cc: Adrian Hunter Cc: Jiri Olsa Cc: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 11eb95b2106b..6e88d404b5b3 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1290,7 +1290,7 @@ void evlist__close(struct evlist *evlist) * With perf record core.cpus is usually NULL. * Use the old method to handle this for now.
*/ - if (!evlist->core.cpus) { + if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) { evlist__for_each_entry_reverse(evlist, evsel) evsel__close(evsel); return; -- cgit v1.2.3 From 9bce13ea88f85344b765abe5d3dabdd0f44dc177 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 9 Dec 2021 21:04:25 +0100 Subject: perf record: Disable debuginfod by default Fedora 35 sets DEBUGINFOD_URLS by default, which might lead to unexpected stalls in perf record exit path, when we try to cache profiled binaries. # DEBUGINFOD_PROGRESS=1 ./perf record -a ^C[ perf record: Woken up 1 times to write data ] Downloading from https://debuginfod.fedoraproject.org/ 447069 Downloading from https://debuginfod.fedoraproject.org/ 1502175 Downloading \^Z Disabling DEBUGINFOD_URLS by default in perf record and adding debuginfod option and .perfconfig variable support to enable it. Default without debuginfo processing: # perf record -a Using system debuginfod setup: # perf record -a --debuginfod Using custom debuginfod url: # perf record -a --debuginfod='https://evenbetterdebuginfodserver.krava' Adding a single perf_debuginfod_setup function and using it also in perf buildid-cache command. Signed-off-by: Jiri Olsa Cc: Alexander Shishkin Cc: Frank Ch. Eigler Cc: Ian Rogers Cc: Mark Rutland Cc: Michael Petlan Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20211209200425.303561-1-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-buildid-cache.txt | 5 ++++- tools/perf/Documentation/perf-config.txt | 9 +++++++++ tools/perf/Documentation/perf-record.txt | 9 +++++++++ tools/perf/builtin-buildid-cache.c | 25 ++++++++++++++----------- tools/perf/builtin-record.c | 13 +++++++++++++ tools/perf/util/util.c | 15 +++++++++++++++ tools/perf/util/util.h | 6 ++++++ 7 files changed, 70 insertions(+), 12 deletions(-) diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt index cd8ce6e8ec12..7e44b419d301 100644 --- a/tools/perf/Documentation/perf-buildid-cache.txt +++ b/tools/perf/Documentation/perf-buildid-cache.txt @@ -74,12 +74,15 @@ OPTIONS used when creating a uprobe for a process that resides in a different mount namespace from the perf(1) utility. ---debuginfod=URLs:: +--debuginfod[=URLs]:: Specify debuginfod URL to be used when retrieving perf.data binaries, it follows the same syntax as the DEBUGINFOD_URLS variable, like: buildid-cache.debuginfod=http://192.168.122.174:8002 + If the URLs is not specified, the value of DEBUGINFOD_URLS + system environment variable is used. + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-buildid-list[1] diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt index 3bb75c1f25e8..0420e71698ee 100644 --- a/tools/perf/Documentation/perf-config.txt +++ b/tools/perf/Documentation/perf-config.txt @@ -587,6 +587,15 @@ record.*:: Use 'n' control blocks in asynchronous (Posix AIO) trace writing mode ('n' default: 1, max: 4). + record.debuginfod:: + Specify debuginfod URL to be used when cacheing perf.data binaries, + it follows the same syntax as the DEBUGINFOD_URLS variable, like: + + http://192.168.122.174:8002 + + If the URLs is 'system', the value of DEBUGINFOD_URLS system environment + variable is used. + diff.*:: diff.order:: This option sets the number of columns to sort the result.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 55df7b073a55..9ccc75935bc5 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -715,6 +715,15 @@ measurements: include::intel-hybrid.txt[] +--debuginfod[=URLs]:: + Specify debuginfod URL to be used when cacheing perf.data binaries, + it follows the same syntax as the DEBUGINFOD_URLS variable, like: + + http://192.168.122.174:8002 + + If the URLs is not specified, the value of DEBUGINFOD_URLS + system environment variable is used. + SEE ALSO -------- linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1] diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index 0db3cfc04c47..cd381693658b 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c @@ -351,10 +351,14 @@ static int build_id_cache__show_all(void) static int perf_buildid_cache_config(const char *var, const char *value, void *cb) { - const char **debuginfod = cb; + struct perf_debuginfod *di = cb; - if (!strcmp(var, "buildid-cache.debuginfod")) - *debuginfod = strdup(value); + if (!strcmp(var, "buildid-cache.debuginfod")) { + di->urls = strdup(value); + if (!di->urls) + return -ENOMEM; + di->set = true; + } return 0; } @@ -373,8 +377,8 @@ int cmd_buildid_cache(int argc, const char **argv) *purge_name_list_str = NULL, *missing_filename = NULL, *update_name_list_str = NULL, - *kcore_filename = NULL, - *debuginfod = NULL; + *kcore_filename = NULL; + struct perf_debuginfod debuginfod = { }; char sbuf[STRERR_BUFSIZE]; struct perf_data data = { @@ -399,8 +403,10 @@ int cmd_buildid_cache(int argc, const char **argv) OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_STRING('u', "update", &update_name_list_str, "file list", "file(s) to update"), - OPT_STRING(0, "debuginfod", &debuginfod, "debuginfod url", - "set debuginfod url"), + OPT_STRING_OPTARG_SET(0, "debuginfod", &debuginfod.urls, + &debuginfod.set, "debuginfod urls", + "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls", + "system"), OPT_INCR('v', "verbose", &verbose, "be more verbose"), OPT_INTEGER(0, "target-ns", &ns_id, "target pid for namespace context"), OPT_END() @@ -425,10 +431,7 @@ int cmd_buildid_cache(int argc, const char **argv) if (argc || !(list_files || opts_flag)) usage_with_options(buildid_cache_usage, buildid_cache_options); - if (debuginfod) { - pr_debug("DEBUGINFOD_URLS=%s\n", debuginfod); - setenv("DEBUGINFOD_URLS", debuginfod, 1); - } + perf_debuginfod_setup(&debuginfod); /* -l is exclusive. It can not be used with other options. 
*/ if (list_files && opts_flag) { diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 0a63295d30f0..bb716c953d02 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -111,6 +111,7 @@ struct record { unsigned long long samples; struct mmap_cpu_mask affinity_mask; unsigned long output_max_size; /* = 0: unlimited */ + struct perf_debuginfod debuginfod; }; static volatile int done; @@ -2177,6 +2178,12 @@ static int perf_record_config(const char *var, const char *value, void *cb) rec->opts.nr_cblocks = nr_cblocks_default; } #endif + if (!strcmp(var, "record.debuginfod")) { + rec->debuginfod.urls = strdup(value); + if (!rec->debuginfod.urls) + return -ENOMEM; + rec->debuginfod.set = true; + } return 0; } @@ -2667,6 +2674,10 @@ static struct option __record_options[] = { parse_control_option), OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup", "Fine-tune event synthesis: default=all", parse_record_synth_option), + OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls, + &record.debuginfod.set, "debuginfod urls", + "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls", + "system"), OPT_END() }; @@ -2720,6 +2731,8 @@ int cmd_record(int argc, const char **argv) if (err) return err; + perf_debuginfod_setup(&record.debuginfod); + /* Make system wide (-a) the default target. */ if (!argc && target__none(&rec->opts.target)) rec->opts.target.system_wide = true; diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index df3c4671be72..fb4f6616b5fa 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -416,3 +416,18 @@ char *perf_exe(char *buf, int len) } return strcpy(buf, "perf"); } + +void perf_debuginfod_setup(struct perf_debuginfod *di) +{ + /* + * By default '!di->set' we clear DEBUGINFOD_URLS, so debuginfod + * processing is not triggered, otherwise we set it to 'di->urls' + * value. If 'di->urls' is "system" we keep DEBUGINFOD_URLS value. + */ + if (!di->set) + setenv("DEBUGINFOD_URLS", "", 1); + else if (di->urls && strcmp(di->urls, "system")) + setenv("DEBUGINFOD_URLS", di->urls, 1); + + pr_debug("DEBUGINFOD_URLS=%s\n", getenv("DEBUGINFOD_URLS")); +} diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 9443c29afa52..7b625cbd2dd8 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -71,4 +71,10 @@ void test_attr__init(void); struct perf_event_attr; void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu, int fd, int group_fd, unsigned long flags); + +struct perf_debuginfod { + const char *urls; + bool set; +}; +void perf_debuginfod_setup(struct perf_debuginfod *di); #endif /* GIT_COMPAT_UTIL_H */ -- cgit v1.2.3
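For completeness, a hypothetical ~/.perfconfig snippet wiring up the new record.debuginfod key documented above (the URL is an example value; 'system' would instead keep whatever DEBUGINFOD_URLS the environment already provides):

	[record]
		# Fetch binaries from this debuginfod server when
		# 'perf record' caches build-ids on exit.
		debuginfod = http://192.168.122.174:8002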