Diffstat (limited to 'tools')
-rw-r--r-- tools/Makefile | 52
-rw-r--r-- tools/build/Makefile | 2
-rw-r--r-- tools/build/Makefile.feature | 44
-rw-r--r-- tools/build/Makefile.include | 2
-rw-r--r-- tools/build/feature/Makefile | 93
-rw-r--r-- tools/build/feature/test-all.c | 5
-rw-r--r-- tools/build/feature/test-bpf.c | 20
-rw-r--r-- tools/hv/hv_fcopy_daemon.c | 24
-rw-r--r-- tools/hv/hv_vss_daemon.c | 2
-rw-r--r-- tools/include/linux/bitmap.h (renamed from tools/perf/util/include/linux/bitmap.h) | 2
-rw-r--r-- tools/include/linux/list.h | 753
-rw-r--r-- tools/include/linux/string.h | 15
-rw-r--r-- tools/lib/bitmap.c (renamed from tools/perf/util/bitmap.c) | 0
-rw-r--r-- tools/lib/bpf/Makefile | 32
-rw-r--r-- tools/lib/bpf/bpf.c | 18
-rw-r--r-- tools/lib/bpf/bpf.h | 2
-rw-r--r-- tools/lib/bpf/libbpf.c | 412
-rw-r--r-- tools/lib/bpf/libbpf.h | 88
-rw-r--r-- tools/lib/find_bit.c | 84
-rw-r--r-- tools/lib/lockdep/Makefile | 2
-rw-r--r-- tools/lib/string.c | 89
-rw-r--r-- tools/lib/subcmd/Build | 7
-rw-r--r-- tools/lib/subcmd/Makefile | 48
-rw-r--r-- tools/lib/subcmd/exec-cmd.c | 209
-rw-r--r-- tools/lib/subcmd/exec-cmd.h | 16
-rw-r--r-- tools/lib/subcmd/help.c (renamed from tools/perf/util/help.c) | 179
-rw-r--r-- tools/lib/subcmd/help.h (renamed from tools/perf/util/help.h) | 13
-rw-r--r-- tools/lib/subcmd/pager.c (renamed from tools/perf/util/pager.c) | 23
-rw-r--r-- tools/lib/subcmd/pager.h | 9
-rw-r--r-- tools/lib/subcmd/parse-options.c (renamed from tools/perf/util/parse-options.c) | 250
-rw-r--r-- tools/lib/subcmd/parse-options.h (renamed from tools/perf/util/parse-options.h) | 30
-rw-r--r-- tools/lib/subcmd/run-command.c (renamed from tools/perf/util/run-command.c) | 24
-rw-r--r-- tools/lib/subcmd/run-command.h (renamed from tools/perf/util/run-command.h) | 12
-rw-r--r-- tools/lib/subcmd/sigchain.c (renamed from tools/perf/util/sigchain.c) | 3
-rw-r--r-- tools/lib/subcmd/sigchain.h (renamed from tools/perf/util/sigchain.h) | 6
-rw-r--r-- tools/lib/subcmd/subcmd-config.c | 11
-rw-r--r-- tools/lib/subcmd/subcmd-config.h | 14
-rw-r--r-- tools/lib/subcmd/subcmd-util.h | 91
-rw-r--r-- tools/lib/traceevent/event-parse.c | 136
-rw-r--r-- tools/lib/traceevent/event-parse.h | 4
-rw-r--r-- tools/lib/util/find_next_bit.c | 89
-rw-r--r-- tools/perf/Build | 9
-rw-r--r-- tools/perf/Documentation/perf-config.txt | 103
-rw-r--r-- tools/perf/Documentation/perf-evlist.txt | 3
-rw-r--r-- tools/perf/Documentation/perf-record.txt | 27
-rw-r--r-- tools/perf/Documentation/perf-report.txt | 41
-rw-r--r-- tools/perf/Documentation/perf-stat.txt | 34
-rw-r--r-- tools/perf/Documentation/perf-top.txt | 3
-rw-r--r-- tools/perf/Documentation/tips.txt | 29
-rw-r--r-- tools/perf/MANIFEST | 9
-rw-r--r-- tools/perf/Makefile.perf | 67
-rw-r--r-- tools/perf/arch/x86/include/arch-tests.h | 8
-rw-r--r-- tools/perf/arch/x86/tests/insn-x86.c | 2
-rw-r--r-- tools/perf/arch/x86/tests/intel-cqm.c | 6
-rw-r--r-- tools/perf/arch/x86/tests/perf-time-to-tsc.c | 3
-rw-r--r-- tools/perf/arch/x86/tests/rdpmc.c | 2
-rw-r--r-- tools/perf/arch/x86/util/Build | 1
-rw-r--r-- tools/perf/arch/x86/util/intel-bts.c | 4
-rw-r--r-- tools/perf/arch/x86/util/intel-pt.c | 6
-rw-r--r-- tools/perf/bench/futex-hash.c | 2
-rw-r--r-- tools/perf/bench/futex-lock-pi.c | 2
-rw-r--r-- tools/perf/bench/futex-requeue.c | 2
-rw-r--r-- tools/perf/bench/futex-wake-parallel.c | 2
-rw-r--r-- tools/perf/bench/futex-wake.c | 2
-rw-r--r-- tools/perf/bench/mem-functions.c | 2
-rw-r--r-- tools/perf/bench/numa.c | 2
-rw-r--r-- tools/perf/bench/sched-messaging.c | 2
-rw-r--r-- tools/perf/bench/sched-pipe.c | 2
-rw-r--r-- tools/perf/builtin-annotate.c | 44
-rw-r--r-- tools/perf/builtin-bench.c | 2
-rw-r--r-- tools/perf/builtin-buildid-cache.c | 2
-rw-r--r-- tools/perf/builtin-buildid-list.c | 4
-rw-r--r-- tools/perf/builtin-config.c | 66
-rw-r--r-- tools/perf/builtin-data.c | 2
-rw-r--r-- tools/perf/builtin-diff.c | 15
-rw-r--r-- tools/perf/builtin-evlist.c | 13
-rw-r--r-- tools/perf/builtin-help.c | 10
-rw-r--r-- tools/perf/builtin-inject.c | 2
-rw-r--r-- tools/perf/builtin-kmem.c | 2
-rw-r--r-- tools/perf/builtin-kvm.c | 5
-rw-r--r-- tools/perf/builtin-list.c | 2
-rw-r--r-- tools/perf/builtin-lock.c | 2
-rw-r--r-- tools/perf/builtin-mem.c | 2
-rw-r--r-- tools/perf/builtin-probe.c | 17
-rw-r--r-- tools/perf/builtin-record.c | 74
-rw-r--r-- tools/perf/builtin-report.c | 60
-rw-r--r-- tools/perf/builtin-sched.c | 2
-rw-r--r-- tools/perf/builtin-script.c | 245
-rw-r--r-- tools/perf/builtin-stat.c | 679
-rw-r--r-- tools/perf/builtin-timechart.c | 2
-rw-r--r-- tools/perf/builtin-top.c | 75
-rw-r--r-- tools/perf/builtin-trace.c | 4
-rw-r--r-- tools/perf/builtin-version.c | 10
-rw-r--r-- tools/perf/builtin.h | 1
-rw-r--r-- tools/perf/command-list.txt | 3
-rw-r--r-- tools/perf/config/Makefile | 35
-rw-r--r-- tools/perf/config/utilities.mak | 19
-rw-r--r-- tools/perf/perf.c | 24
-rw-r--r-- tools/perf/scripts/python/stat-cpi.py | 77
-rw-r--r-- tools/perf/tests/.gitignore | 1
-rw-r--r-- tools/perf/tests/Build | 16
-rw-r--r-- tools/perf/tests/attr.c | 6
-rw-r--r-- tools/perf/tests/bp_signal.c | 2
-rw-r--r-- tools/perf/tests/bp_signal_overflow.c | 2
-rw-r--r-- tools/perf/tests/bpf-script-test-prologue.c | 35
-rw-r--r-- tools/perf/tests/bpf.c | 93
-rw-r--r-- tools/perf/tests/builtin-test.c | 141
-rw-r--r-- tools/perf/tests/code-reading.c | 16
-rw-r--r-- tools/perf/tests/cpumap.c | 88
-rw-r--r-- tools/perf/tests/dso-data.c | 6
-rw-r--r-- tools/perf/tests/dwarf-unwind.c | 37
-rw-r--r-- tools/perf/tests/event_update.c | 117
-rw-r--r-- tools/perf/tests/evsel-roundtrip-name.c | 5
-rw-r--r-- tools/perf/tests/evsel-tp-sched.c | 2
-rw-r--r-- tools/perf/tests/fdarray.c | 4
-rw-r--r-- tools/perf/tests/hists_common.c | 1
-rw-r--r-- tools/perf/tests/hists_cumulate.c | 11
-rw-r--r-- tools/perf/tests/hists_filter.c | 5
-rw-r--r-- tools/perf/tests/hists_link.c | 11
-rw-r--r-- tools/perf/tests/hists_output.c | 13
-rw-r--r-- tools/perf/tests/keep-tracking.c | 5
-rw-r--r-- tools/perf/tests/kmod-path.c | 2
-rw-r--r-- tools/perf/tests/llvm.c | 75
-rw-r--r-- tools/perf/tests/llvm.h | 2
-rw-r--r-- tools/perf/tests/make | 76
-rw-r--r-- tools/perf/tests/mmap-basic.c | 2
-rw-r--r-- tools/perf/tests/mmap-thread-lookup.c | 8
-rw-r--r-- tools/perf/tests/openat-syscall-all-cpus.c | 2
-rw-r--r-- tools/perf/tests/openat-syscall-tp-fields.c | 2
-rw-r--r-- tools/perf/tests/openat-syscall.c | 2
-rw-r--r-- tools/perf/tests/parse-events.c | 2
-rw-r--r-- tools/perf/tests/parse-no-sample-id-all.c | 2
-rw-r--r-- tools/perf/tests/perf-record.c | 8
-rw-r--r-- tools/perf/tests/pmu.c | 2
-rw-r--r-- tools/perf/tests/python-use.c | 3
-rw-r--r-- tools/perf/tests/sample-parsing.c | 2
-rw-r--r-- tools/perf/tests/stat.c | 111
-rw-r--r-- tools/perf/tests/sw-clock.c | 2
-rw-r--r-- tools/perf/tests/switch-tracking.c | 8
-rw-r--r-- tools/perf/tests/task-exit.c | 2
-rw-r--r-- tools/perf/tests/tests.h | 95
-rw-r--r-- tools/perf/tests/thread-map.c | 45
-rw-r--r-- tools/perf/tests/thread-mg-share.c | 2
-rw-r--r-- tools/perf/tests/topology.c | 2
-rw-r--r-- tools/perf/tests/vmlinux-kallsyms.c | 2
-rw-r--r-- tools/perf/ui/browser.c | 2
-rw-r--r-- tools/perf/ui/browsers/annotate.c | 4
-rw-r--r-- tools/perf/ui/browsers/hists.c | 337
-rw-r--r-- tools/perf/ui/gtk/hists.c | 152
-rw-r--r-- tools/perf/ui/hist.c | 14
-rw-r--r-- tools/perf/ui/stdio/hist.c | 100
-rw-r--r-- tools/perf/util/Build | 28
-rw-r--r-- tools/perf/util/annotate.c | 23
-rw-r--r-- tools/perf/util/auxtrace.c | 2
-rw-r--r-- tools/perf/util/bpf-loader.c | 433
-rw-r--r-- tools/perf/util/bpf-loader.h | 4
-rw-r--r-- tools/perf/util/bpf-prologue.c | 455
-rw-r--r-- tools/perf/util/bpf-prologue.h | 34
-rw-r--r-- tools/perf/util/build-id.c | 2
-rw-r--r-- tools/perf/util/cache.h | 14
-rw-r--r-- tools/perf/util/callchain.c | 164
-rw-r--r-- tools/perf/util/callchain.h | 30
-rw-r--r-- tools/perf/util/cgroup.c | 2
-rw-r--r-- tools/perf/util/color.c | 2
-rw-r--r-- tools/perf/util/config.c | 2
-rw-r--r-- tools/perf/util/cpumap.c | 51
-rw-r--r-- tools/perf/util/cpumap.h | 1
-rw-r--r-- tools/perf/util/data-convert-bt.c | 2
-rw-r--r-- tools/perf/util/dso.c | 2
-rw-r--r-- tools/perf/util/env.c | 9
-rw-r--r-- tools/perf/util/environment.c | 8
-rw-r--r-- tools/perf/util/event.c | 312
-rw-r--r-- tools/perf/util/event.h | 150
-rw-r--r-- tools/perf/util/evlist.c | 112
-rw-r--r-- tools/perf/util/evlist.h | 10
-rw-r--r-- tools/perf/util/evsel.c | 53
-rw-r--r-- tools/perf/util/evsel.h | 4
-rw-r--r-- tools/perf/util/exec_cmd.c | 148
-rw-r--r-- tools/perf/util/exec_cmd.h | 12
-rwxr-xr-x tools/perf/util/generate-cmdlist.sh | 15
-rw-r--r-- tools/perf/util/header.c | 207
-rw-r--r-- tools/perf/util/header.h | 17
-rw-r--r-- tools/perf/util/help-unknown-cmd.c | 103
-rw-r--r-- tools/perf/util/help-unknown-cmd.h | 0
-rw-r--r-- tools/perf/util/hist.c | 120
-rw-r--r-- tools/perf/util/hist.h | 24
-rw-r--r-- tools/perf/util/include/linux/string.h | 3
-rw-r--r-- tools/perf/util/intel-pt.c | 4
-rw-r--r-- tools/perf/util/machine.c | 74
-rw-r--r-- tools/perf/util/map.c | 7
-rw-r--r-- tools/perf/util/parse-branch-options.c | 2
-rw-r--r-- tools/perf/util/parse-events.c | 10
-rw-r--r-- tools/perf/util/parse-regs-options.c | 2
-rw-r--r-- tools/perf/util/path.c | 18
-rw-r--r-- tools/perf/util/pmu.c | 1
-rw-r--r-- tools/perf/util/probe-event.c | 7
-rw-r--r-- tools/perf/util/probe-finder.c | 6
-rw-r--r-- tools/perf/util/python-ext-sources | 2
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-python.c | 115
-rw-r--r-- tools/perf/util/session.c | 202
-rw-r--r-- tools/perf/util/session.h | 2
-rw-r--r-- tools/perf/util/sort.c | 601
-rw-r--r-- tools/perf/util/sort.h | 14
-rw-r--r-- tools/perf/util/stat.c | 63
-rw-r--r-- tools/perf/util/stat.h | 10
-rw-r--r-- tools/perf/util/string.c | 16
-rw-r--r-- tools/perf/util/strlist.c | 8
-rw-r--r-- tools/perf/util/strlist.h | 9
-rw-r--r-- tools/perf/util/symbol-elf.c | 9
-rw-r--r-- tools/perf/util/symbol.c | 68
-rw-r--r-- tools/perf/util/symbol.h | 4
-rw-r--r-- tools/perf/util/term.c | 35
-rw-r--r-- tools/perf/util/term.h | 10
-rw-r--r-- tools/perf/util/thread.c | 10
-rw-r--r-- tools/perf/util/thread_map.c | 28
-rw-r--r-- tools/perf/util/thread_map.h | 3
-rw-r--r-- tools/perf/util/tool.h | 8
-rw-r--r-- tools/perf/util/trace-event-parse.c | 2
-rw-r--r-- tools/perf/util/trace-event.h | 4
-rw-r--r-- tools/perf/util/unwind-libdw.c | 63
-rw-r--r-- tools/perf/util/unwind-libdw.h | 2
-rw-r--r-- tools/perf/util/unwind-libunwind.c | 80
-rw-r--r-- tools/perf/util/util.c | 68
-rw-r--r-- tools/perf/util/util.h | 20
-rw-r--r-- tools/power/acpi/Makefile | 16
-rw-r--r-- tools/power/acpi/common/cmfsize.c | 2
-rw-r--r-- tools/power/acpi/common/getopt.c | 4
-rw-r--r-- tools/power/acpi/os_specific/service_layers/oslibcfs.c | 5
-rw-r--r-- tools/power/acpi/os_specific/service_layers/oslinuxtbl.c | 2
-rw-r--r-- tools/power/acpi/os_specific/service_layers/osunixdir.c | 2
-rw-r--r-- tools/power/acpi/os_specific/service_layers/osunixmap.c | 2
-rw-r--r-- tools/power/acpi/os_specific/service_layers/osunixxf.c | 2
-rw-r--r-- tools/power/acpi/tools/acpidbg/Makefile | 27
-rw-r--r-- tools/power/acpi/tools/acpidbg/acpidbg.c | 438
-rw-r--r-- tools/power/acpi/tools/acpidump/acpidump.h | 2
-rw-r--r-- tools/power/acpi/tools/acpidump/apdump.c | 2
-rw-r--r-- tools/power/acpi/tools/acpidump/apfiles.c | 15
-rw-r--r-- tools/power/acpi/tools/acpidump/apmain.c | 2
-rw-r--r-- tools/power/cpupower/Makefile | 19
-rw-r--r-- tools/power/cpupower/bench/Makefile | 8
-rw-r--r-- tools/power/cpupower/utils/cpufreq-info.c | 247
-rw-r--r-- tools/power/cpupower/utils/cpuidle-info.c | 16
-rw-r--r-- tools/power/cpupower/utils/cpupower-info.c | 9
-rw-r--r-- tools/power/cpupower/utils/cpupower-set.c | 10
-rw-r--r-- tools/power/cpupower/utils/helpers/topology.c | 2
-rw-r--r-- tools/scripts/Makefile.arch (renamed from tools/perf/config/Makefile.arch) | 0
-rw-r--r-- tools/spi/.gitignore | 2
-rw-r--r-- tools/spi/Makefile | 4
-rw-r--r-- tools/spi/spidev_fdx.c | 158
-rw-r--r-- tools/spi/spidev_test.c | 399
-rw-r--r-- tools/testing/nvdimm/Kbuild | 2
-rw-r--r-- tools/testing/nvdimm/test/iomap.c | 93
-rw-r--r-- tools/testing/nvdimm/test/nfit.c | 11
-rw-r--r-- tools/testing/selftests/Makefile | 2
-rw-r--r-- tools/testing/selftests/breakpoints/.gitignore | 1
-rw-r--r-- tools/testing/selftests/capabilities/Makefile | 21
-rwxr-xr-x tools/testing/selftests/firmware/fw_filesystem.sh | 29
-rw-r--r-- tools/testing/selftests/ftrace/test.d/instances/instance.tc | 90
-rw-r--r-- tools/testing/selftests/intel_pstate/Makefile | 15
-rw-r--r-- tools/testing/selftests/intel_pstate/aperf.c | 80
-rw-r--r-- tools/testing/selftests/intel_pstate/msr.c | 39
-rwxr-xr-x tools/testing/selftests/intel_pstate/run.sh | 113
-rw-r--r-- tools/testing/selftests/net/.gitignore | 1
-rw-r--r-- tools/testing/selftests/net/Makefile | 2
-rw-r--r-- tools/testing/selftests/net/reuseport_bpf.c | 514
-rw-r--r-- tools/testing/selftests/powerpc/benchmarks/.gitignore | 1
-rw-r--r-- tools/testing/selftests/powerpc/benchmarks/Makefile | 5
-rw-r--r-- tools/testing/selftests/powerpc/benchmarks/context_switch.c | 466
-rw-r--r-- tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c | 8
-rw-r--r-- tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c | 8
-rw-r--r-- tools/testing/selftests/powerpc/harness.c | 43
-rw-r--r-- tools/testing/selftests/powerpc/pmu/Makefile | 4
-rw-r--r-- tools/testing/selftests/powerpc/pmu/ebb/Makefile | 3
-rw-r--r-- tools/testing/selftests/powerpc/pmu/ebb/ebb.c | 3
-rw-r--r-- tools/testing/selftests/powerpc/pmu/lib.c | 26
-rw-r--r-- tools/testing/selftests/powerpc/pmu/lib.h | 1
-rwxr-xr-x tools/testing/selftests/powerpc/scripts/hmi.sh | 89
-rw-r--r-- tools/testing/selftests/powerpc/tm/.gitignore | 3
-rw-r--r-- tools/testing/selftests/powerpc/tm/Makefile | 4
-rw-r--r-- tools/testing/selftests/powerpc/tm/tm-resched-dscr.c | 3
-rw-r--r-- tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c | 74
-rw-r--r-- tools/testing/selftests/powerpc/tm/tm-signal-stack.c | 76
-rw-r--r-- tools/testing/selftests/powerpc/tm/tm-syscall.c | 13
-rw-r--r-- tools/testing/selftests/powerpc/tm/tm-vmxcopy.c | 103
-rw-r--r-- tools/testing/selftests/powerpc/tm/tm.h | 34
-rw-r--r-- tools/testing/selftests/powerpc/utils.c | 87
-rw-r--r-- tools/testing/selftests/powerpc/utils.h | 7
-rw-r--r-- tools/testing/selftests/ptrace/.gitignore | 1
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh | 9
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm.sh | 22
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/parse-console.sh | 41
-rw-r--r-- tools/testing/selftests/rcutorture/doc/TINY_RCU.txt | 1
-rw-r--r-- tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt | 4
-rw-r--r-- tools/testing/selftests/seccomp/seccomp_bpf.c | 27
-rw-r--r-- tools/testing/selftests/seccomp/test_harness.h | 5
-rw-r--r-- tools/testing/selftests/timers/.gitignore | 1
-rw-r--r-- tools/testing/selftests/vm/.gitignore | 5
-rw-r--r-- tools/testing/selftests/x86/Makefile | 6
-rw-r--r-- tools/testing/selftests/x86/vdso_restorer.c | 88
-rw-r--r-- tools/virtio/asm/barrier.h | 22
-rw-r--r-- tools/virtio/linux/compiler.h | 9
-rw-r--r-- tools/virtio/linux/kernel.h | 1
-rw-r--r-- tools/virtio/ringtest/Makefile | 22
-rw-r--r-- tools/virtio/ringtest/README | 2
-rw-r--r-- tools/virtio/ringtest/main.c | 366
-rw-r--r-- tools/virtio/ringtest/main.h | 119
-rw-r--r-- tools/virtio/ringtest/ring.c | 272
-rwxr-xr-x tools/virtio/ringtest/run-on-all.sh | 24
-rw-r--r-- tools/virtio/ringtest/virtio_ring_0_9.c | 316
-rw-r--r-- tools/virtio/ringtest/virtio_ring_poll.c | 2
310 files changed, 13373 insertions, 2081 deletions
diff --git a/tools/Makefile b/tools/Makefile
index 7dc820a8c1f1..6339f6ac3ccb 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -8,23 +8,24 @@ include scripts/Makefile.include
help:
@echo 'Possible targets:'
@echo ''
- @echo ' acpi - ACPI tools'
- @echo ' cgroup - cgroup tools'
- @echo ' cpupower - a tool for all things x86 CPU power'
- @echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
- @echo ' hv - tools used when in Hyper-V clients'
- @echo ' iio - IIO tools'
- @echo ' lguest - a minimal 32-bit x86 hypervisor'
- @echo ' perf - Linux performance measurement and analysis tool'
- @echo ' selftests - various kernel selftests'
- @echo ' turbostat - Intel CPU idle stats and freq reporting tool'
- @echo ' usb - USB testing tools'
- @echo ' virtio - vhost test module'
- @echo ' net - misc networking tools'
- @echo ' vm - misc vm tools'
+ @echo ' acpi - ACPI tools'
+ @echo ' cgroup - cgroup tools'
+ @echo ' cpupower - a tool for all things x86 CPU power'
+ @echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
+ @echo ' freefall - laptop accelerometer program for disk protection'
+ @echo ' hv - tools used when in Hyper-V clients'
+ @echo ' iio - IIO tools'
+ @echo ' lguest - a minimal 32-bit x86 hypervisor'
+ @echo ' net - misc networking tools'
+ @echo ' perf - Linux performance measurement and analysis tool'
+ @echo ' selftests - various kernel selftests'
+ @echo ' spi - spi tools'
+ @echo ' tmon - thermal monitoring and tuning tool'
+ @echo ' turbostat - Intel CPU idle stats and freq reporting tool'
+ @echo ' usb - USB testing tools'
+ @echo ' virtio - vhost test module'
+ @echo ' vm - misc vm tools'
@echo ' x86_energy_perf_policy - Intel energy policy tool'
- @echo ' tmon - thermal monitoring and tuning tool'
- @echo ' freefall - laptop accelerometer program for disk protection'
@echo ''
@echo 'You can do:'
@echo ' $$ make -C tools/ <tool>_install'
@@ -52,7 +53,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup firewire hv guest usb virtio vm net iio: FORCE
+cgroup firewire hv guest spi usb virtio vm net iio: FORCE
$(call descend,$@)
liblockdep: FORCE
@@ -96,7 +97,7 @@ cgroup_install firewire_install hv_install lguest_install perf_install usb_insta
$(call descend,$(@:_install=),install)
selftests_install:
- $(call descend,testing/$(@:_clean=),install)
+ $(call descend,testing/$(@:_install=),install)
turbostat_install x86_energy_perf_policy_install:
$(call descend,power/x86/$(@:_install=),install)
@@ -118,7 +119,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean iio_clean:
+cgroup_clean hv_clean firewire_clean lguest_clean spi_clean usb_clean virtio_clean vm_clean net_clean iio_clean:
$(call descend,$(@:_clean=),clean)
liblockdep_clean:
@@ -127,6 +128,12 @@ liblockdep_clean:
libapi_clean:
$(call descend,lib/api,clean)
+libbpf_clean:
+ $(call descend,lib/bpf,clean)
+
+libsubcmd_clean:
+ $(call descend,lib/subcmd,clean)
+
perf_clean:
$(call descend,$(@:_clean=),clean)
@@ -142,9 +149,12 @@ tmon_clean:
freefall_clean:
$(call descend,laptop/freefall,clean)
+build_clean:
+ $(call descend,build,clean)
+
clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean lguest_clean \
- perf_clean selftests_clean turbostat_clean usb_clean virtio_clean \
+ perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
vm_clean net_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
- freefall_clean
+ freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean
.PHONY: FORCE
diff --git a/tools/build/Makefile b/tools/build/Makefile
index a93036272d43..0d5a0e3a8fa9 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -25,7 +25,7 @@ export Q srctree CC LD
MAKEFLAGS := --no-print-directory
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
-all: fixdep
+all: $(OUTPUT)fixdep
clean:
$(call QUIET_CLEAN, fixdep)
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 37ff4c9f92f1..02db3cdff20f 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -7,7 +7,7 @@ endif
feature_check = $(eval $(feature_check_code))
define feature_check_code
- feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
+ feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
endef
feature_set = $(eval $(feature_set_code))
@@ -101,7 +101,6 @@ ifeq ($(feature-all), 1)
#
$(foreach feat,$(FEATURE_TESTS),$(call feature_set,$(feat)))
else
- $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS)" LDFLAGS=$(LDFLAGS) -i -j -C $(feature_dir) $(addsuffix .bin,$(FEATURE_TESTS)) >/dev/null 2>&1)
$(foreach feat,$(FEATURE_TESTS),$(call feature_check,$(feat)))
endif
@@ -123,13 +122,31 @@ define feature_print_text_code
MSG = $(shell printf '...%30s: %s' $(1) $(2))
endef
+#
+# generates feature value assignment for name, like:
+# $(call feature_assign,dwarf) == feature-dwarf=1
+#
+feature_assign = feature-$(1)=$(feature-$(1))
+
FEATURE_DUMP_FILENAME = $(OUTPUT)FEATURE-DUMP$(FEATURE_USER)
-FEATURE_DUMP := $(foreach feat,$(FEATURE_DISPLAY),feature-$(feat)($(feature-$(feat))))
-FEATURE_DUMP_FILE := $(shell touch $(FEATURE_DUMP_FILENAME); cat $(FEATURE_DUMP_FILENAME))
+FEATURE_DUMP := $(shell touch $(FEATURE_DUMP_FILENAME); cat $(FEATURE_DUMP_FILENAME))
-ifeq ($(dwarf-post-unwind),1)
- FEATURE_DUMP += dwarf-post-unwind($(dwarf-post-unwind-text))
-endif
+feature_dump_check = $(eval $(feature_dump_check_code))
+define feature_dump_check_code
+ ifeq ($(findstring $(1),$(FEATURE_DUMP)),)
+ $(2) := 1
+ endif
+endef
+
+#
+# First check if any test from FEATURE_DISPLAY is missing from the
+# cached FEATURE-DUMP file, and set feature_display := 1 if so
+$(foreach feat,$(FEATURE_DISPLAY),$(call feature_dump_check,$(call feature_assign,$(feat)),feature_display))
+
+#
+# Now also check if any other test changed,
+# so we force FEATURE-DUMP generation
+$(foreach feat,$(FEATURE_TESTS),$(call feature_dump_check,$(call feature_assign,$(feat)),feature_dump_changed))
# The $(feature_display) controls the default detection message
# output. It's set if:
@@ -138,13 +155,13 @@ endif
# - one of the $(FEATURE_DISPLAY) is not detected
# - VF is enabled
-ifneq ("$(FEATURE_DUMP)","$(FEATURE_DUMP_FILE)")
- $(shell echo "$(FEATURE_DUMP)" > $(FEATURE_DUMP_FILENAME))
- feature_display := 1
+ifeq ($(feature_dump_changed),1)
+ $(shell rm -f $(FEATURE_DUMP_FILENAME))
+ $(foreach feat,$(FEATURE_TESTS),$(shell echo "$(call feature_assign,$(feat))" >> $(FEATURE_DUMP_FILENAME)))
endif
feature_display_check = $(eval $(feature_check_display_code))
-define feature_display_check_code
+define feature_check_display_code
ifneq ($(feature-$(1)), 1)
feature_display := 1
endif
@@ -161,11 +178,6 @@ ifeq ($(feature_display),1)
$(info )
$(info Auto-detecting system features:)
$(foreach feat,$(FEATURE_DISPLAY),$(call feature_print_status,$(feat),))
-
- ifeq ($(dwarf-post-unwind),1)
- $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
- endif
-
ifneq ($(feature_verbose),1)
$(info )
endif
diff --git a/tools/build/Makefile.include b/tools/build/Makefile.include
index 4e09ad617a60..be630bed66d2 100644
--- a/tools/build/Makefile.include
+++ b/tools/build/Makefile.include
@@ -4,7 +4,7 @@ ifdef CROSS_COMPILE
fixdep:
else
fixdep:
- $(Q)$(MAKE) -C $(srctree)/tools/build fixdep
+ $(Q)$(MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= $(OUTPUT)fixdep
endif
.PHONY: fixdep
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index cea04ce9f35c..bf8f0352264d 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -1,4 +1,3 @@
-
FILES= \
test-all.bin \
test-backtrace.bin \
@@ -38,38 +37,40 @@ FILES= \
test-bpf.bin \
test-get_cpuid.bin
+FILES := $(addprefix $(OUTPUT),$(FILES))
+
CC := $(CROSS_COMPILE)gcc -MD
PKG_CONFIG := $(CROSS_COMPILE)pkg-config
all: $(FILES)
-__BUILD = $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS)
- BUILD = $(__BUILD) > $(OUTPUT)$(@:.bin=.make.output) 2>&1
+__BUILD = $(CC) $(CFLAGS) -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS)
+ BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1
###############################
-test-all.bin:
+$(OUTPUT)test-all.bin:
$(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
-test-hello.bin:
+$(OUTPUT)test-hello.bin:
$(BUILD)
-test-pthread-attr-setaffinity-np.bin:
+$(OUTPUT)test-pthread-attr-setaffinity-np.bin:
$(BUILD) -D_GNU_SOURCE -lpthread
-test-stackprotector-all.bin:
+$(OUTPUT)test-stackprotector-all.bin:
$(BUILD) -fstack-protector-all
-test-fortify-source.bin:
+$(OUTPUT)test-fortify-source.bin:
$(BUILD) -O2 -D_FORTIFY_SOURCE=2
-test-bionic.bin:
+$(OUTPUT)test-bionic.bin:
$(BUILD)
-test-libelf.bin:
+$(OUTPUT)test-libelf.bin:
$(BUILD) -lelf
-test-glibc.bin:
+$(OUTPUT)test-glibc.bin:
$(BUILD)
DWARFLIBS := -ldw
@@ -77,37 +78,37 @@ ifeq ($(findstring -static,${LDFLAGS}),-static)
DWARFLIBS += -lelf -lebl -lz -llzma -lbz2
endif
-test-dwarf.bin:
+$(OUTPUT)test-dwarf.bin:
$(BUILD) $(DWARFLIBS)
-test-libelf-mmap.bin:
+$(OUTPUT)test-libelf-mmap.bin:
$(BUILD) -lelf
-test-libelf-getphdrnum.bin:
+$(OUTPUT)test-libelf-getphdrnum.bin:
$(BUILD) -lelf
-test-libnuma.bin:
+$(OUTPUT)test-libnuma.bin:
$(BUILD) -lnuma
-test-numa_num_possible_cpus.bin:
+$(OUTPUT)test-numa_num_possible_cpus.bin:
$(BUILD) -lnuma
-test-libunwind.bin:
+$(OUTPUT)test-libunwind.bin:
$(BUILD) -lelf
-test-libunwind-debug-frame.bin:
+$(OUTPUT)test-libunwind-debug-frame.bin:
$(BUILD) -lelf
-test-libaudit.bin:
+$(OUTPUT)test-libaudit.bin:
$(BUILD) -laudit
-test-libslang.bin:
+$(OUTPUT)test-libslang.bin:
$(BUILD) -I/usr/include/slang -lslang
-test-gtk2.bin:
+$(OUTPUT)test-gtk2.bin:
$(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
-test-gtk2-infobar.bin:
+$(OUTPUT)test-gtk2-infobar.bin:
$(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
grep-libs = $(filter -l%,$(1))
@@ -119,63 +120,63 @@ PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
-test-libperl.bin:
+$(OUTPUT)test-libperl.bin:
$(BUILD) $(FLAGS_PERL_EMBED)
-test-libpython.bin:
+$(OUTPUT)test-libpython.bin:
$(BUILD)
-test-libpython-version.bin:
+$(OUTPUT)test-libpython-version.bin:
$(BUILD)
-test-libbfd.bin:
+$(OUTPUT)test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
-test-liberty.bin:
- $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
+$(OUTPUT)test-liberty.bin:
+ $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
-test-liberty-z.bin:
- $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
+$(OUTPUT)test-liberty-z.bin:
+ $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
-test-cplus-demangle.bin:
+$(OUTPUT)test-cplus-demangle.bin:
$(BUILD) -liberty
-test-backtrace.bin:
+$(OUTPUT)test-backtrace.bin:
$(BUILD)
-test-timerfd.bin:
+$(OUTPUT)test-timerfd.bin:
$(BUILD)
-test-libdw-dwarf-unwind.bin:
+$(OUTPUT)test-libdw-dwarf-unwind.bin:
$(BUILD) # -ldw provided by $(FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind)
-test-libbabeltrace.bin:
+$(OUTPUT)test-libbabeltrace.bin:
$(BUILD) # -lbabeltrace provided by $(FEATURE_CHECK_LDFLAGS-libbabeltrace)
-test-sync-compare-and-swap.bin:
+$(OUTPUT)test-sync-compare-and-swap.bin:
$(BUILD)
-test-compile-32.bin:
- $(CC) -m32 -o $(OUTPUT)$@ test-compile.c
+$(OUTPUT)test-compile-32.bin:
+ $(CC) -m32 -o $@ test-compile.c
-test-compile-x32.bin:
- $(CC) -mx32 -o $(OUTPUT)$@ test-compile.c
+$(OUTPUT)test-compile-x32.bin:
+ $(CC) -mx32 -o $@ test-compile.c
-test-zlib.bin:
+$(OUTPUT)test-zlib.bin:
$(BUILD) -lz
-test-lzma.bin:
+$(OUTPUT)test-lzma.bin:
$(BUILD) -llzma
-test-get_cpuid.bin:
+$(OUTPUT)test-get_cpuid.bin:
$(BUILD)
-test-bpf.bin:
+$(OUTPUT)test-bpf.bin:
$(BUILD)
--include *.d
+-include $(OUTPUT)*.d
###############################
clean:
- rm -f $(FILES) *.d $(FILES:.bin=.make.output)
+ rm -f $(FILES) $(OUTPUT)*.d $(FILES:.bin=.make.output)
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 33cf6f20bd4e..81025cade45f 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -125,6 +125,10 @@
# include "test-get_cpuid.c"
#undef main
+#define main main_test_bpf
+# include "test-bpf.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -153,6 +157,7 @@ int main(int argc, char *argv[])
main_test_pthread_attr_setaffinity_np();
main_test_lzma();
main_test_get_cpuid();
+ main_test_bpf();
return 0;
}
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index 062bac811af9..b389026839b9 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -1,9 +1,23 @@
+#include <asm/unistd.h>
#include <linux/bpf.h>
+#include <unistd.h>
+
+#ifndef __NR_bpf
+# if defined(__i386__)
+# define __NR_bpf 357
+# elif defined(__x86_64__)
+# define __NR_bpf 321
+# elif defined(__aarch64__)
+# define __NR_bpf 280
+# else
+# error __NR_bpf not defined. libbpf does not support your arch.
+# endif
+#endif
int main(void)
{
union bpf_attr attr;
+ /* Check fields in attr */
attr.prog_type = BPF_PROG_TYPE_KPROBE;
attr.insn_cnt = 0;
attr.insns = 0;
@@ -14,5 +28,9 @@ int main(void)
attr.kern_version = 0;
attr = attr;
- return 0;
+ /*
+ * Test existence of __NR_bpf and BPF_PROG_LOAD.
+ * This call should fail if we run the testcase.
+ */
+ return syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(attr));
}
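
For context, a runtime variant of this probe (not part of the patch) can distinguish a kernel without bpf(2) from an ordinary load failure by checking errno. A minimal sketch, assuming __NR_bpf is supplied by the system headers:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	/*
	 * A deliberately empty BPF_PROG_LOAD fails either way, but
	 * ENOSYS specifically means the kernel has no bpf(2) syscall.
	 */
	if (syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)) < 0 &&
	    errno == ENOSYS)
		printf("bpf(2) not available on this kernel\n");
	else
		printf("bpf(2) syscall exists\n");
	return 0;
}
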
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index 5480e4e424eb..fdc9ca4c0356 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -37,12 +37,14 @@
static int target_fd;
static char target_fname[W_MAX_PATH];
+static unsigned long long filesize;
static int hv_start_fcopy(struct hv_start_fcopy *smsg)
{
int error = HV_E_FAIL;
char *q, *p;
+ filesize = 0;
p = (char *)smsg->path_name;
snprintf(target_fname, sizeof(target_fname), "%s/%s",
(char *)smsg->path_name, (char *)smsg->file_name);
@@ -98,14 +100,26 @@ done:
static int hv_copy_data(struct hv_do_fcopy *cpmsg)
{
ssize_t bytes_written;
+ int ret = 0;
bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size,
cpmsg->offset);
- if (bytes_written != cpmsg->size)
- return HV_E_FAIL;
+ filesize += cpmsg->size;
+ if (bytes_written != cpmsg->size) {
+ switch (errno) {
+ case ENOSPC:
+ ret = HV_ERROR_DISK_FULL;
+ break;
+ default:
+ ret = HV_E_FAIL;
+ break;
+ }
+ syslog(LOG_ERR, "pwrite failed to write %llu bytes: %ld (%s)",
+ filesize, (long)bytes_written, strerror(errno));
+ }
- return 0;
+ return ret;
}
static int hv_copy_finished(void)
@@ -165,7 +179,7 @@ int main(int argc, char *argv[])
}
openlog("HV_FCOPY", 0, LOG_USER);
- syslog(LOG_INFO, "HV_FCOPY starting; pid is:%d", getpid());
+ syslog(LOG_INFO, "starting; pid is:%d", getpid());
fcopy_fd = open("/dev/vmbus/hv_fcopy", O_RDWR);
@@ -201,7 +215,7 @@ int main(int argc, char *argv[])
}
kernel_modver = *(__u32 *)buffer;
in_handshake = 0;
- syslog(LOG_INFO, "HV_FCOPY: kernel module version: %d",
+ syslog(LOG_INFO, "kernel module version: %d",
kernel_modver);
continue;
}
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 96234b638249..5d51d6ff08e6 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -254,7 +254,7 @@ int main(int argc, char *argv[])
syslog(LOG_ERR, "Illegal op:%d\n", op);
}
vss_msg->error = error;
- len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
+ len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
if (len != sizeof(struct hv_vss_msg)) {
syslog(LOG_ERR, "write failed; error: %d %s", errno,
strerror(errno));
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 40bd21488032..28f5493da491 100644
--- a/tools/perf/util/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -11,6 +11,8 @@ int __bitmap_weight(const unsigned long *bitmap, int bits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
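
To see how the new BITMAP_FIRST_WORD_MASK() pairs with the existing BITMAP_LAST_WORD_MASK(), here is a small stand-alone sketch (not from the patch; the macros are copied inline and a 64-bit long is assumed):

#include <stdio.h>

#define BITS_PER_LONG 64	/* assumption: LP64 host */
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) \
	(((nbits) % BITS_PER_LONG) ? (1UL << ((nbits) % BITS_PER_LONG)) - 1 : ~0UL)

int main(void)
{
	/* mask covering bits 4..11 of a single 64-bit word */
	unsigned long mask = BITMAP_FIRST_WORD_MASK(4) & BITMAP_LAST_WORD_MASK(12);

	printf("mask = %#lx\n", mask);	/* prints 0xff0 */
	return 0;
}
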
diff --git a/tools/include/linux/list.h b/tools/include/linux/list.h
index a017f1595676..1da423820ad4 100644
--- a/tools/include/linux/list.h
+++ b/tools/include/linux/list.h
@@ -1,11 +1,751 @@
-#include <linux/compiler.h>
-#include <linux/kernel.h>
+#ifndef __TOOLS_LINUX_LIST_H
+#define __TOOLS_LINUX_LIST_H
+
#include <linux/types.h>
+#include <linux/poison.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+#else
+extern void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+#endif
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ WRITE_ONCE(prev->next, next);
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void __list_del_entry(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+#else
+extern void __list_del_entry(struct list_head *entry);
+extern void list_del(struct list_head *entry);
+#endif
+
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
+static inline void list_replace(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+ struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add_tail(list, head);
+}
+
+/**
+ * list_is_last - tests whether @list is the last entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_last(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->next == head;
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * list_empty_careful - tests whether a list is empty and not being modified
+ * @head: the list to test
+ *
+ * Description:
+ * tests whether a list is empty _and_ checks that no other CPU might be
+ * in the process of modifying either member (next or prev)
+ *
+ * NOTE: using list_empty_careful() without synchronization
+ * can only be safe if the only activity that can happen
+ * to the list entry is list_del_init(). Eg. it cannot be used
+ * if another CPU could re-list_add() it.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+ return (next == head) && (next == head->prev);
+}
+
+/**
+ * list_rotate_left - rotate the list to the left
+ * @head: the head of the list
+ */
+static inline void list_rotate_left(struct list_head *head)
+{
+ struct list_head *first;
+
+ if (!list_empty(head)) {
+ first = head->next;
+ list_move_tail(first, head);
+ }
+}
+
+/**
+ * list_is_singular - tests whether a list has just one entry.
+ * @head: the list to test.
+ */
+static inline int list_is_singular(const struct list_head *head)
+{
+ return !list_empty(head) && (head->next == head->prev);
+}
+
+static inline void __list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ struct list_head *new_first = entry->next;
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry;
+ entry->next = list;
+ head->next = new_first;
+ new_first->prev = head;
+}
+
+/**
+ * list_cut_position - cut a list into two
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ * and if so we won't cut the list
+ *
+ * This helper moves the initial part of @head, up to and
+ * including @entry, from @head to @list. You should
+ * pass on @entry an element you know is on @head. @list
+ * should be an empty list or a list you do not care about
+ * losing its data.
+ *
+ */
+static inline void list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ if (list_empty(head))
+ return;
+ if (list_is_singular(head) &&
+ (head->next != entry && head != entry))
+ return;
+ if (entry == head)
+ INIT_LIST_HEAD(list);
+ else
+ __list_cut_position(list, head, entry);
+}
+
+static inline void __list_splice(const struct list_head *list,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+/**
+ * list_splice - join two lists, this is designed for stacks
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(const struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head, head->next);
+}
+
+/**
+ * list_splice_tail - join two lists, each list being a queue
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice_tail(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head->prev, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head, head->next);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_splice_tail_init - join two lists and reinitialise the emptied list
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_tail_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head->prev, head);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * list_first_entry - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+/**
+ * list_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+/**
+ * list_first_entry_or_null - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_first_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
+/**
+ * list_next_entry - get the next element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * list_prev_entry - get the prev element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_prev_entry(pos, member) \
+ list_entry((pos)->member.prev, typeof(*(pos)), member)
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_prev - iterate over a list backwards
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+ for (pos = (head)->prev; pos != (head); pos = pos->prev)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+/**
+ * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_prev_safe(pos, n, head) \
+ for (pos = (head)->prev, n = pos->prev; \
+ pos != (head); \
+ pos = n, n = pos->prev)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_first_entry(head, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_next_entry(pos, member))
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_last_entry(head, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_prev_entry(pos, member))
+
+/**
+ * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
+ * @pos: the type * to use as a start point
+ * @head: the head of the list
+ * @member: the name of the list_head within the struct.
+ *
+ * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
+ */
+#define list_prepare_entry(pos, head, member) \
+ ((pos) ? : list_entry(head, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue - continue iteration over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue(pos, head, member) \
+ for (pos = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = list_next_entry(pos, member))
+
+/**
+ * list_for_each_entry_continue_reverse - iterate backwards from the given point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Start to iterate over list of given type backwards, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = list_prev_entry(pos, member); \
+ &pos->member != (head); \
+ pos = list_prev_entry(pos, member))
+
+/**
+ * list_for_each_entry_from - iterate over list of given type from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type, continuing from current position.
+ */
+#define list_for_each_entry_from(pos, head, member) \
+ for (; &pos->member != (head); \
+ pos = list_next_entry(pos, member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_first_entry(head, typeof(*pos), member), \
+ n = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = n, n = list_next_entry(n, member))
+
+/**
+ * list_for_each_entry_safe_continue - continue list iteration safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type, continuing after current point,
+ * safe against removal of list entry.
+ */
+#define list_for_each_entry_safe_continue(pos, n, head, member) \
+ for (pos = list_next_entry(pos, member), \
+ n = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = n, n = list_next_entry(n, member))
+
+/**
+ * list_for_each_entry_safe_from - iterate over list from current point safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type from current point, safe against
+ * removal of list entry.
+ */
+#define list_for_each_entry_safe_from(pos, n, head, member) \
+ for (n = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = n, n = list_next_entry(n, member))
+
+/**
+ * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, safe against removal
+ * of list entry.
+ */
+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_last_entry(head, typeof(*pos), member), \
+ n = list_prev_entry(pos, member); \
+ &pos->member != (head); \
+ pos = n, n = list_prev_entry(n, member))
-#include "../../../include/linux/list.h"
+/**
+ * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
+ * @pos: the loop cursor used in the list_for_each_entry_safe loop
+ * @n: temporary storage used in list_for_each_entry_safe
+ * @member: the name of the list_head within the struct.
+ *
+ * list_safe_reset_next is not safe to use in general if the list may be
+ * modified concurrently (eg. the lock is dropped in the loop body). An
+ * exception to this is if the cursor element (pos) is pinned in the list,
+ * and list_safe_reset_next is called after re-taking the lock and before
+ * completing the current iteration of the loop body.
+ */
+#define list_safe_reset_next(pos, n, member) \
+ n = list_next_entry(pos, member)
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+
+ WRITE_ONCE(*pprev, next);
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline void hlist_add_behind(struct hlist_node *n,
+ struct hlist_node *prev)
+{
+ n->next = prev->next;
+ prev->next = n;
+ n->pprev = &prev->next;
+
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
+/* after that we'll appear to be on some hlist and hlist_del will work */
+static inline void hlist_add_fake(struct hlist_node *n)
+{
+ n->pprev = &n->next;
+}
+
+static inline bool hlist_fake(struct hlist_node *h)
+{
+ return h->pprev == &h->next;
+}
+
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+ struct hlist_head *new)
+{
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = NULL;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each(pos, head) \
+ for (pos = (head)->first; pos ; pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ pos = n)
+
+#define hlist_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * hlist_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(pos, member) \
+ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_from - iterate over a hlist continuing from current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another &struct hlist_node to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
-#ifndef TOOLS_LIST_H
-#define TOOLS_LIST_H
/**
* list_del_range - deletes range of entries from list.
* @begin: first element in the range to delete from the list.
@@ -27,4 +767,5 @@ static inline void list_del_range(struct list_head *begin,
*/
#define list_for_each_from(pos, head) \
for (; pos != (head); pos = pos->next)
-#endif
+
+#endif /* __TOOLS_LINUX_LIST_H */
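
The file above is essentially a userspace copy of the kernel's list API. A short stand-alone usage sketch (not part of the patch; the struct and values are invented, and the build is assumed to pass -I tools/include):

#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>		/* the header added above */

struct item {
	int value;
	struct list_head node;
};

int main(void)
{
	LIST_HEAD(items);
	struct item *it, *tmp;
	int i;

	/* build a small queue: 0, 1, 2 */
	for (i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		if (!it)
			break;
		it->value = i;
		list_add_tail(&it->node, &items);
	}

	/* walk the list */
	list_for_each_entry(it, &items, node)
		printf("%d\n", it->value);

	/* tear it down; the _safe variant allows deletion while iterating */
	list_for_each_entry_safe(it, tmp, &items, node) {
		list_del(&it->node);
		free(it);
	}
	return 0;
}
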
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
new file mode 100644
index 000000000000..e26223f1f287
--- /dev/null
+++ b/tools/include/linux/string.h
@@ -0,0 +1,15 @@
+#ifndef _TOOLS_LINUX_STRING_H_
+#define _TOOLS_LINUX_STRING_H_
+
+
+#include <linux/types.h> /* for size_t */
+
+void *memdup(const void *src, size_t len);
+
+int strtobool(const char *s, bool *res);
+
+#ifndef __UCLIBC__
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
+
+#endif /* _TOOLS_LINUX_STRING_H_ */
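
A brief usage sketch for the three declarations above (not part of the patch; the implementations are expected to live in tools/lib/string.c, and the inputs are made up):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/string.h>	/* the header added above */

int main(void)
{
	char buf[8];
	bool on = false;
	char *copy = memdup("hello", 6);	/* length includes the NUL here */

	if (!copy)
		return 1;

	strlcpy(buf, "truncated input", sizeof(buf));	/* always NUL-terminates */
	if (strtobool("1", &on))			/* accepts 1/0/y/n; returns 0 on success */
		fprintf(stderr, "unparseable bool\n");

	printf("%s / %s / %d\n", copy, buf, on);
	free(copy);
	return 0;
}
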
diff --git a/tools/perf/util/bitmap.c b/tools/lib/bitmap.c
index 0a1adc1111fd..0a1adc1111fd 100644
--- a/tools/perf/util/bitmap.c
+++ b/tools/lib/bitmap.c
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index a3caaf3eafbd..fc1bc75ae56d 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -6,6 +6,12 @@ BPF_EXTRAVERSION = 1
MAKEFLAGS += --no-print-directory
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
# Makefiles suck: This macro sets a default value of $(2) for the
# variable named by $(1), unless the variable has been set by
@@ -31,7 +37,8 @@ INSTALL = install
DESTDIR ?=
DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
-LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+include $(srctree)/tools/scripts/Makefile.arch
+
ifeq ($(LP64), 1)
libdir_relative = lib64
else
@@ -57,13 +64,6 @@ ifndef VERBOSE
VERBOSE = 0
endif
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
FEATURE_USER = .libbpf
FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf
FEATURE_DISPLAY = libelf bpf
@@ -71,7 +71,21 @@ FEATURE_DISPLAY = libelf bpf
INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean TAGS tags cscope help
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+ check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
export prefix libdir src obj
@@ -178,7 +192,7 @@ config-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
- $(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \
+ $(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
$(RM) LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index a6331050ab79..1f91cc941b7c 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -14,8 +14,8 @@
#include "bpf.h"
/*
- * When building perf, unistd.h is override. Define __NR_bpf is
- * required to be defined.
+ * When building perf, unistd.h is overridden, so __NR_bpf must
+ * be defined explicitly.
*/
#ifndef __NR_bpf
# if defined(__i386__)
@@ -83,3 +83,17 @@ int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
log_buf[0] = 0;
return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
+
+int bpf_map_update_elem(int fd, void *key, void *value,
+ u64 flags)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+ attr.flags = flags;
+
+ return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+}
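A hedged sketch of how the new bpf_map_update_elem() wrapper is meant to be called once a map fd is available. The key/value types must match the map's key_size/value_size; BPF_ANY (from the uapi bpf.h) asks the kernel to create the element or update it in place. The helper below is made up for illustration:

#include <linux/bpf.h>		/* BPF_ANY */
#include "bpf.h"

/* Hypothetical helper: record one event for 'pid' in a hash map whose
 * key is an int and whose value is a 64-bit counter.
 */
static int record_event(int map_fd, int pid)
{
	unsigned long long count = 1;

	return bpf_map_update_elem(map_fd, &pid, &count, BPF_ANY);
}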
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 854b7361b784..a76465541292 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -20,4 +20,6 @@ int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
u32 kern_version, char *log_buf,
size_t log_buf_sz);
+int bpf_map_update_elem(int fd, void *key, void *value,
+ u64 flags);
#endif
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e176bad19bcb..8334a5a9d5d7 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -152,29 +152,36 @@ struct bpf_program {
} *reloc_desc;
int nr_reloc;
- int fd;
+ struct {
+ int nr;
+ int *fds;
+ } instances;
+ bpf_program_prep_t preprocessor;
struct bpf_object *obj;
void *priv;
bpf_program_clear_priv_t clear_priv;
};
+struct bpf_map {
+ int fd;
+ char *name;
+ struct bpf_map_def def;
+ void *priv;
+ bpf_map_clear_priv_t clear_priv;
+};
+
static LIST_HEAD(bpf_objects_list);
struct bpf_object {
char license[64];
u32 kern_version;
- void *maps_buf;
- size_t maps_buf_sz;
struct bpf_program *programs;
size_t nr_programs;
- int *map_fds;
- /*
- * This field is required because maps_buf will be freed and
- * maps_buf_sz will be set to 0 after loaded.
- */
- size_t nr_map_fds;
+ struct bpf_map *maps;
+ size_t nr_maps;
+
bool loaded;
/*
@@ -188,6 +195,7 @@ struct bpf_object {
Elf *elf;
GElf_Ehdr ehdr;
Elf_Data *symbols;
+ size_t strtabidx;
struct {
GElf_Shdr shdr;
Elf_Data *data;
@@ -206,10 +214,25 @@ struct bpf_object {
static void bpf_program__unload(struct bpf_program *prog)
{
+ int i;
+
if (!prog)
return;
- zclose(prog->fd);
+ /*
+ * If the object is opened but the program was never loaded,
+ * it is possible that prog->instances.nr == -1.
+ */
+ if (prog->instances.nr > 0) {
+ for (i = 0; i < prog->instances.nr; i++)
+ zclose(prog->instances.fds[i]);
+ } else if (prog->instances.nr != -1) {
+ pr_warning("Internal error: instances.nr is %d\n",
+ prog->instances.nr);
+ }
+
+ prog->instances.nr = -1;
+ zfree(&prog->instances.fds);
}
static void bpf_program__exit(struct bpf_program *prog)
@@ -260,7 +283,8 @@ bpf_program__init(void *data, size_t size, char *name, int idx,
memcpy(prog->insns, data,
prog->insns_cnt * sizeof(struct bpf_insn));
prog->idx = idx;
- prog->fd = -1;
+ prog->instances.fds = NULL;
+ prog->instances.nr = -1;
return 0;
errout:
@@ -469,21 +493,77 @@ static int
bpf_object__init_maps(struct bpf_object *obj, void *data,
size_t size)
{
- if (size == 0) {
+ size_t nr_maps;
+ int i;
+
+ nr_maps = size / sizeof(struct bpf_map_def);
+ if (!data || !nr_maps) {
pr_debug("%s doesn't need map definition\n",
obj->path);
return 0;
}
- obj->maps_buf = malloc(size);
- if (!obj->maps_buf) {
- pr_warning("malloc maps failed: %s\n", obj->path);
+ pr_debug("maps in %s: %zd bytes\n", obj->path, size);
+
+ obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
+ if (!obj->maps) {
+ pr_warning("alloc maps for object failed\n");
return -ENOMEM;
}
+ obj->nr_maps = nr_maps;
- obj->maps_buf_sz = size;
- memcpy(obj->maps_buf, data, size);
- pr_debug("maps in %s: %ld bytes\n", obj->path, (long)size);
+ for (i = 0; i < nr_maps; i++) {
+ struct bpf_map_def *def = &obj->maps[i].def;
+
+ /*
+ * Fill all fds with -1 so we won't close an unrelated fd
+ * (fd 0 is stdin) on the failure path (zclose won't close
+ * negative fds).
+ */
+ obj->maps[i].fd = -1;
+
+ /* Save map definition into obj->maps */
+ *def = ((struct bpf_map_def *)data)[i];
+ }
+ return 0;
+}
+
+static int
+bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
+{
+ int i;
+ Elf_Data *symbols = obj->efile.symbols;
+
+ if (!symbols || maps_shndx < 0)
+ return -EINVAL;
+
+ for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
+ GElf_Sym sym;
+ size_t map_idx;
+ const char *map_name;
+
+ if (!gelf_getsym(symbols, i, &sym))
+ continue;
+ if (sym.st_shndx != maps_shndx)
+ continue;
+
+ map_name = elf_strptr(obj->efile.elf,
+ obj->efile.strtabidx,
+ sym.st_name);
+ map_idx = sym.st_value / sizeof(struct bpf_map_def);
+ if (map_idx >= obj->nr_maps) {
+ pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
+ map_name, map_idx, obj->nr_maps);
+ continue;
+ }
+ obj->maps[map_idx].name = strdup(map_name);
+ if (!obj->maps[map_idx].name) {
+ pr_warning("failed to alloc map name\n");
+ return -ENOMEM;
+ }
+ pr_debug("map %zu is \"%s\"\n", map_idx,
+ obj->maps[map_idx].name);
+ }
return 0;
}
@@ -492,7 +572,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
Elf_Scn *scn = NULL;
- int idx = 0, err = 0;
+ int idx = 0, err = 0, maps_shndx = -1;
/* Elf is corrupted/truncated, avoid calling elf_strptr. */
if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
@@ -542,16 +622,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
err = bpf_object__init_kversion(obj,
data->d_buf,
data->d_size);
- else if (strcmp(name, "maps") == 0)
+ else if (strcmp(name, "maps") == 0) {
err = bpf_object__init_maps(obj, data->d_buf,
data->d_size);
- else if (sh.sh_type == SHT_SYMTAB) {
+ maps_shndx = idx;
+ } else if (sh.sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
obj->path);
err = -LIBBPF_ERRNO__FORMAT;
- } else
+ } else {
obj->efile.symbols = data;
+ obj->efile.strtabidx = sh.sh_link;
+ }
} else if ((sh.sh_type == SHT_PROGBITS) &&
(sh.sh_flags & SHF_EXECINSTR) &&
(data->d_size > 0)) {
@@ -586,6 +669,13 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (err)
goto out;
}
+
+ if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
+ pr_warning("Corrupted ELF file: index of strtab invalid\n");
+ return LIBBPF_ERRNO__FORMAT;
+ }
+ if (maps_shndx >= 0)
+ err = bpf_object__init_maps_name(obj, maps_shndx);
out:
return err;
}
@@ -668,37 +758,15 @@ static int
bpf_object__create_maps(struct bpf_object *obj)
{
unsigned int i;
- size_t nr_maps;
- int *pfd;
-
- nr_maps = obj->maps_buf_sz / sizeof(struct bpf_map_def);
- if (!obj->maps_buf || !nr_maps) {
- pr_debug("don't need create maps for %s\n",
- obj->path);
- return 0;
- }
-
- obj->map_fds = malloc(sizeof(int) * nr_maps);
- if (!obj->map_fds) {
- pr_warning("realloc perf_bpf_map_fds failed\n");
- return -ENOMEM;
- }
- obj->nr_map_fds = nr_maps;
- /* fill all fd with -1 */
- memset(obj->map_fds, -1, sizeof(int) * nr_maps);
-
- pfd = obj->map_fds;
- for (i = 0; i < nr_maps; i++) {
- struct bpf_map_def def;
+ for (i = 0; i < obj->nr_maps; i++) {
+ struct bpf_map_def *def = &obj->maps[i].def;
+ int *pfd = &obj->maps[i].fd;
- def = *(struct bpf_map_def *)(obj->maps_buf +
- i * sizeof(struct bpf_map_def));
-
- *pfd = bpf_create_map(def.type,
- def.key_size,
- def.value_size,
- def.max_entries);
+ *pfd = bpf_create_map(def->type,
+ def->key_size,
+ def->value_size,
+ def->max_entries);
if (*pfd < 0) {
size_t j;
int err = *pfd;
@@ -706,22 +774,17 @@ bpf_object__create_maps(struct bpf_object *obj)
pr_warning("failed to create map: %s\n",
strerror(errno));
for (j = 0; j < i; j++)
- zclose(obj->map_fds[j]);
- obj->nr_map_fds = 0;
- zfree(&obj->map_fds);
+ zclose(obj->maps[j].fd);
return err;
}
pr_debug("create map: fd=%d\n", *pfd);
- pfd++;
}
- zfree(&obj->maps_buf);
- obj->maps_buf_sz = 0;
return 0;
}
static int
-bpf_program__relocate(struct bpf_program *prog, int *map_fds)
+bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
int i;
@@ -741,7 +804,7 @@ bpf_program__relocate(struct bpf_program *prog, int *map_fds)
return -LIBBPF_ERRNO__RELOC;
}
insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
- insns[insn_idx].imm = map_fds[map_idx];
+ insns[insn_idx].imm = obj->maps[map_idx].fd;
}
zfree(&prog->reloc_desc);
@@ -760,7 +823,7 @@ bpf_object__relocate(struct bpf_object *obj)
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- err = bpf_program__relocate(prog, obj->map_fds);
+ err = bpf_program__relocate(prog, obj);
if (err) {
pr_warning("failed to relocate '%s'\n",
prog->section_name);
@@ -784,8 +847,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
Elf_Data *data = obj->efile.reloc[i].data;
int idx = shdr->sh_info;
struct bpf_program *prog;
- size_t nr_maps = obj->maps_buf_sz /
- sizeof(struct bpf_map_def);
+ size_t nr_maps = obj->nr_maps;
if (shdr->sh_type != SHT_REL) {
pr_warning("internal error at %d\n", __LINE__);
@@ -860,13 +922,73 @@ static int
bpf_program__load(struct bpf_program *prog,
char *license, u32 kern_version)
{
- int err, fd;
+ int err = 0, fd, i;
- err = load_program(prog->insns, prog->insns_cnt,
- license, kern_version, &fd);
- if (!err)
- prog->fd = fd;
+ if (prog->instances.nr < 0 || !prog->instances.fds) {
+ if (prog->preprocessor) {
+ pr_warning("Internal error: can't load program '%s'\n",
+ prog->section_name);
+ return -LIBBPF_ERRNO__INTERNAL;
+ }
+
+ prog->instances.fds = malloc(sizeof(int));
+ if (!prog->instances.fds) {
+ pr_warning("Not enough memory for BPF fds\n");
+ return -ENOMEM;
+ }
+ prog->instances.nr = 1;
+ prog->instances.fds[0] = -1;
+ }
+
+ if (!prog->preprocessor) {
+ if (prog->instances.nr != 1) {
+ pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
+ prog->section_name, prog->instances.nr);
+ }
+ err = load_program(prog->insns, prog->insns_cnt,
+ license, kern_version, &fd);
+ if (!err)
+ prog->instances.fds[0] = fd;
+ goto out;
+ }
+
+ for (i = 0; i < prog->instances.nr; i++) {
+ struct bpf_prog_prep_result result;
+ bpf_program_prep_t preprocessor = prog->preprocessor;
+
+ bzero(&result, sizeof(result));
+ err = preprocessor(prog, i, prog->insns,
+ prog->insns_cnt, &result);
+ if (err) {
+ pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
+ i, prog->section_name);
+ goto out;
+ }
+
+ if (!result.new_insn_ptr || !result.new_insn_cnt) {
+ pr_debug("Skip loading the %dth instance of program '%s'\n",
+ i, prog->section_name);
+ prog->instances.fds[i] = -1;
+ if (result.pfd)
+ *result.pfd = -1;
+ continue;
+ }
+
+ err = load_program(result.new_insn_ptr,
+ result.new_insn_cnt,
+ license, kern_version, &fd);
+
+ if (err) {
+ pr_warning("Loading the %dth instance of program '%s' failed\n",
+ i, prog->section_name);
+ goto out;
+ }
+ if (result.pfd)
+ *result.pfd = fd;
+ prog->instances.fds[i] = fd;
+ }
+out:
if (err)
pr_warning("failed to load program '%s'\n",
prog->section_name);
@@ -970,10 +1092,8 @@ int bpf_object__unload(struct bpf_object *obj)
if (!obj)
return -EINVAL;
- for (i = 0; i < obj->nr_map_fds; i++)
- zclose(obj->map_fds[i]);
- zfree(&obj->map_fds);
- obj->nr_map_fds = 0;
+ for (i = 0; i < obj->nr_maps; i++)
+ zclose(obj->maps[i].fd);
for (i = 0; i < obj->nr_programs; i++)
bpf_program__unload(&obj->programs[i]);
@@ -1016,7 +1136,16 @@ void bpf_object__close(struct bpf_object *obj)
bpf_object__elf_finish(obj);
bpf_object__unload(obj);
- zfree(&obj->maps_buf);
+ for (i = 0; i < obj->nr_maps; i++) {
+ zfree(&obj->maps[i].name);
+ if (obj->maps[i].clear_priv)
+ obj->maps[i].clear_priv(&obj->maps[i],
+ obj->maps[i].priv);
+ obj->maps[i].priv = NULL;
+ obj->maps[i].clear_priv = NULL;
+ }
+ zfree(&obj->maps);
+ obj->nr_maps = 0;
if (obj->programs && obj->nr_programs) {
for (i = 0; i < obj->nr_programs; i++)
@@ -1121,5 +1250,142 @@ const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
int bpf_program__fd(struct bpf_program *prog)
{
- return prog->fd;
+ return bpf_program__nth_fd(prog, 0);
+}
+
+int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
+ bpf_program_prep_t prep)
+{
+ int *instances_fds;
+
+ if (nr_instances <= 0 || !prep)
+ return -EINVAL;
+
+ if (prog->instances.nr > 0 || prog->instances.fds) {
+ pr_warning("Can't set pre-processor after loading\n");
+ return -EINVAL;
+ }
+
+ instances_fds = malloc(sizeof(int) * nr_instances);
+ if (!instances_fds) {
+ pr_warning("alloc memory failed for fds\n");
+ return -ENOMEM;
+ }
+
+ /* fill all fd with -1 */
+ memset(instances_fds, -1, sizeof(int) * nr_instances);
+
+ prog->instances.nr = nr_instances;
+ prog->instances.fds = instances_fds;
+ prog->preprocessor = prep;
+ return 0;
+}
+
+int bpf_program__nth_fd(struct bpf_program *prog, int n)
+{
+ int fd;
+
+ if (n >= prog->instances.nr || n < 0) {
+ pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
+ n, prog->section_name, prog->instances.nr);
+ return -EINVAL;
+ }
+
+ fd = prog->instances.fds[n];
+ if (fd < 0) {
+ pr_warning("%dth instance of program '%s' is invalid\n",
+ n, prog->section_name);
+ return -ENOENT;
+ }
+
+ return fd;
+}
+
+int bpf_map__get_fd(struct bpf_map *map)
+{
+ if (!map)
+ return -EINVAL;
+
+ return map->fd;
+}
+
+int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef)
+{
+ if (!map || !pdef)
+ return -EINVAL;
+
+ *pdef = map->def;
+ return 0;
+}
+
+const char *bpf_map__get_name(struct bpf_map *map)
+{
+ if (!map)
+ return NULL;
+ return map->name;
+}
+
+int bpf_map__set_private(struct bpf_map *map, void *priv,
+ bpf_map_clear_priv_t clear_priv)
+{
+ if (!map)
+ return -EINVAL;
+
+ if (map->priv) {
+ if (map->clear_priv)
+ map->clear_priv(map, map->priv);
+ }
+
+ map->priv = priv;
+ map->clear_priv = clear_priv;
+ return 0;
+}
+
+int bpf_map__get_private(struct bpf_map *map, void **ppriv)
+{
+ if (!map)
+ return -EINVAL;
+
+ if (ppriv)
+ *ppriv = map->priv;
+ return 0;
+}
+
+struct bpf_map *
+bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
+{
+ size_t idx;
+ struct bpf_map *s, *e;
+
+ if (!obj || !obj->maps)
+ return NULL;
+
+ s = obj->maps;
+ e = obj->maps + obj->nr_maps;
+
+ if (prev == NULL)
+ return s;
+
+ if ((prev < s) || (prev >= e)) {
+ pr_warning("error in %s: map handler doesn't belong to object\n",
+ __func__);
+ return NULL;
+ }
+
+ idx = (prev - obj->maps) + 1;
+ if (idx >= obj->nr_maps)
+ return NULL;
+ return &obj->maps[idx];
+}
+
+struct bpf_map *
+bpf_object__get_map_by_name(struct bpf_object *obj, const char *name)
+{
+ struct bpf_map *pos;
+
+ bpf_map__for_each(pos, obj) {
+ if (pos->name && !strcmp(pos->name, name))
+ return pos;
+ }
+ return NULL;
}
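The new map accessors give callers a handle-based view of the maps parsed from the "maps" section, instead of the old raw fd array. A usage sketch; the map name "counts" is made up, and the fd is only valid after bpf_object__load():

#include <stdio.h>
#include "libbpf.h"

static int report_counts_map(struct bpf_object *obj)
{
	struct bpf_map_def def;
	struct bpf_map *map;

	map = bpf_object__get_map_by_name(obj, "counts");
	if (!map)
		return -1;

	if (bpf_map__get_def(map, &def) == 0)
		printf("map '%s': key %u bytes, value %u bytes, max %u entries\n",
		       bpf_map__get_name(map), def.key_size,
		       def.value_size, def.max_entries);

	return bpf_map__get_fd(map);
}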
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c9a9aef2806c..a51594c7b518 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -88,6 +88,70 @@ const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
int bpf_program__fd(struct bpf_program *prog);
+struct bpf_insn;
+
+/*
+ * Libbpf allows callers to adjust BPF programs before they are loaded
+ * into the kernel. One program in an object file can be transformed
+ * into multiple variants to be attached to different code.
+ *
+ * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd
+ * are the APIs for this purpose.
+ *
+ * - bpf_program_prep_t:
+ *   It defines the 'preprocessor', a caller-defined function passed
+ *   to libbpf through bpf_program__set_prep(), which will be called
+ *   before the program is loaded. The preprocessor should adjust the
+ *   program once for each instance, according to the instance index
+ *   passed to it.
+ *
+ * - bpf_program__set_prep:
+ *   Attaches a preprocessor to a BPF program. The number of instances
+ *   that should be created is also passed through this function.
+ *
+ * - bpf_program__nth_fd:
+ *   After the program is loaded, returns the resulting fd of the BPF
+ *   program for each instance.
+ *
+ * If bpf_program__set_prep() is not used, the program is loaded
+ * without adjustment during bpf_object__load(). The program has only
+ * one instance. In this case bpf_program__fd(prog) is equal to
+ * bpf_program__nth_fd(prog, 0).
+ */
+
+struct bpf_prog_prep_result {
+ /*
+ * If not NULL, load new instruction array.
+ * If set to NULL, don't load this instance.
+ */
+ struct bpf_insn *new_insn_ptr;
+ int new_insn_cnt;
+
+ /* If not NULL, result fd is set to it */
+ int *pfd;
+};
+
+/*
+ * Parameters of bpf_program_prep_t:
+ * - prog: The bpf_program being loaded.
+ * - n: Index of instance being generated.
+ * - insns: BPF instructions array.
+ * - insns_cnt: Number of instructions in insns.
+ * - res: Output parameter, result of the transformation.
+ *
+ * Return value:
+ * - Zero: pre-processing succeeded.
+ * - Non-zero: pre-processing failed, stop loading.
+ */
+typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
+ struct bpf_insn *insns, int insns_cnt,
+ struct bpf_prog_prep_result *res);
+
+int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
+ bpf_program_prep_t prep);
+
+int bpf_program__nth_fd(struct bpf_program *prog, int n);
+
/*
* We don't need __attribute__((packed)) now since it is
* unnecessary for 'bpf_map_def' because they are all aligned.
@@ -101,4 +165,28 @@ struct bpf_map_def {
unsigned int max_entries;
};
+/*
+ * There is another 'struct bpf_map' in include/linux/map.h. However,
+ * it is not a uapi header, so there is no need to worry about a name clash.
+ */
+struct bpf_map;
+struct bpf_map *
+bpf_object__get_map_by_name(struct bpf_object *obj, const char *name);
+
+struct bpf_map *
+bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
+#define bpf_map__for_each(pos, obj) \
+ for ((pos) = bpf_map__next(NULL, (obj)); \
+ (pos) != NULL; \
+ (pos) = bpf_map__next((pos), (obj)))
+
+int bpf_map__get_fd(struct bpf_map *map);
+int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef);
+const char *bpf_map__get_name(struct bpf_map *map);
+
+typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
+int bpf_map__set_private(struct bpf_map *map, void *priv,
+ bpf_map_clear_priv_t clear_priv);
+int bpf_map__get_private(struct bpf_map *map, void **ppriv);
+
#endif
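A sketch of the preprocessor contract described in the header comment above: the callback runs once per instance before loading and hands back the instruction array to load for that instance. Here the instructions are passed through unchanged, so the same program is simply loaded twice; a real preprocessor would patch 'insns' per instance. Function names are illustrative:

#include "libbpf.h"

static int passthrough_prep(struct bpf_program *prog, int n,
			    struct bpf_insn *insns, int insns_cnt,
			    struct bpf_prog_prep_result *res)
{
	/* load instance 'n' with the original, unmodified instructions */
	res->new_insn_ptr = insns;
	res->new_insn_cnt = insns_cnt;
	res->pfd = NULL;	/* read the fd later via bpf_program__nth_fd() */
	return 0;
}

/* Must be called after bpf_object__open() but before bpf_object__load();
 * afterwards bpf_program__nth_fd(prog, 0) and (prog, 1) return the fds.
 */
static int setup_two_instances(struct bpf_program *prog)
{
	return bpf_program__set_prep(prog, 2, passthrough_prep);
}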
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
new file mode 100644
index 000000000000..9122a9e80046
--- /dev/null
+++ b/tools/lib/find_bit.c
@@ -0,0 +1,84 @@
+/* bit search implementation
+ *
+ * Copied from lib/find_bit.c to tools/lib/find_bit.c
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Copyright (C) 2008 IBM Corporation
+ * 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
+ * (Inspired by David Howell's find_next_bit implementation)
+ *
+ * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
+ * size and improve performance, 2015.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+
+#if !defined(find_next_bit)
+
+/*
+ * This is a common helper function for find_next_bit and
+ * find_next_zero_bit. The difference is the "invert" argument, which
+ * is XORed with each fetched word before searching it for one bits.
+ */
+static unsigned long _find_next_bit(const unsigned long *addr,
+ unsigned long nbits, unsigned long start, unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_LONG] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= BITMAP_FIRST_WORD_MASK(start);
+ start = round_down(start, BITS_PER_LONG);
+
+ while (!tmp) {
+ start += BITS_PER_LONG;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_LONG] ^ invert;
+ }
+
+ return min(start + __ffs(tmp), nbits);
+}
+#endif
+
+#ifndef find_next_bit
+/*
+ * Find the next set bit in a memory region.
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ return _find_next_bit(addr, size, offset, 0UL);
+}
+#endif
+
+#ifndef find_first_bit
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
+ if (addr[idx])
+ return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
+ }
+
+ return size;
+}
+#endif
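A usage sketch for the bit-search helpers above, assuming their declarations are picked up through the tools copies of linux/bitops.h and linux/bitmap.h (as the implementation itself does):

#include <stdio.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>

/* Walk every set bit of an 'nbits'-wide bitmap. */
static void print_set_bits(const unsigned long *bits, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_first_bit(bits, nbits);
	     bit < nbits;
	     bit = find_next_bit(bits, nbits, bit + 1))
		printf("bit %lu is set\n", bit);
}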
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index 7e319afac78a..90d2baeb621a 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -149,7 +149,7 @@ install_lib: all_cmd
install: install_lib
clean:
- $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
+ $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d .*.cmd
$(RM) tags TAGS
PHONY += force
diff --git a/tools/lib/string.c b/tools/lib/string.c
new file mode 100644
index 000000000000..bd239bc1d557
--- /dev/null
+++ b/tools/lib/string.c
@@ -0,0 +1,89 @@
+/*
+ * linux/tools/lib/string.c
+ *
+ * Copied from linux/lib/string.c, where it is:
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * More specifically, the first copied function was strtobool, which
+ * was introduced by:
+ *
+ * d0f1fed29e6e ("Add a strtobool function matching semantics of existing in kernel equivalents")
+ * Author: Jonathan Cameron <jic23@cam.ac.uk>
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+
+/**
+ * memdup - duplicate region of memory
+ *
+ * @src: memory region to duplicate
+ * @len: memory region length
+ */
+void *memdup(const void *src, size_t len)
+{
+ void *p = malloc(len);
+
+ if (p)
+ memcpy(p, src, len);
+
+ return p;
+}
+
+/**
+ * strtobool - convert common user inputs into boolean values
+ * @s: input string
+ * @res: result
+ *
+ * This routine returns 0 iff the first character is one of 'Yy1Nn0'.
+ * Otherwise it will return -EINVAL. Value pointed to by res is
+ * updated upon finding a match.
+ */
+int strtobool(const char *s, bool *res)
+{
+ switch (s[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ *res = true;
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ *res = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * strlcpy - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ *
+ * If libc has strlcpy() then that version will override this
+ * implementation:
+ */
+size_t __weak strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
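How the two helpers are expected to be used together; the option handling below is illustrative, not taken from the tree:

#include <stdio.h>
#include <stdbool.h>
#include <linux/string.h>

static char backend[16];

/* Parse a y/n style value and remember which backend name was requested. */
static int set_fancy(const char *value, const char *backend_name, bool *fancy)
{
	if (strtobool(value, fancy))		/* accepts y/Y/1/n/N/0 only */
		return -1;

	/* strlcpy() always NUL-terminates and returns strlen(src), so a
	 * return value >= sizeof(backend) means the name was truncated.
	 */
	if (strlcpy(backend, backend_name, sizeof(backend)) >= sizeof(backend))
		fprintf(stderr, "backend name '%s' truncated\n", backend_name);

	return 0;
}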
diff --git a/tools/lib/subcmd/Build b/tools/lib/subcmd/Build
new file mode 100644
index 000000000000..ee31288788c1
--- /dev/null
+++ b/tools/lib/subcmd/Build
@@ -0,0 +1,7 @@
+libsubcmd-y += exec-cmd.o
+libsubcmd-y += help.o
+libsubcmd-y += pager.o
+libsubcmd-y += parse-options.o
+libsubcmd-y += run-command.o
+libsubcmd-y += sigchain.o
+libsubcmd-y += subcmd-config.o
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
new file mode 100644
index 000000000000..629cf8c14e68
--- /dev/null
+++ b/tools/lib/subcmd/Makefile
@@ -0,0 +1,48 @@
+include ../../scripts/Makefile.include
+include ../../perf/config/utilities.mak # QUIET_CLEAN
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+CC = $(CROSS_COMPILE)gcc
+AR = $(CROSS_COMPILE)ar
+RM = rm -f
+
+MAKEFLAGS += --no-print-directory
+
+LIBFILE = $(OUTPUT)libsubcmd.a
+
+CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+
+CFLAGS += -I$(srctree)/tools/include/
+CFLAGS += -I$(srctree)/include/uapi
+CFLAGS += -I$(srctree)/include
+
+SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
+
+all:
+
+export srctree OUTPUT CC LD CFLAGS V
+include $(srctree)/tools/build/Makefile.include
+
+all: fixdep $(LIBFILE)
+
+$(SUBCMD_IN): FORCE
+ @$(MAKE) $(build)=libsubcmd
+
+$(LIBFILE): $(SUBCMD_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(SUBCMD_IN)
+
+clean:
+ $(call QUIET_CLEAN, libsubcmd) $(RM) $(LIBFILE); \
+ find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+
+FORCE:
+
+.PHONY: clean FORCE
diff --git a/tools/lib/subcmd/exec-cmd.c b/tools/lib/subcmd/exec-cmd.c
new file mode 100644
index 000000000000..1ae833af1a4a
--- /dev/null
+++ b/tools/lib/subcmd/exec-cmd.c
@@ -0,0 +1,209 @@
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "subcmd-util.h"
+#include "exec-cmd.h"
+#include "subcmd-config.h"
+
+#define MAX_ARGS 32
+#define PATH_MAX 4096
+
+static const char *argv_exec_path;
+static const char *argv0_path;
+
+void exec_cmd_init(const char *exec_name, const char *prefix,
+ const char *exec_path, const char *exec_path_env)
+{
+ subcmd_config.exec_name = exec_name;
+ subcmd_config.prefix = prefix;
+ subcmd_config.exec_path = exec_path;
+ subcmd_config.exec_path_env = exec_path_env;
+}
+
+#define is_dir_sep(c) ((c) == '/')
+
+static int is_absolute_path(const char *path)
+{
+ return path[0] == '/';
+}
+
+static const char *get_pwd_cwd(void)
+{
+ static char cwd[PATH_MAX + 1];
+ char *pwd;
+ struct stat cwd_stat, pwd_stat;
+ if (getcwd(cwd, PATH_MAX) == NULL)
+ return NULL;
+ pwd = getenv("PWD");
+ if (pwd && strcmp(pwd, cwd)) {
+ stat(cwd, &cwd_stat);
+ if (!stat(pwd, &pwd_stat) &&
+ pwd_stat.st_dev == cwd_stat.st_dev &&
+ pwd_stat.st_ino == cwd_stat.st_ino) {
+ strlcpy(cwd, pwd, PATH_MAX);
+ }
+ }
+ return cwd;
+}
+
+static const char *make_nonrelative_path(const char *path)
+{
+ static char buf[PATH_MAX + 1];
+
+ if (is_absolute_path(path)) {
+ if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX)
+ die("Too long path: %.*s", 60, path);
+ } else {
+ const char *cwd = get_pwd_cwd();
+ if (!cwd)
+ die("Cannot determine the current working directory");
+ if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX)
+ die("Too long path: %.*s", 60, path);
+ }
+ return buf;
+}
+
+char *system_path(const char *path)
+{
+ char *buf = NULL;
+
+ if (is_absolute_path(path))
+ return strdup(path);
+
+ astrcatf(&buf, "%s/%s", subcmd_config.prefix, path);
+
+ return buf;
+}
+
+const char *extract_argv0_path(const char *argv0)
+{
+ const char *slash;
+
+ if (!argv0 || !*argv0)
+ return NULL;
+ slash = argv0 + strlen(argv0);
+
+ while (argv0 <= slash && !is_dir_sep(*slash))
+ slash--;
+
+ if (slash >= argv0) {
+ argv0_path = strndup(argv0, slash - argv0);
+ return argv0_path ? slash + 1 : NULL;
+ }
+
+ return argv0;
+}
+
+void set_argv_exec_path(const char *exec_path)
+{
+ argv_exec_path = exec_path;
+ /*
+ * Propagate this setting to external programs.
+ */
+ setenv(subcmd_config.exec_path_env, exec_path, 1);
+}
+
+
+/* Returns the highest-priority location to look for subprograms. */
+char *get_argv_exec_path(void)
+{
+ char *env;
+
+ if (argv_exec_path)
+ return strdup(argv_exec_path);
+
+ env = getenv(subcmd_config.exec_path_env);
+ if (env && *env)
+ return strdup(env);
+
+ return system_path(subcmd_config.exec_path);
+}
+
+static void add_path(char **out, const char *path)
+{
+ if (path && *path) {
+ if (is_absolute_path(path))
+ astrcat(out, path);
+ else
+ astrcat(out, make_nonrelative_path(path));
+
+ astrcat(out, ":");
+ }
+}
+
+void setup_path(void)
+{
+ const char *old_path = getenv("PATH");
+ char *new_path = NULL;
+ char *tmp = get_argv_exec_path();
+
+ add_path(&new_path, tmp);
+ add_path(&new_path, argv0_path);
+ free(tmp);
+
+ if (old_path)
+ astrcat(&new_path, old_path);
+ else
+ astrcat(&new_path, "/usr/local/bin:/usr/bin:/bin");
+
+ setenv("PATH", new_path, 1);
+
+ free(new_path);
+}
+
+static const char **prepare_exec_cmd(const char **argv)
+{
+ int argc;
+ const char **nargv;
+
+ for (argc = 0; argv[argc]; argc++)
+ ; /* just counting */
+ nargv = malloc(sizeof(*nargv) * (argc + 2));
+
+ nargv[0] = subcmd_config.exec_name;
+ for (argc = 0; argv[argc]; argc++)
+ nargv[argc + 1] = argv[argc];
+ nargv[argc + 1] = NULL;
+ return nargv;
+}
+
+int execv_cmd(const char **argv) {
+ const char **nargv = prepare_exec_cmd(argv);
+
+ /* execvp() can only ever return if it fails */
+ execvp(subcmd_config.exec_name, (char **)nargv);
+
+ free(nargv);
+ return -1;
+}
+
+
+int execl_cmd(const char *cmd,...)
+{
+ int argc;
+ const char *argv[MAX_ARGS + 1];
+ const char *arg;
+ va_list param;
+
+ va_start(param, cmd);
+ argv[0] = cmd;
+ argc = 1;
+ while (argc < MAX_ARGS) {
+ arg = argv[argc++] = va_arg(param, char *);
+ if (!arg)
+ break;
+ }
+ va_end(param);
+ if (MAX_ARGS <= argc) {
+ fprintf(stderr, " Error: too many args to run %s\n", cmd);
+ return -1;
+ }
+
+ argv[argc] = NULL;
+ return execv_cmd(argv);
+}
diff --git a/tools/lib/subcmd/exec-cmd.h b/tools/lib/subcmd/exec-cmd.h
new file mode 100644
index 000000000000..5d08bda31d90
--- /dev/null
+++ b/tools/lib/subcmd/exec-cmd.h
@@ -0,0 +1,16 @@
+#ifndef __SUBCMD_EXEC_CMD_H
+#define __SUBCMD_EXEC_CMD_H
+
+extern void exec_cmd_init(const char *exec_name, const char *prefix,
+ const char *exec_path, const char *exec_path_env);
+
+extern void set_argv_exec_path(const char *exec_path);
+extern const char *extract_argv0_path(const char *path);
+extern void setup_path(void);
+extern int execv_cmd(const char **argv); /* NULL terminated */
+extern int execl_cmd(const char *cmd, ...);
+/* get_argv_exec_path and system_path return a malloc'd string; the caller must free it */
+extern char *get_argv_exec_path(void);
+extern char *system_path(const char *path);
+
+#endif /* __SUBCMD_EXEC_CMD_H */
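A sketch of how a tool wires itself into the now library-neutral exec-cmd layer. The values mirror what perf is expected to pass, but they are the caller's choice, not mandated by the API:

#include "exec-cmd.h"

int main(int argc, const char **argv)
{
	(void)argc;

	/* tool name, install prefix, exec dir relative to the prefix, and
	 * the environment variable that can override the exec dir
	 */
	exec_cmd_init("perf", "/usr", "libexec/perf-core", "PERF_EXEC_PATH");

	extract_argv0_path(argv[0]);
	setup_path();	/* prepend the exec path and argv[0]'s dir to $PATH */

	/* execs "perf help record" via execvp(); only returns on failure */
	return execl_cmd("help", "record", NULL);
}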
diff --git a/tools/perf/util/help.c b/tools/lib/subcmd/help.c
index 86c37c472263..e228c3cb3716 100644
--- a/tools/perf/util/help.c
+++ b/tools/lib/subcmd/help.c
@@ -1,9 +1,15 @@
-#include "cache.h"
-#include "../builtin.h"
-#include "exec_cmd.h"
-#include "levenshtein.h"
-#include "help.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
#include <termios.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <dirent.h>
+#include "subcmd-util.h"
+#include "help.h"
+#include "exec-cmd.h"
void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
{
@@ -17,7 +23,7 @@ void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
cmds->names[cmds->cnt++] = ent;
}
-static void clean_cmdnames(struct cmdnames *cmds)
+void clean_cmdnames(struct cmdnames *cmds)
{
unsigned int i;
@@ -28,14 +34,14 @@ static void clean_cmdnames(struct cmdnames *cmds)
cmds->alloc = 0;
}
-static int cmdname_compare(const void *a_, const void *b_)
+int cmdname_compare(const void *a_, const void *b_)
{
struct cmdname *a = *(struct cmdname **)a_;
struct cmdname *b = *(struct cmdname **)b_;
return strcmp(a->name, b->name);
}
-static void uniq(struct cmdnames *cmds)
+void uniq(struct cmdnames *cmds)
{
unsigned int i, j;
@@ -71,6 +77,28 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
cmds->cnt = cj;
}
+static void get_term_dimensions(struct winsize *ws)
+{
+ char *s = getenv("LINES");
+
+ if (s != NULL) {
+ ws->ws_row = atoi(s);
+ s = getenv("COLUMNS");
+ if (s != NULL) {
+ ws->ws_col = atoi(s);
+ if (ws->ws_row && ws->ws_col)
+ return;
+ }
+ }
+#ifdef TIOCGWINSZ
+ if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
+ ws->ws_row && ws->ws_col)
+ return;
+#endif
+ ws->ws_row = 25;
+ ws->ws_col = 80;
+}
+
static void pretty_print_string_list(struct cmdnames *cmds, int longest)
{
int cols = 1, rows;
@@ -114,6 +142,14 @@ static int is_executable(const char *name)
return st.st_mode & S_IXUSR;
}
+static int has_extension(const char *filename, const char *ext)
+{
+ size_t len = strlen(filename);
+ size_t extlen = strlen(ext);
+
+ return len > extlen && !memcmp(filename + len - extlen, ext, extlen);
+}
+
static void list_commands_in_dir(struct cmdnames *cmds,
const char *path,
const char *prefix)
@@ -121,8 +157,7 @@ static void list_commands_in_dir(struct cmdnames *cmds,
int prefix_len;
DIR *dir = opendir(path);
struct dirent *de;
- struct strbuf buf = STRBUF_INIT;
- int len;
+ char *buf = NULL;
if (!dir)
return;
@@ -130,8 +165,7 @@ static void list_commands_in_dir(struct cmdnames *cmds,
prefix = "perf-";
prefix_len = strlen(prefix);
- strbuf_addf(&buf, "%s/", path);
- len = buf.len;
+ astrcatf(&buf, "%s/", path);
while ((de = readdir(dir)) != NULL) {
int entlen;
@@ -139,9 +173,8 @@ static void list_commands_in_dir(struct cmdnames *cmds,
if (prefixcmp(de->d_name, prefix))
continue;
- strbuf_setlen(&buf, len);
- strbuf_addstr(&buf, de->d_name);
- if (!is_executable(buf.buf))
+ astrcat(&buf, de->d_name);
+ if (!is_executable(buf))
continue;
entlen = strlen(de->d_name) - prefix_len;
@@ -151,7 +184,7 @@ static void list_commands_in_dir(struct cmdnames *cmds,
add_cmdname(cmds, de->d_name + prefix_len, entlen);
}
closedir(dir);
- strbuf_release(&buf);
+ free(buf);
}
void load_command_list(const char *prefix,
@@ -159,7 +192,7 @@ void load_command_list(const char *prefix,
struct cmdnames *other_cmds)
{
const char *env_path = getenv("PATH");
- const char *exec_path = perf_exec_path();
+ char *exec_path = get_argv_exec_path();
if (exec_path) {
list_commands_in_dir(main_cmds, exec_path, prefix);
@@ -172,7 +205,7 @@ void load_command_list(const char *prefix,
char *paths, *path, *colon;
path = paths = strdup(env_path);
while (1) {
- if ((colon = strchr(path, PATH_SEP)))
+ if ((colon = strchr(path, ':')))
*colon = 0;
if (!exec_path || strcmp(path, exec_path))
list_commands_in_dir(other_cmds, path, prefix);
@@ -187,6 +220,7 @@ void load_command_list(const char *prefix,
sizeof(*other_cmds->names), cmdname_compare);
uniq(other_cmds);
}
+ free(exec_path);
exclude_cmds(other_cmds, main_cmds);
}
@@ -203,13 +237,14 @@ void list_commands(const char *title, struct cmdnames *main_cmds,
longest = other_cmds->names[i]->len;
if (main_cmds->cnt) {
- const char *exec_path = perf_exec_path();
+ char *exec_path = get_argv_exec_path();
printf("available %s in '%s'\n", title, exec_path);
printf("----------------");
mput_char('-', strlen(title) + strlen(exec_path));
putchar('\n');
pretty_print_string_list(main_cmds, longest);
putchar('\n');
+ free(exec_path);
}
if (other_cmds->cnt) {
@@ -231,109 +266,3 @@ int is_in_cmdlist(struct cmdnames *c, const char *s)
return 1;
return 0;
}
-
-static int autocorrect;
-static struct cmdnames aliases;
-
-static int perf_unknown_cmd_config(const char *var, const char *value, void *cb)
-{
- if (!strcmp(var, "help.autocorrect"))
- autocorrect = perf_config_int(var,value);
- /* Also use aliases for command lookup */
- if (!prefixcmp(var, "alias."))
- add_cmdname(&aliases, var + 6, strlen(var + 6));
-
- return perf_default_config(var, value, cb);
-}
-
-static int levenshtein_compare(const void *p1, const void *p2)
-{
- const struct cmdname *const *c1 = p1, *const *c2 = p2;
- const char *s1 = (*c1)->name, *s2 = (*c2)->name;
- int l1 = (*c1)->len;
- int l2 = (*c2)->len;
- return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
-}
-
-static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
-{
- unsigned int i;
-
- ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
-
- for (i = 0; i < old->cnt; i++)
- cmds->names[cmds->cnt++] = old->names[i];
- zfree(&old->names);
- old->cnt = 0;
-}
-
-const char *help_unknown_cmd(const char *cmd)
-{
- unsigned int i, n = 0, best_similarity = 0;
- struct cmdnames main_cmds, other_cmds;
-
- memset(&main_cmds, 0, sizeof(main_cmds));
- memset(&other_cmds, 0, sizeof(main_cmds));
- memset(&aliases, 0, sizeof(aliases));
-
- perf_config(perf_unknown_cmd_config, NULL);
-
- load_command_list("perf-", &main_cmds, &other_cmds);
-
- add_cmd_list(&main_cmds, &aliases);
- add_cmd_list(&main_cmds, &other_cmds);
- qsort(main_cmds.names, main_cmds.cnt,
- sizeof(main_cmds.names), cmdname_compare);
- uniq(&main_cmds);
-
- if (main_cmds.cnt) {
- /* This reuses cmdname->len for similarity index */
- for (i = 0; i < main_cmds.cnt; ++i)
- main_cmds.names[i]->len =
- levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
-
- qsort(main_cmds.names, main_cmds.cnt,
- sizeof(*main_cmds.names), levenshtein_compare);
-
- best_similarity = main_cmds.names[0]->len;
- n = 1;
- while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
- ++n;
- }
-
- if (autocorrect && n == 1) {
- const char *assumed = main_cmds.names[0]->name;
-
- main_cmds.names[0] = NULL;
- clean_cmdnames(&main_cmds);
- fprintf(stderr, "WARNING: You called a perf program named '%s', "
- "which does not exist.\n"
- "Continuing under the assumption that you meant '%s'\n",
- cmd, assumed);
- if (autocorrect > 0) {
- fprintf(stderr, "in %0.1f seconds automatically...\n",
- (float)autocorrect/10.0);
- poll(NULL, 0, autocorrect * 100);
- }
- return assumed;
- }
-
- fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
-
- if (main_cmds.cnt && best_similarity < 6) {
- fprintf(stderr, "\nDid you mean %s?\n",
- n < 2 ? "this": "one of these");
-
- for (i = 0; i < n; i++)
- fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
- }
-
- exit(1);
-}
-
-int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused,
- const char *prefix __maybe_unused)
-{
- printf("perf version %s\n", perf_version_string);
- return 0;
-}
diff --git a/tools/perf/util/help.h b/tools/lib/subcmd/help.h
index 7f5c6dedd714..e145a020780c 100644
--- a/tools/perf/util/help.h
+++ b/tools/lib/subcmd/help.h
@@ -1,12 +1,14 @@
-#ifndef __PERF_HELP_H
-#define __PERF_HELP_H
+#ifndef __SUBCMD_HELP_H
+#define __SUBCMD_HELP_H
+
+#include <sys/types.h>
struct cmdnames {
size_t alloc;
size_t cnt;
struct cmdname {
size_t len; /* also used for similarity index in help.c */
- char name[FLEX_ARRAY];
+ char name[];
} **names;
};
@@ -20,10 +22,13 @@ void load_command_list(const char *prefix,
struct cmdnames *main_cmds,
struct cmdnames *other_cmds);
void add_cmdname(struct cmdnames *cmds, const char *name, size_t len);
+void clean_cmdnames(struct cmdnames *cmds);
+int cmdname_compare(const void *a, const void *b);
+void uniq(struct cmdnames *cmds);
/* Here we require that excludes is a sorted list. */
void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);
int is_in_cmdlist(struct cmdnames *c, const char *s);
void list_commands(const char *title, struct cmdnames *main_cmds,
struct cmdnames *other_cmds);
-#endif /* __PERF_HELP_H */
+#endif /* __SUBCMD_HELP_H */
diff --git a/tools/perf/util/pager.c b/tools/lib/subcmd/pager.c
index 53ef006a951c..d50f3b58606b 100644
--- a/tools/perf/util/pager.c
+++ b/tools/lib/subcmd/pager.c
@@ -1,6 +1,12 @@
-#include "cache.h"
+#include <sys/select.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include "pager.h"
#include "run-command.h"
#include "sigchain.h"
+#include "subcmd-config.h"
/*
* This is split up from the rest of git so that we can do
@@ -9,6 +15,11 @@
static int spawned_pager;
+void pager_init(const char *pager_env)
+{
+ subcmd_config.pager_env = pager_env;
+}
+
static void pager_preexec(void)
{
/*
@@ -46,7 +57,7 @@ static void wait_for_pager_signal(int signo)
void setup_pager(void)
{
- const char *pager = getenv("PERF_PAGER");
+ const char *pager = getenv(subcmd_config.pager_env);
if (!isatty(1))
return;
@@ -85,11 +96,5 @@ void setup_pager(void)
int pager_in_use(void)
{
- const char *env;
-
- if (spawned_pager)
- return 1;
-
- env = getenv("PERF_PAGER_IN_USE");
- return env ? perf_config_bool("PERF_PAGER_IN_USE", env) : 0;
+ return spawned_pager;
}
diff --git a/tools/lib/subcmd/pager.h b/tools/lib/subcmd/pager.h
new file mode 100644
index 000000000000..8b83714ecf73
--- /dev/null
+++ b/tools/lib/subcmd/pager.h
@@ -0,0 +1,9 @@
+#ifndef __SUBCMD_PAGER_H
+#define __SUBCMD_PAGER_H
+
+extern void pager_init(const char *pager_env);
+
+extern void setup_pager(void);
+extern int pager_in_use(void);
+
+#endif /* __SUBCMD_PAGER_H */
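The pager no longer hard-codes PERF_PAGER: each tool names its own environment variable once (typically at start-up) and then requests a pager before emitting long output. A sketch, using the variable name perf would keep:

#include <stdio.h>
#include "pager.h"

static void show_long_listing(void)
{
	pager_init("PERF_PAGER");	/* tell libsubcmd which env var to honour */
	setup_pager();			/* no-op when stdout is not a tty */

	if (pager_in_use())
		fprintf(stderr, "long output, going through the pager\n");

	/* ... print the listing to stdout ... */
}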
diff --git a/tools/perf/util/parse-options.c b/tools/lib/subcmd/parse-options.c
index 9fca09296eb3..981bb4481fd5 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -1,37 +1,66 @@
-#include "util.h"
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <ctype.h>
+#include "subcmd-util.h"
#include "parse-options.h"
-#include "cache.h"
-#include "header.h"
-#include <linux/string.h>
+#include "subcmd-config.h"
+#include "pager.h"
#define OPT_SHORT 1
#define OPT_UNSET 2
-static struct strbuf error_buf = STRBUF_INIT;
+char *error_buf;
static int opterror(const struct option *opt, const char *reason, int flags)
{
if (flags & OPT_SHORT)
- return error("switch `%c' %s", opt->short_name, reason);
- if (flags & OPT_UNSET)
- return error("option `no-%s' %s", opt->long_name, reason);
- return error("option `%s' %s", opt->long_name, reason);
+ fprintf(stderr, " Error: switch `%c' %s", opt->short_name, reason);
+ else if (flags & OPT_UNSET)
+ fprintf(stderr, " Error: option `no-%s' %s", opt->long_name, reason);
+ else
+ fprintf(stderr, " Error: option `%s' %s", opt->long_name, reason);
+
+ return -1;
+}
+
+static const char *skip_prefix(const char *str, const char *prefix)
+{
+ size_t len = strlen(prefix);
+ return strncmp(str, prefix, len) ? NULL : str + len;
+}
+
+static void optwarning(const struct option *opt, const char *reason, int flags)
+{
+ if (flags & OPT_SHORT)
+ fprintf(stderr, " Warning: switch `%c' %s", opt->short_name, reason);
+ else if (flags & OPT_UNSET)
+ fprintf(stderr, " Warning: option `no-%s' %s", opt->long_name, reason);
+ else
+ fprintf(stderr, " Warning: option `%s' %s", opt->long_name, reason);
}
static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt,
int flags, const char **arg)
{
+ const char *res;
+
if (p->opt) {
- *arg = p->opt;
+ res = p->opt;
p->opt = NULL;
} else if ((opt->flags & PARSE_OPT_LASTARG_DEFAULT) && (p->argc == 1 ||
**(p->argv + 1) == '-')) {
- *arg = (const char *)opt->defval;
+ res = (const char *)opt->defval;
} else if (p->argc > 1) {
p->argc--;
- *arg = *++p->argv;
+ res = *++p->argv;
} else
return opterror(opt, "requires a value", flags);
+ if (arg)
+ *arg = res;
return 0;
}
@@ -55,11 +84,11 @@ static int get_value(struct parse_opt_ctx_t *p,
if (((flags & OPT_SHORT) && p->excl_opt->short_name) ||
p->excl_opt->long_name == NULL) {
- scnprintf(msg, sizeof(msg), "cannot be used with switch `%c'",
- p->excl_opt->short_name);
+ snprintf(msg, sizeof(msg), "cannot be used with switch `%c'",
+ p->excl_opt->short_name);
} else {
- scnprintf(msg, sizeof(msg), "cannot be used with %s",
- p->excl_opt->long_name);
+ snprintf(msg, sizeof(msg), "cannot be used with %s",
+ p->excl_opt->long_name);
}
opterror(opt, msg, flags);
return -3;
@@ -91,6 +120,64 @@ static int get_value(struct parse_opt_ctx_t *p,
}
}
+ if (opt->flags & PARSE_OPT_NOBUILD) {
+ char reason[128];
+ bool noarg = false;
+
+ err = snprintf(reason, sizeof(reason),
+ opt->flags & PARSE_OPT_CANSKIP ?
+ "is being ignored because %s " :
+ "is not available because %s",
+ opt->build_opt);
+ reason[sizeof(reason) - 1] = '\0';
+
+ if (err < 0)
+ strncpy(reason, opt->flags & PARSE_OPT_CANSKIP ?
+ "is being ignored" :
+ "is not available",
+ sizeof(reason));
+
+ if (!(opt->flags & PARSE_OPT_CANSKIP))
+ return opterror(opt, reason, flags);
+
+ err = 0;
+ if (unset)
+ noarg = true;
+ if (opt->flags & PARSE_OPT_NOARG)
+ noarg = true;
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
+ noarg = true;
+
+ switch (opt->type) {
+ case OPTION_BOOLEAN:
+ case OPTION_INCR:
+ case OPTION_BIT:
+ case OPTION_SET_UINT:
+ case OPTION_SET_PTR:
+ case OPTION_END:
+ case OPTION_ARGUMENT:
+ case OPTION_GROUP:
+ noarg = true;
+ break;
+ case OPTION_CALLBACK:
+ case OPTION_STRING:
+ case OPTION_INTEGER:
+ case OPTION_UINTEGER:
+ case OPTION_LONG:
+ case OPTION_U64:
+ default:
+ break;
+ }
+
+ if (!noarg)
+ err = get_arg(p, opt, flags, NULL);
+ if (err)
+ return err;
+
+ optwarning(opt, reason, flags);
+ return 0;
+ }
+
switch (opt->type) {
case OPTION_BIT:
if (unset)
@@ -327,14 +414,16 @@ match:
return get_value(p, options, flags);
}
- if (ambiguous_option)
- return error("Ambiguous option: %s "
- "(could be --%s%s or --%s%s)",
- arg,
- (ambiguous_flags & OPT_UNSET) ? "no-" : "",
- ambiguous_option->long_name,
- (abbrev_flags & OPT_UNSET) ? "no-" : "",
- abbrev_option->long_name);
+ if (ambiguous_option) {
+ fprintf(stderr,
+ " Error: Ambiguous option: %s (could be --%s%s or --%s%s)",
+ arg,
+ (ambiguous_flags & OPT_UNSET) ? "no-" : "",
+ ambiguous_option->long_name,
+ (abbrev_flags & OPT_UNSET) ? "no-" : "",
+ abbrev_option->long_name);
+ return -1;
+ }
if (abbrev_option)
return get_value(p, abbrev_option, abbrev_flags);
return -2;
@@ -346,7 +435,7 @@ static void check_typos(const char *arg, const struct option *options)
return;
if (!prefixcmp(arg, "no-")) {
- error ("did you mean `--%s` (with two dashes ?)", arg);
+ fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg);
exit(129);
}
@@ -354,14 +443,14 @@ static void check_typos(const char *arg, const struct option *options)
if (!options->long_name)
continue;
if (!prefixcmp(options->long_name, arg)) {
- error ("did you mean `--%s` (with two dashes ?)", arg);
+ fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg);
exit(129);
}
}
}
-void parse_options_start(struct parse_opt_ctx_t *ctx,
- int argc, const char **argv, int flags)
+static void parse_options_start(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, int flags)
{
memset(ctx, 0, sizeof(*ctx));
ctx->argc = argc - 1;
@@ -378,9 +467,9 @@ static int usage_with_options_internal(const char * const *,
const struct option *, int,
struct parse_opt_ctx_t *);
-int parse_options_step(struct parse_opt_ctx_t *ctx,
- const struct option *options,
- const char * const usagestr[])
+static int parse_options_step(struct parse_opt_ctx_t *ctx,
+ const struct option *options,
+ const char * const usagestr[])
{
int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP);
int excl_short_opt = 1;
@@ -489,7 +578,7 @@ exclusive:
return PARSE_OPT_HELP;
}
-int parse_options_end(struct parse_opt_ctx_t *ctx)
+static int parse_options_end(struct parse_opt_ctx_t *ctx)
{
memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out));
ctx->out[ctx->cpidx + ctx->argc] = NULL;
@@ -501,22 +590,20 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
{
struct parse_opt_ctx_t ctx;
- perf_env__set_cmdline(&perf_env, argc, argv);
-
/* build usage string if it's not provided */
if (subcommands && !usagestr[0]) {
- struct strbuf buf = STRBUF_INIT;
+ char *buf = NULL;
+
+ astrcatf(&buf, "%s %s [<options>] {", subcmd_config.exec_name, argv[0]);
- strbuf_addf(&buf, "perf %s [<options>] {", argv[0]);
for (int i = 0; subcommands[i]; i++) {
if (i)
- strbuf_addstr(&buf, "|");
- strbuf_addstr(&buf, subcommands[i]);
+ astrcat(&buf, "|");
+ astrcat(&buf, subcommands[i]);
}
- strbuf_addstr(&buf, "}");
+ astrcat(&buf, "}");
- usagestr[0] = strdup(buf.buf);
- strbuf_release(&buf);
+ usagestr[0] = buf;
}
parse_options_start(&ctx, argc, argv, flags);
@@ -541,13 +628,11 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
putchar('\n');
exit(130);
default: /* PARSE_OPT_UNKNOWN */
- if (ctx.argv[0][1] == '-') {
- strbuf_addf(&error_buf, "unknown option `%s'",
- ctx.argv[0] + 2);
- } else {
- strbuf_addf(&error_buf, "unknown switch `%c'",
- *ctx.opt);
- }
+ if (ctx.argv[0][1] == '-')
+ astrcatf(&error_buf, "unknown option `%s'",
+ ctx.argv[0] + 2);
+ else
+ astrcatf(&error_buf, "unknown switch `%c'", *ctx.opt);
usage_with_options(usagestr, options);
}
@@ -647,6 +732,10 @@ static void print_option_help(const struct option *opts, int full)
pad = USAGE_OPTS_WIDTH;
}
fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help);
+ if (opts->flags & PARSE_OPT_NOBUILD)
+ fprintf(stderr, "%*s(not built-in because %s)\n",
+ USAGE_OPTS_WIDTH + USAGE_GAP, "",
+ opts->build_opt);
}
static int option__cmp(const void *va, const void *vb)
@@ -672,16 +761,18 @@ static int option__cmp(const void *va, const void *vb)
static struct option *options__order(const struct option *opts)
{
- int nr_opts = 0;
+ int nr_opts = 0, len;
const struct option *o = opts;
struct option *ordered;
for (o = opts; o->type != OPTION_END; o++)
++nr_opts;
- ordered = memdup(opts, sizeof(*o) * (nr_opts + 1));
- if (ordered == NULL)
+ len = sizeof(*o) * (nr_opts + 1);
+ ordered = malloc(len);
+ if (!ordered)
goto out;
+ memcpy(ordered, opts, len);
qsort(ordered, nr_opts, sizeof(*o), option__cmp);
out:
@@ -719,9 +810,9 @@ static bool option__in_argv(const struct option *opt, const struct parse_opt_ctx
return false;
}
-int usage_with_options_internal(const char * const *usagestr,
- const struct option *opts, int full,
- struct parse_opt_ctx_t *ctx)
+static int usage_with_options_internal(const char * const *usagestr,
+ const struct option *opts, int full,
+ struct parse_opt_ctx_t *ctx)
{
struct option *ordered;
@@ -730,9 +821,9 @@ int usage_with_options_internal(const char * const *usagestr,
setup_pager();
- if (strbuf_avail(&error_buf)) {
- fprintf(stderr, " Error: %s\n", error_buf.buf);
- strbuf_release(&error_buf);
+ if (error_buf) {
+ fprintf(stderr, " Error: %s\n", error_buf);
+ zfree(&error_buf);
}
fprintf(stderr, "\n Usage: %s\n", *usagestr++);
@@ -768,7 +859,6 @@ int usage_with_options_internal(const char * const *usagestr,
void usage_with_options(const char * const *usagestr,
const struct option *opts)
{
- exit_browser(false);
usage_with_options_internal(usagestr, opts, 0, NULL);
exit(129);
}
@@ -777,13 +867,15 @@ void usage_with_options_msg(const char * const *usagestr,
const struct option *opts, const char *fmt, ...)
{
va_list ap;
-
- exit_browser(false);
+ char *tmp = error_buf;
va_start(ap, fmt);
- strbuf_addv(&error_buf, fmt, ap);
+ if (vasprintf(&error_buf, fmt, ap) == -1)
+ die("vasprintf failed");
va_end(ap);
+ free(tmp);
+
usage_with_options_internal(usagestr, opts, 0, NULL);
exit(129);
}
@@ -853,15 +945,39 @@ int parse_opt_verbosity_cb(const struct option *opt,
return 0;
}
-void set_option_flag(struct option *opts, int shortopt, const char *longopt,
- int flag)
+static struct option *
+find_option(struct option *opts, int shortopt, const char *longopt)
{
for (; opts->type != OPTION_END; opts++) {
if ((shortopt && opts->short_name == shortopt) ||
(opts->long_name && longopt &&
- !strcmp(opts->long_name, longopt))) {
- opts->flags |= flag;
- break;
- }
+ !strcmp(opts->long_name, longopt)))
+ return opts;
}
+ return NULL;
+}
+
+void set_option_flag(struct option *opts, int shortopt, const char *longopt,
+ int flag)
+{
+ struct option *opt = find_option(opts, shortopt, longopt);
+
+ if (opt)
+ opt->flags |= flag;
+ return;
+}
+
+void set_option_nobuild(struct option *opts, int shortopt,
+ const char *longopt,
+ const char *build_opt,
+ bool can_skip)
+{
+ struct option *opt = find_option(opts, shortopt, longopt);
+
+ if (!opt)
+ return;
+
+ opt->flags |= PARSE_OPT_NOBUILD;
+ opt->flags |= can_skip ? PARSE_OPT_CANSKIP : 0;
+ opt->build_opt = build_opt;
}
diff --git a/tools/perf/util/parse-options.h b/tools/lib/subcmd/parse-options.h
index a8e407bc251e..d60cab2726da 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/lib/subcmd/parse-options.h
@@ -1,8 +1,12 @@
-#ifndef __PERF_PARSE_OPTIONS_H
-#define __PERF_PARSE_OPTIONS_H
+#ifndef __SUBCMD_PARSE_OPTIONS_H
+#define __SUBCMD_PARSE_OPTIONS_H
-#include <linux/kernel.h>
#include <stdbool.h>
+#include <stdint.h>
+
+#ifndef NORETURN
+#define NORETURN __attribute__((__noreturn__))
+#endif
enum parse_opt_type {
/* special types */
@@ -41,6 +45,8 @@ enum parse_opt_option_flags {
PARSE_OPT_DISABLED = 32,
PARSE_OPT_EXCLUSIVE = 64,
PARSE_OPT_NOEMPTY = 128,
+ PARSE_OPT_NOBUILD = 256,
+ PARSE_OPT_CANSKIP = 512,
};
struct option;
@@ -96,6 +102,7 @@ struct option {
void *value;
const char *argh;
const char *help;
+ const char *build_opt;
int flags;
parse_opt_cb *callback;
@@ -149,6 +156,9 @@ struct option {
/* parse_options() will filter out the processed options and leave the
* non-option argments in argv[].
* Returns the number of arguments left in argv[].
+ *
+ * NOTE: parse_options() and parse_options_subcommand() may call exit() in the
+ * case of an error (or for 'special' options like --list-cmds or --list-opts).
*/
extern int parse_options(int argc, const char **argv,
const struct option *options,
@@ -195,15 +205,6 @@ extern int parse_options_usage(const char * const *usagestr,
const char *optstr,
bool short_opt);
-extern void parse_options_start(struct parse_opt_ctx_t *ctx,
- int argc, const char **argv, int flags);
-
-extern int parse_options_step(struct parse_opt_ctx_t *ctx,
- const struct option *options,
- const char * const usagestr[]);
-
-extern int parse_options_end(struct parse_opt_ctx_t *ctx);
-
/*----- some often used options -----*/
extern int parse_opt_abbrev_cb(const struct option *, const char *, int);
@@ -226,4 +227,7 @@ extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
extern const char *parse_options_fix_filename(const char *prefix, const char *file);
void set_option_flag(struct option *opts, int sopt, const char *lopt, int flag);
-#endif /* __PERF_PARSE_OPTIONS_H */
+void set_option_nobuild(struct option *opts, int shortopt, const char *longopt,
+ const char *build_opt, bool can_skip);
+
+#endif /* __SUBCMD_PARSE_OPTIONS_H */
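A sketch of the new NOBUILD handling: an option that depends on a compile-time feature stays in the option table, and when the feature is missing the tool marks it with set_option_nobuild(). With can_skip true the option (and any argument) is consumed with only a warning; with false it is a hard error, and the help output shows the build_opt hint. The option, variable and feature macro below are made up for illustration:

#include <stdbool.h>
#include "parse-options.h"

static bool use_widget;

static struct option demo_options[] = {
	OPT_BOOLEAN('w', "widget", &use_widget, "use the widget backend"),
	OPT_END()
};

static void fixup_demo_options(void)
{
#ifndef HAVE_WIDGET_SUPPORT	/* hypothetical feature-detection macro */
	set_option_nobuild(demo_options, 'w', "widget",
			   "NO_WIDGET=1", /*can_skip=*/true);
#endif
}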
diff --git a/tools/perf/util/run-command.c b/tools/lib/subcmd/run-command.c
index 34622b53e733..f4f6c9eb8e59 100644
--- a/tools/perf/util/run-command.c
+++ b/tools/lib/subcmd/run-command.c
@@ -1,7 +1,15 @@
-#include "cache.h"
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/wait.h>
+#include "subcmd-util.h"
#include "run-command.h"
-#include "exec_cmd.h"
-#include "debug.h"
+#include "exec-cmd.h"
+
+#define STRERR_BUFSIZE 128
static inline void close_pair(int fd[2])
{
@@ -112,8 +120,8 @@ int start_command(struct child_process *cmd)
}
if (cmd->preexec_cb)
cmd->preexec_cb();
- if (cmd->perf_cmd) {
- execv_perf_cmd(cmd->argv);
+ if (cmd->exec_cmd) {
+ execv_cmd(cmd->argv);
} else {
execvp(cmd->argv[0], (char *const*) cmd->argv);
}
@@ -164,8 +172,8 @@ static int wait_or_whine(pid_t pid)
if (waiting < 0) {
if (errno == EINTR)
continue;
- error("waitpid failed (%s)",
- strerror_r(errno, sbuf, sizeof(sbuf)));
+ fprintf(stderr, " Error: waitpid failed (%s)",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return -ERR_RUN_COMMAND_WAITPID;
}
if (waiting != pid)
@@ -207,7 +215,7 @@ static void prepare_run_command_v_opt(struct child_process *cmd,
memset(cmd, 0, sizeof(*cmd));
cmd->argv = argv;
cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0;
- cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0;
+ cmd->exec_cmd = opt & RUN_EXEC_CMD ? 1 : 0;
cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
}
diff --git a/tools/perf/util/run-command.h b/tools/lib/subcmd/run-command.h
index 1ef264d5069c..fe2befea1e73 100644
--- a/tools/perf/util/run-command.h
+++ b/tools/lib/subcmd/run-command.h
@@ -1,5 +1,7 @@
-#ifndef __PERF_RUN_COMMAND_H
-#define __PERF_RUN_COMMAND_H
+#ifndef __SUBCMD_RUN_COMMAND_H
+#define __SUBCMD_RUN_COMMAND_H
+
+#include <unistd.h>
enum {
ERR_RUN_COMMAND_FORK = 10000,
@@ -41,7 +43,7 @@ struct child_process {
unsigned no_stdin:1;
unsigned no_stdout:1;
unsigned no_stderr:1;
- unsigned perf_cmd:1; /* if this is to be perf sub-command */
+ unsigned exec_cmd:1; /* if this is to be an external sub-command */
unsigned stdout_to_stderr:1;
void (*preexec_cb)(void);
};
@@ -51,8 +53,8 @@ int finish_command(struct child_process *);
int run_command(struct child_process *);
#define RUN_COMMAND_NO_STDIN 1
-#define RUN_PERF_CMD 2 /*If this is to be perf sub-command */
+#define RUN_EXEC_CMD 2 /* If this is to be an external sub-command */
#define RUN_COMMAND_STDOUT_TO_STDERR 4
int run_command_v_opt(const char **argv, int opt);
-#endif /* __PERF_RUN_COMMAND_H */
+#endif /* __SUBCMD_RUN_COMMAND_H */
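With perf_cmd generalized to exec_cmd, launching a sub-command of the host tool goes through the same helpers; RUN_EXEC_CMD makes start_command() call execv_cmd() (and thus subcmd_config.exec_name) instead of plain execvp(). A minimal sketch with an illustrative sub-command:

#include "run-command.h"

/* Runs "<exec_name> list", e.g. "perf list", once exec_cmd_init() has
 * named the tool.
 */
static int run_list_subcommand(void)
{
	const char *argv[] = { "list", NULL };

	return run_command_v_opt(argv, RUN_EXEC_CMD);
}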
diff --git a/tools/perf/util/sigchain.c b/tools/lib/subcmd/sigchain.c
index ba785e9b1841..3537c348a18e 100644
--- a/tools/perf/util/sigchain.c
+++ b/tools/lib/subcmd/sigchain.c
@@ -1,5 +1,6 @@
+#include <signal.h>
+#include "subcmd-util.h"
#include "sigchain.h"
-#include "cache.h"
#define SIGCHAIN_MAX_SIGNALS 32
diff --git a/tools/perf/util/sigchain.h b/tools/lib/subcmd/sigchain.h
index 959d64eb5557..0c919f2874ca 100644
--- a/tools/perf/util/sigchain.h
+++ b/tools/lib/subcmd/sigchain.h
@@ -1,5 +1,5 @@
-#ifndef __PERF_SIGCHAIN_H
-#define __PERF_SIGCHAIN_H
+#ifndef __SUBCMD_SIGCHAIN_H
+#define __SUBCMD_SIGCHAIN_H
typedef void (*sigchain_fun)(int);
@@ -7,4 +7,4 @@ int sigchain_pop(int sig);
void sigchain_push_common(sigchain_fun f);
-#endif /* __PERF_SIGCHAIN_H */
+#endif /* __SUBCMD_SIGCHAIN_H */
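
For reference, a minimal sketch (not part of the patch) of the sigchain pattern: handlers are pushed onto a per-signal stack, and a handler typically pops itself and re-raises so the previously installed handler still runs. The cleanup body is illustrative.

#include <signal.h>
#include <subcmd/sigchain.h>

static void cleanup_on_signal(int sig)
{
	/* ... remove temporary files, restore terminal state, etc. ... */
	sigchain_pop(sig);	/* restore the previous handler ... */
	raise(sig);		/* ... and re-raise so it also runs */
}

int main(void)
{
	sigchain_push_common(cleanup_on_signal);	/* SIGINT, SIGTERM, ... */
	/* do work */
	return 0;
}
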
diff --git a/tools/lib/subcmd/subcmd-config.c b/tools/lib/subcmd/subcmd-config.c
new file mode 100644
index 000000000000..d017c728bd1b
--- /dev/null
+++ b/tools/lib/subcmd/subcmd-config.c
@@ -0,0 +1,11 @@
+#include "subcmd-config.h"
+
+#define UNDEFINED "SUBCMD_HAS_NOT_BEEN_INITIALIZED"
+
+struct subcmd_config subcmd_config = {
+ .exec_name = UNDEFINED,
+ .prefix = UNDEFINED,
+ .exec_path = UNDEFINED,
+ .exec_path_env = UNDEFINED,
+ .pager_env = UNDEFINED,
+};
diff --git a/tools/lib/subcmd/subcmd-config.h b/tools/lib/subcmd/subcmd-config.h
new file mode 100644
index 000000000000..cc8514030b5c
--- /dev/null
+++ b/tools/lib/subcmd/subcmd-config.h
@@ -0,0 +1,14 @@
+#ifndef __PERF_SUBCMD_CONFIG_H
+#define __PERF_SUBCMD_CONFIG_H
+
+struct subcmd_config {
+ const char *exec_name;
+ const char *prefix;
+ const char *exec_path;
+ const char *exec_path_env;
+ const char *pager_env;
+};
+
+extern struct subcmd_config subcmd_config;
+
+#endif /* __PERF_SUBCMD_CONFIG_H */
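
For context: subcmd_config collects the strings that used to be hard-coded for perf (program name, install prefix, exec path, and the environment variable names to honour); every field starts out as UNDEFINED until the host tool fills it in. A hedged sketch of such an init step, assuming direct assignment at startup; the perf-flavoured values are illustrative and the library's real entry points may wrap this differently.

#include "subcmd-config.h"

/* Illustrative only: identify the host tool before using exec-cmd/pager helpers. */
static void demo_subcmd_init(void)
{
	subcmd_config.exec_name     = "perf";
	subcmd_config.prefix        = "/usr";
	subcmd_config.exec_path     = "/usr/libexec/perf-core";
	subcmd_config.exec_path_env = "PERF_EXEC_PATH";
	subcmd_config.pager_env     = "PERF_PAGER";
}
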
diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h
new file mode 100644
index 000000000000..fc2e45d8aaf1
--- /dev/null
+++ b/tools/lib/subcmd/subcmd-util.h
@@ -0,0 +1,91 @@
+#ifndef __SUBCMD_UTIL_H
+#define __SUBCMD_UTIL_H
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#define NORETURN __attribute__((__noreturn__))
+
+static inline void report(const char *prefix, const char *err, va_list params)
+{
+ char msg[1024];
+ vsnprintf(msg, sizeof(msg), err, params);
+ fprintf(stderr, " %s%s\n", prefix, msg);
+}
+
+static NORETURN inline void die(const char *err, ...)
+{
+ va_list params;
+
+ va_start(params, err);
+ report(" Fatal: ", err, params);
+ exit(128);
+ va_end(params);
+}
+
+#define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
+
+#define alloc_nr(x) (((x)+16)*3/2)
+
+/*
+ * Realloc the buffer pointed at by variable 'x' so that it can hold
+ * at least 'nr' entries; the number of entries currently allocated
+ * is 'alloc', using the standard growing factor alloc_nr() macro.
+ *
+ * DO NOT USE any expression with side-effect for 'x' or 'alloc'.
+ */
+#define ALLOC_GROW(x, nr, alloc) \
+ do { \
+ if ((nr) > alloc) { \
+ if (alloc_nr(alloc) < (nr)) \
+ alloc = (nr); \
+ else \
+ alloc = alloc_nr(alloc); \
+ x = xrealloc((x), alloc * sizeof(*(x))); \
+ } \
+ } while(0)
+
+static inline void *xrealloc(void *ptr, size_t size)
+{
+ void *ret = realloc(ptr, size);
+ if (!ret && !size)
+ ret = realloc(ptr, 1);
+ if (!ret) {
+ ret = realloc(ptr, size);
+ if (!ret && !size)
+ ret = realloc(ptr, 1);
+ if (!ret)
+ die("Out of memory, realloc failed");
+ }
+ return ret;
+}
+
+#define astrcatf(out, fmt, ...) \
+({ \
+ char *tmp = *(out); \
+ if (asprintf((out), "%s" fmt, tmp ?: "", ## __VA_ARGS__) == -1) \
+ die("asprintf failed"); \
+ free(tmp); \
+})
+
+static inline void astrcat(char **out, const char *add)
+{
+ char *tmp = *out;
+
+ if (asprintf(out, "%s%s", tmp ?: "", add) == -1)
+ die("asprintf failed");
+
+ free(tmp);
+}
+
+static inline int prefixcmp(const char *str, const char *prefix)
+{
+ for (; ; str++, prefix++)
+ if (!*prefix)
+ return 0;
+ else if (*str != *prefix)
+ return (unsigned char)*prefix - (unsigned char)*str;
+}
+
+#endif /* __SUBCMD_UTIL_H */
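
For reference, a minimal sketch (not part of the patch) of ALLOC_GROW(): once nr exceeds the current allocation it grows it to max(nr, (alloc+16)*3/2) and xrealloc()s the array, so repeated appends stay amortized O(1). The struct and function names below are illustrative.

#include "subcmd-util.h"

struct int_vec {
	int *items;
	int  nr;
	int  alloc;
};

/* Append one value, letting ALLOC_GROW() resize the backing store. */
static void int_vec_push(struct int_vec *v, int value)
{
	ALLOC_GROW(v->items, v->nr + 1, v->alloc);
	v->items[v->nr++] = value;
}
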
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 2a912df6771b..c3bd294a63d1 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -3746,7 +3746,7 @@ static const struct flag flags[] = {
{ "NET_TX_SOFTIRQ", 2 },
{ "NET_RX_SOFTIRQ", 3 },
{ "BLOCK_SOFTIRQ", 4 },
- { "BLOCK_IOPOLL_SOFTIRQ", 5 },
+ { "IRQ_POLL_SOFTIRQ", 5 },
{ "TASKLET_SOFTIRQ", 6 },
{ "SCHED_SOFTIRQ", 7 },
{ "HRTIMER_SOFTIRQ", 8 },
@@ -4735,73 +4735,80 @@ static int is_printable_array(char *p, unsigned int len)
return 1;
}
-static void print_event_fields(struct trace_seq *s, void *data,
- int size __maybe_unused,
- struct event_format *event)
+void pevent_print_field(struct trace_seq *s, void *data,
+ struct format_field *field)
{
- struct format_field *field;
unsigned long long val;
unsigned int offset, len, i;
-
- field = event->format.fields;
- while (field) {
- trace_seq_printf(s, " %s=", field->name);
- if (field->flags & FIELD_IS_ARRAY) {
- offset = field->offset;
- len = field->size;
- if (field->flags & FIELD_IS_DYNAMIC) {
- val = pevent_read_number(event->pevent, data + offset, len);
- offset = val;
- len = offset >> 16;
- offset &= 0xffff;
- }
- if (field->flags & FIELD_IS_STRING &&
- is_printable_array(data + offset, len)) {
- trace_seq_printf(s, "%s", (char *)data + offset);
- } else {
- trace_seq_puts(s, "ARRAY[");
- for (i = 0; i < len; i++) {
- if (i)
- trace_seq_puts(s, ", ");
- trace_seq_printf(s, "%02x",
- *((unsigned char *)data + offset + i));
- }
- trace_seq_putc(s, ']');
- field->flags &= ~FIELD_IS_STRING;
- }
+ struct pevent *pevent = field->event->pevent;
+
+ if (field->flags & FIELD_IS_ARRAY) {
+ offset = field->offset;
+ len = field->size;
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ val = pevent_read_number(pevent, data + offset, len);
+ offset = val;
+ len = offset >> 16;
+ offset &= 0xffff;
+ }
+ if (field->flags & FIELD_IS_STRING &&
+ is_printable_array(data + offset, len)) {
+ trace_seq_printf(s, "%s", (char *)data + offset);
} else {
- val = pevent_read_number(event->pevent, data + field->offset,
- field->size);
- if (field->flags & FIELD_IS_POINTER) {
- trace_seq_printf(s, "0x%llx", val);
- } else if (field->flags & FIELD_IS_SIGNED) {
- switch (field->size) {
- case 4:
- /*
- * If field is long then print it in hex.
- * A long usually stores pointers.
- */
- if (field->flags & FIELD_IS_LONG)
- trace_seq_printf(s, "0x%x", (int)val);
- else
- trace_seq_printf(s, "%d", (int)val);
- break;
- case 2:
- trace_seq_printf(s, "%2d", (short)val);
- break;
- case 1:
- trace_seq_printf(s, "%1d", (char)val);
- break;
- default:
- trace_seq_printf(s, "%lld", val);
- }
- } else {
+ trace_seq_puts(s, "ARRAY[");
+ for (i = 0; i < len; i++) {
+ if (i)
+ trace_seq_puts(s, ", ");
+ trace_seq_printf(s, "%02x",
+ *((unsigned char *)data + offset + i));
+ }
+ trace_seq_putc(s, ']');
+ field->flags &= ~FIELD_IS_STRING;
+ }
+ } else {
+ val = pevent_read_number(pevent, data + field->offset,
+ field->size);
+ if (field->flags & FIELD_IS_POINTER) {
+ trace_seq_printf(s, "0x%llx", val);
+ } else if (field->flags & FIELD_IS_SIGNED) {
+ switch (field->size) {
+ case 4:
+ /*
+ * If field is long then print it in hex.
+ * A long usually stores pointers.
+ */
if (field->flags & FIELD_IS_LONG)
- trace_seq_printf(s, "0x%llx", val);
+ trace_seq_printf(s, "0x%x", (int)val);
else
- trace_seq_printf(s, "%llu", val);
+ trace_seq_printf(s, "%d", (int)val);
+ break;
+ case 2:
+ trace_seq_printf(s, "%2d", (short)val);
+ break;
+ case 1:
+ trace_seq_printf(s, "%1d", (char)val);
+ break;
+ default:
+ trace_seq_printf(s, "%lld", val);
}
+ } else {
+ if (field->flags & FIELD_IS_LONG)
+ trace_seq_printf(s, "0x%llx", val);
+ else
+ trace_seq_printf(s, "%llu", val);
}
+ }
+}
+
+void pevent_print_fields(struct trace_seq *s, void *data,
+ int size __maybe_unused, struct event_format *event)
+{
+ struct format_field *field;
+
+ field = event->format.fields;
+ while (field) {
+ trace_seq_printf(s, " %s=", field->name);
+ pevent_print_field(s, data, field);
field = field->next;
}
}
@@ -4827,7 +4834,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
if (event->flags & EVENT_FL_FAILED) {
trace_seq_printf(s, "[FAILED TO PARSE]");
- print_event_fields(s, data, size, event);
+ pevent_print_fields(s, data, size, event);
return;
}
@@ -4968,13 +4975,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
sizeof(long) != 8) {
char *p;
- ls = 2;
/* make %l into %ll */
- p = strchr(format, 'l');
- if (p)
+ if (ls == 1 && (p = strchr(format, 'l')))
memmove(p+1, p, strlen(p)+1);
else if (strcmp(format, "%p") == 0)
strcpy(format, "0x%llx");
+ ls = 2;
}
switch (ls) {
case -2:
@@ -5302,7 +5308,7 @@ void pevent_event_info(struct trace_seq *s, struct event_format *event,
int print_pretty = 1;
if (event->pevent->print_raw || (event->flags & EVENT_FL_PRINTRAW))
- print_event_fields(s, record->data, record->size, event);
+ pevent_print_fields(s, record->data, record->size, event);
else {
if (event->handler && !(event->flags & EVENT_FL_NOHANDLE))
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 6fc83c7edbe9..706d9bc24066 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -705,6 +705,10 @@ struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *com
struct cmdline *next);
int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline);
+void pevent_print_field(struct trace_seq *s, void *data,
+ struct format_field *field);
+void pevent_print_fields(struct trace_seq *s, void *data,
+ int size __maybe_unused, struct event_format *event);
void pevent_event_info(struct trace_seq *s, struct event_format *event,
struct pevent_record *record);
int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,
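
For reference, a hedged sketch (not part of the patch) of using the newly exported printer: callers can now format tracepoint fields without going through the full pretty-print path. It assumes the usual libtraceevent trace_seq helpers (trace_seq_init/do_printf/destroy); error handling is omitted.

#include "event-parse.h"

/* Print " name=value" pairs for one record via the newly exported helper. */
static void dump_record_fields(struct event_format *event,
			       struct pevent_record *record)
{
	struct trace_seq s;

	trace_seq_init(&s);
	pevent_print_fields(&s, record->data, record->size, event);
	trace_seq_do_printf(&s);	/* flush the formatted text to stdout */
	trace_seq_destroy(&s);
}
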
diff --git a/tools/lib/util/find_next_bit.c b/tools/lib/util/find_next_bit.c
deleted file mode 100644
index 41b44f65a79e..000000000000
--- a/tools/lib/util/find_next_bit.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/* find_next_bit.c: fallback find next bit implementation
- *
- * Copied from lib/find_next_bit.c to tools/lib/next_bit.c
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <asm/types.h>
-#include <asm/byteorder.h>
-
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-
-#ifndef find_next_bit
-/*
- * Find the next set bit in a memory region.
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-#endif
-
-#ifndef find_first_bit
-/*
- * Find the first set bit in a memory region.
- */
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
-{
- const unsigned long *p = addr;
- unsigned long result = 0;
- unsigned long tmp;
-
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
-
- tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found:
- return result + __ffs(tmp);
-}
-#endif
diff --git a/tools/perf/Build b/tools/perf/Build
index 72237455b400..a43fae7f439a 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -1,5 +1,6 @@
perf-y += builtin-bench.o
perf-y += builtin-annotate.o
+perf-y += builtin-config.o
perf-y += builtin-diff.o
perf-y += builtin-evlist.o
perf-y += builtin-help.o
@@ -19,6 +20,7 @@ perf-y += builtin-kvm.o
perf-y += builtin-inject.o
perf-y += builtin-mem.o
perf-y += builtin-data.o
+perf-y += builtin-version.o
perf-$(CONFIG_AUDIT) += builtin-trace.o
perf-$(CONFIG_LIBELF) += builtin-probe.o
@@ -34,8 +36,13 @@ paths += -DPERF_MAN_PATH="BUILD_STR($(mandir_SQ))"
CFLAGS_builtin-help.o += $(paths)
CFLAGS_builtin-timechart.o += $(paths)
-CFLAGS_perf.o += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))" -include $(OUTPUT)PERF-VERSION-FILE
+CFLAGS_perf.o += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))" \
+ -DPERF_EXEC_PATH="BUILD_STR($(perfexecdir_SQ))" \
+ -DPREFIX="BUILD_STR($(prefix_SQ))" \
+ -include $(OUTPUT)PERF-VERSION-FILE
CFLAGS_builtin-trace.o += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_SQ))"
+CFLAGS_builtin-report.o += -DTIPDIR="BUILD_STR($(tipdir_SQ))"
+CFLAGS_builtin-report.o += -DDOCDIR="BUILD_STR($(srcdir_SQ)/Documentation)"
libperf-y += util/
libperf-y += arch/
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
new file mode 100644
index 000000000000..b9ca1e304158
--- /dev/null
+++ b/tools/perf/Documentation/perf-config.txt
@@ -0,0 +1,103 @@
+perf-config(1)
+==============
+
+NAME
+----
+perf-config - Get and set variables in a configuration file.
+
+SYNOPSIS
+--------
+[verse]
+'perf config' -l | --list
+
+DESCRIPTION
+-----------
+You can manage variables in a configuration file with this command.
+
+OPTIONS
+-------
+
+-l::
+--list::
+ Show current config variables, name and value, for all sections.
+
+CONFIGURATION FILE
+------------------
+
+The perf configuration file contains many variables to change various
+aspects of each of its tools, including output, disk usage, etc.
+The '$HOME/.perfconfig' file is used to store a per-user configuration.
+The file '$(sysconfdir)/perfconfig' can be used to
+store a system-wide default configuration.
+
+Syntax
+~~~~~~
+
+The file consists of sections. A section starts with its name
+surrounded by square brackets and continues till the next section
+begins. Each variable must be in a section, and have the form
+'name = value', for example:
+
+ [section]
+ name1 = value1
+ name2 = value2
+
+Section names are case sensitive and can contain any characters except
+newline (double quote `"` and backslash have to be escaped as `\"` and `\\`,
+respectively). Section headers can't span multiple lines.
+
+Example
+~~~~~~~
+
+Given a $HOME/.perfconfig like this:
+
+#
+# This is the config file, and
+# a '#' and ';' character indicates a comment
+#
+
+ [colors]
+ # Color variables
+ top = red, default
+ medium = green, default
+ normal = lightgray, default
+ selected = white, lightgray
+ code = blue, default
+ addr = magenta, default
+ root = white, blue
+
+ [tui]
+ # Defaults if linked with libslang
+ report = on
+ annotate = on
+ top = on
+
+ [buildid]
+ # Default, disable using /dev/null
+ dir = ~/.debug
+
+ [annotate]
+ # Defaults
+ hide_src_code = false
+ use_offset = true
+ jump_arrows = true
+ show_nr_jumps = false
+
+ [help]
+ # Format can be man, info, web or html
+ format = man
+ autocorrect = 0
+
+ [ui]
+ show-headers = true
+
+ [call-graph]
+ # fp (framepointer), dwarf
+ record-mode = fp
+ print-type = graph
+ order = caller
+ sort-key = function
+
+SEE ALSO
+--------
+linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index 1ceb3700ffbb..6f7200fb85cf 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -32,6 +32,9 @@ OPTIONS
--group::
Show event group information.
+--trace-fields::
+ Show tracepoint field names.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-list[1],
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e630a7d2c348..fbceb631387c 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -207,11 +207,23 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-
In per-thread mode with inheritance mode on (default), samples are captured only when
the thread executes on the designated CPUs. Default is to monitor all CPUs.
+-B::
+--no-buildid::
+Do not save the build ids of binaries in the perf.data files. This skips
+post processing after recording, which sometimes makes the final step in
+the recording process take a long time, as it needs to process all
+events looking for mmap records. The downside is that it can misresolve
+symbols if the workload binaries used when recording get locally rebuilt
+or upgraded, because the only key available in this case is the
+pathname. You can also set the "record.build-id" config variable to
+'skip' to have this behaviour permanently.
+
-N::
--no-buildid-cache::
Do not update the buildid cache. This saves some overhead in situations
where the information in the perf.data file (which includes buildids)
-is sufficient.
+is sufficient. You can also set the "record.build-id" config variable to
+'no-cache' to have the same effect.
-G name,...::
--cgroup name,...::
@@ -314,11 +326,20 @@ This option sets the time out limit. The default value is 500 ms.
Record context switch events i.e. events of type PERF_RECORD_SWITCH or
PERF_RECORD_SWITCH_CPU_WIDE.
---clang-path::
+--clang-path=PATH::
Path to clang binary to use for compiling BPF scriptlets.
+(enabled when BPF support is on)
---clang-opt::
+--clang-opt=OPTIONS::
Options passed to clang when compiling BPF scriptlets.
+(enabled when BPF support is on)
+
+--vmlinux=PATH::
+Specify vmlinux path which has debuginfo.
+(enabled when BPF prologue is on)
+
+--buildid-all::
+Record build-id of all DSOs regardless of whether they are actually hit or not.
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 5ce8da1e1256..8a301f6afb37 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -117,6 +117,30 @@ OPTIONS
And default sort keys are changed to comm, dso_from, symbol_from, dso_to
and symbol_to, see '--branch-stack'.
+ If the data file has tracepoint event(s), the following (dynamic) sort keys
+ are also available:
+ trace, trace_fields, [<event>.]<field>[/raw]
+
+ - trace: pretty printed trace output in a single column
+ - trace_fields: fields in tracepoints in separate columns
+ - <field name>: optional event and field name for a specific field
+
+ The last form consists of event and field names. If event name is
+ omitted, it searches all events for matching field name. The matched
+ field will be shown only for the event has the field. The event name
+ supports substring match so user doesn't need to specify full subsystem
+ and event name everytime. For example, 'sched:sched_switch' event can
+ be shortened to 'switch' as long as it's not ambiguous. Also event can
+ be specified by its index (starting from 1) preceded by the '%'.
+ So '%1' is the first event, '%2' is the second, and so on.
+
+ The field name can have a '/raw' suffix, which disables pretty printing
+ and shows the raw field value as hex numbers. The --raw-trace option
+ has the same effect for all dynamic sort keys.
+
+ The default sort keys are changed to 'trace' if all events in the data
+ file are tracepoints.
+
-F::
--fields=::
Specify output field - multiple keys can be specified in CSV format.
@@ -170,17 +194,18 @@ OPTIONS
Dump raw trace in ASCII.
-g::
---call-graph=<print_type,threshold[,print_limit],order,sort_key,branch>::
+--call-graph=<print_type,threshold[,print_limit],order,sort_key[,branch],value>::
Display call chains using type, min percent threshold, print limit,
- call order, sort key and branch. Note that ordering of parameters is not
- fixed so any parement can be given in an arbitraty order. One exception
- is the print_limit which should be preceded by threshold.
+ call order, sort key, optional branch and value. Note that ordering of
+ parameters is not fixed, so any parameter can be given in an arbitrary order.
+ One exception is the print_limit which should be preceded by threshold.
print_type can be either:
- flat: single column, linear exposure of call chains.
- graph: use a graph tree, displaying absolute overhead rates. (default)
- fractal: like graph, but displays relative rates. Each branch of
the tree is considered as a new profiled object.
+ - folded: call chains are displayed in a line, separated by semicolons
- none: disable call chain display.
threshold is a percentage value which specifies a minimum percent to be
@@ -204,6 +229,11 @@ OPTIONS
- branch: include last branch information in callgraph when available.
Usually more convenient to use --branch-history for this.
+ value can be:
+ - percent: display overhead percent (default)
+ - period: display event period
+ - count: display event count
+
--children::
Accumulate callchain of children to parent entry so that then can
show up in the output. The output will have a new "Children" column
@@ -365,6 +395,9 @@ include::itrace.txt[]
--socket-filter::
Only report the samples on the processor socket that match with this filter
+--raw-trace::
+ When displaying traceevent output, do not use print fmt or plugins.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4e074a660826..52ef7a9d50aa 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -10,6 +10,8 @@ SYNOPSIS
[verse]
'perf stat' [-e <EVENT> | --event=EVENT] [-a] <command>
'perf stat' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
+'perf stat' [-e <EVENT> | --event=EVENT] [-a] record [-o file] -- <command> [<options>]
+'perf stat' report [-i file]
DESCRIPTION
-----------
@@ -22,6 +24,11 @@ OPTIONS
<command>...::
Any command you can specify in a shell.
+record::
+ See STAT RECORD.
+
+report::
+ See STAT REPORT.
-e::
--event=::
@@ -159,6 +166,33 @@ filter out the startup phase of the program, which is often very different.
Print statistics of transactional execution if supported.
+STAT RECORD
+-----------
+Stores stat data into a perf data file.
+
+-o file::
+--output file::
+Output file name.
+
+STAT REPORT
+-----------
+Reads and reports stat data from a perf data file.
+
+-i file::
+--input file::
+Input file name.
+
+--per-socket::
+Aggregate counts per processor socket for system-wide mode measurements.
+
+--per-core::
+Aggregate counts per physical processor for system-wide mode measurements.
+
+-A::
+--no-aggr::
+Do not aggregate counts across all monitored CPUs.
+
+
EXAMPLES
--------
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 556cec09bf50..b0e60e17db38 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -230,6 +230,9 @@ Default is to monitor all CPUS.
The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k
Note that this feature may not be available on all processors.
+--raw-trace::
+ When displaying traceevent output, do not use print fmt or plugins.
+
INTERACTIVE PROMPTING KEYS
--------------------------
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
new file mode 100644
index 000000000000..e0ce9573b79b
--- /dev/null
+++ b/tools/perf/Documentation/tips.txt
@@ -0,0 +1,29 @@
+For a higher level overview, try: perf report --sort comm,dso
+Sample related events with: perf record -e '{cycles,instructions}:S'
+Compare performance results with: perf diff [<old file> <new file>]
+Boolean options have negative forms, e.g.: perf report --no-children
+Customize output of perf script with: perf script -F event,ip,sym
+Generate a script for your data: perf script -g <lang>
+Save output of perf stat using: perf stat record <target workload>
+Create an archive with symtabs to analyse on another machine: perf archive
+Search options using a keyword: perf report -h <keyword>
+Use parent filter to see specific call path: perf report -p <regex>
+List events using substring match: perf list <keyword>
+To see list of saved events and attributes: perf evlist -v
+Use --symfs <dir> if your symbol files are in non-standard locations
+To see callchains in a more compact form: perf report -g folded
+Show individual samples with: perf script
+Limit to show entries above 5% only: perf report --percent-limit 5
+Profiling branch (mis)predictions with: perf record -b / perf report
+Treat branches as callchains: perf report --branch-history
+To count events in every 1000 msec: perf stat -I 1000
+Print event counts in CSV format with: perf stat -x,
+If you have debuginfo enabled, try: perf report -s sym,srcline
+For memory address profiling, try: perf mem record / perf mem report
+For tracepoint events, try: perf report -s trace_fields
+To record callchains for each sample: perf record -g
+To record every process run by a user: perf record -u <user>
+Skip collecting build-id when recording: perf record -B
+To change sampling frequency to 100 Hz: perf record -F 100
+See assembly instructions with percentage: perf annotate <symbol>
+If you prefer Intel style assembly, try: perf annotate -M intel
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 39c38cb45b00..2e1fa2357528 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,6 +1,7 @@
tools/perf
tools/arch/alpha/include/asm/barrier.h
tools/arch/arm/include/asm/barrier.h
+tools/arch/arm64/include/asm/barrier.h
tools/arch/ia64/include/asm/barrier.h
tools/arch/mips/include/asm/barrier.h
tools/arch/powerpc/include/asm/barrier.h
@@ -20,14 +21,18 @@ tools/lib/traceevent
tools/lib/bpf
tools/lib/api
tools/lib/bpf
+tools/lib/subcmd
tools/lib/hweight.c
tools/lib/rbtree.c
+tools/lib/string.c
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
-tools/lib/util/find_next_bit.c
+tools/lib/find_bit.c
+tools/lib/bitmap.c
tools/include/asm/atomic.h
tools/include/asm/barrier.h
tools/include/asm/bug.h
+tools/include/asm-generic/atomic-gcc.h
tools/include/asm-generic/barrier.h
tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h
@@ -50,8 +55,10 @@ tools/include/linux/log2.h
tools/include/linux/poison.h
tools/include/linux/rbtree.h
tools/include/linux/rbtree_augmented.h
+tools/include/linux/string.h
tools/include/linux/types.h
tools/include/linux/err.h
+tools/include/linux/bitmap.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/fls64.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 0d19d5447d6c..5d34815c7ccb 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -77,6 +77,9 @@ include config/utilities.mak
# Define NO_AUXTRACE if you do not want AUX area tracing support
#
# Define NO_LIBBPF if you do not want BPF support
+#
+# Define FEATURES_DUMP to provide a feature detection dump file
+# and bypass the feature detection
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -145,9 +148,10 @@ BISON = bison
STRIP = strip
AWK = awk
-LIB_DIR = $(srctree)/tools/lib/api/
+LIB_DIR = $(srctree)/tools/lib/api/
TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
-BPF_DIR = $(srctree)/tools/lib/bpf/
+BPF_DIR = $(srctree)/tools/lib/bpf/
+SUBCMD_DIR = $(srctree)/tools/lib/subcmd/
# include config/Makefile by default and rule out
# non-config cases
@@ -165,6 +169,15 @@ ifeq ($(config),1)
include config/Makefile
endif
+# The FEATURE_DUMP_EXPORT holds the location of the actual
+# FEATURE_DUMP file to be used to bypass feature detection
+# (for bpf or any other subproject)
+ifeq ($(FEATURES_DUMP),)
+FEATURE_DUMP_EXPORT := $(realpath $(OUTPUT)FEATURE-DUMP)
+else
+FEATURE_DUMP_EXPORT := $(FEATURES_DUMP)
+endif
+
export prefix bindir sharedir sysconfdir DESTDIR
# sparse is architecture-neutral, which means that we need to tell it
@@ -184,15 +197,17 @@ strip-libs = $(filter-out -l%,$(1))
ifneq ($(OUTPUT),)
TE_PATH=$(OUTPUT)
BPF_PATH=$(OUTPUT)
+ SUBCMD_PATH=$(OUTPUT)
ifneq ($(subdir),)
- LIB_PATH=$(OUTPUT)/../lib/api/
+ API_PATH=$(OUTPUT)/../lib/api/
else
- LIB_PATH=$(OUTPUT)
+ API_PATH=$(OUTPUT)
endif
else
TE_PATH=$(TRACE_EVENT_DIR)
- LIB_PATH=$(LIB_DIR)
+ API_PATH=$(LIB_DIR)
BPF_PATH=$(BPF_DIR)
+ SUBCMD_PATH=$(SUBCMD_DIR)
endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
@@ -201,11 +216,13 @@ export LIBTRACEEVENT
LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)libtraceevent-dynamic-list
LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS = -Xlinker --dynamic-list=$(LIBTRACEEVENT_DYNAMIC_LIST)
-LIBAPI = $(LIB_PATH)libapi.a
+LIBAPI = $(API_PATH)libapi.a
export LIBAPI
LIBBPF = $(BPF_PATH)libbpf.a
+LIBSUBCMD = $(SUBCMD_PATH)libsubcmd.a
+
# python extension build directories
PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
@@ -257,7 +274,7 @@ export PERL_PATH
LIB_FILE=$(OUTPUT)libperf.a
-PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT)
+PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
ifndef NO_LIBBPF
PERFLIBS += $(LIBBPF)
endif
@@ -420,7 +437,7 @@ $(LIBTRACEEVENT)-clean:
$(call QUIET_CLEAN, libtraceevent)
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
-install-traceevent-plugins: $(LIBTRACEEVENT)
+install-traceevent-plugins: libtraceevent_plugins
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
$(LIBAPI): fixdep FORCE
@@ -431,12 +448,19 @@ $(LIBAPI)-clean:
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
$(LIBBPF): fixdep FORCE
- $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a
+ $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
+$(LIBSUBCMD): fixdep FORCE
+ $(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) $(OUTPUT)libsubcmd.a
+
+$(LIBSUBCMD)-clean:
+ $(call QUIET_CLEAN, libsubcmd)
+ $(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) clean
+
help:
@echo 'Perf make targets:'
@echo ' doc - make *all* documentation (see below)'
@@ -476,7 +500,7 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
$(DOC_TARGETS):
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
-TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol ../include ../lib/bpf
+TAG_FOLDERS= . ../lib ../include
TAG_FILES= ../../include/uapi/linux/perf_event.h
TAGS:
@@ -555,6 +579,9 @@ endif
$(call QUIET_INSTALL, perf_completion-script) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
$(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
+ $(call QUIET_INSTALL, perf-tip) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(tip_instdir_SQ)'; \
+ $(INSTALL) Documentation/tips.txt -t '$(DESTDIR_SQ)$(tip_instdir_SQ)'
install-tests: all install-gtk
$(call QUIET_INSTALL, tests) \
@@ -582,19 +609,31 @@ $(INSTALL_DOC_TARGETS):
#
config-clean:
$(call QUIET_CLEAN, config)
- $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
+ $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ $(if $(OUTPUT),OUTPUT=$(OUTPUT)feature/,) clean >/dev/null
-clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean config-clean
+clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean
$(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
- $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+ $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
- $(OUTPUT)util/intel-pt-decoder/inat-tables.c
+ $(OUTPUT)util/intel-pt-decoder/inat-tables.c $(OUTPUT)fixdep \
+ $(OUTPUT)tests/llvm-src-{base,kbuild,prologue}.c
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
$(python-clean)
#
+# Copy the FEATURE-DUMP file into $(FEATURE_DUMP_COPY),
+# if defined, with no further action.
+feature-dump:
+ifdef FEATURE_DUMP_COPY
+ @cp $(OUTPUT)FEATURE-DUMP $(FEATURE_DUMP_COPY)
+ @echo "FEATURE-DUMP file copied into $(FEATURE_DUMP_COPY)"
+else
+ @echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP"
+endif
+
+#
# Trick: if ../../.git does not exist - we are building out of tree for example,
# then force version regeneration:
#
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index 7ed00f4b0908..b48de2f5813c 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -2,10 +2,10 @@
#define ARCH_TESTS_H
/* Tests */
-int test__rdpmc(void);
-int test__perf_time_to_tsc(void);
-int test__insn_x86(void);
-int test__intel_cqm_count_nmi_context(void);
+int test__rdpmc(int subtest);
+int test__perf_time_to_tsc(int subtest);
+int test__insn_x86(int subtest);
+int test__intel_cqm_count_nmi_context(int subtest);
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
index b6115dfd28f0..08d9b2bc185c 100644
--- a/tools/perf/arch/x86/tests/insn-x86.c
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -171,7 +171,7 @@ static int test_data_set(struct test_data *dat_set, int x86_64)
* verbose (-v) option to see all the instructions and whether or not they
* decoded successfuly.
*/
-int test__insn_x86(void)
+int test__insn_x86(int subtest __maybe_unused)
{
int ret = 0;
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
index d28c1b6a3b54..7f064eb37158 100644
--- a/tools/perf/arch/x86/tests/intel-cqm.c
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -17,7 +17,7 @@ static pid_t spawn(void)
if (pid)
return pid;
- while(1);
+ while(1)
sleep(5);
return 0;
}
@@ -33,7 +33,7 @@ static pid_t spawn(void)
* the last read counter value to avoid triggering a WARN_ON_ONCE() in
* smp_call_function_many() caused by sending IPIs from NMI context.
*/
-int test__intel_cqm_count_nmi_context(void)
+int test__intel_cqm_count_nmi_context(int subtest __maybe_unused)
{
struct perf_evlist *evlist = NULL;
struct perf_evsel *evsel = NULL;
@@ -54,7 +54,7 @@ int test__intel_cqm_count_nmi_context(void)
ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
if (ret) {
- pr_debug("parse_events failed\n");
+ pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
err = TEST_SKIP;
goto out;
}
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index 658cd200af74..9d29ee283ac5 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -35,13 +35,12 @@
* %0 is returned, otherwise %-1 is returned. If TSC conversion is not
* supported then then the test passes but " (not supported)" is printed.
*/
-int test__perf_time_to_tsc(void)
+int test__perf_time_to_tsc(int subtest __maybe_unused)
{
struct record_opts opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
- .freq = 4000,
.target = {
.uses_mmap = true,
},
diff --git a/tools/perf/arch/x86/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
index e7688214c7cf..7bb0d13c235f 100644
--- a/tools/perf/arch/x86/tests/rdpmc.c
+++ b/tools/perf/arch/x86/tests/rdpmc.c
@@ -149,7 +149,7 @@ out_close:
return 0;
}
-int test__rdpmc(void)
+int test__rdpmc(int subtest __maybe_unused)
{
int status = 0;
int wret = 0;
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index ff63649fa9ac..465970370f3e 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -5,6 +5,7 @@ libperf-y += kvm-stat.o
libperf-y += perf_regs.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
+libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index 9b94ce520917..8d8150f1cf9b 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -327,7 +327,7 @@ static int intel_bts_snapshot_start(struct auxtrace_record *itr)
evlist__for_each(btsr->evlist, evsel) {
if (evsel->attr.type == btsr->intel_bts_pmu->type)
- return perf_evlist__disable_event(btsr->evlist, evsel);
+ return perf_evsel__disable(evsel);
}
return -EINVAL;
}
@@ -340,7 +340,7 @@ static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
evlist__for_each(btsr->evlist, evsel) {
if (evsel->attr.type == btsr->intel_bts_pmu->type)
- return perf_evlist__enable_event(btsr->evlist, evsel);
+ return perf_evsel__enable(evsel);
}
return -EINVAL;
}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index b02af064f0f9..f05daacc9e78 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -26,7 +26,7 @@
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/cpumap.h"
-#include "../../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../../util/parse-events.h"
#include "../../util/pmu.h"
#include "../../util/debug.h"
@@ -725,7 +725,7 @@ static int intel_pt_snapshot_start(struct auxtrace_record *itr)
evlist__for_each(ptr->evlist, evsel) {
if (evsel->attr.type == ptr->intel_pt_pmu->type)
- return perf_evlist__disable_event(ptr->evlist, evsel);
+ return perf_evsel__disable(evsel);
}
return -EINVAL;
}
@@ -738,7 +738,7 @@ static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
evlist__for_each(ptr->evlist, evsel) {
if (evsel->attr.type == ptr->intel_pt_pmu->type)
- return perf_evlist__enable_event(ptr->evlist, evsel);
+ return perf_evsel__enable(evsel);
}
return -EINVAL;
}
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index fc9bebd2cca0..0999ac536d86 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -11,7 +11,7 @@
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index bc6a16adbca8..6a18ce21f865 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -5,7 +5,7 @@
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index ad0d9b5342fb..718238683013 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -11,7 +11,7 @@
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index 6d8c9fa2a16c..91aaf2a1fa90 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -10,7 +10,7 @@
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index e5e41d3bdce7..f416bd705f66 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -11,7 +11,7 @@
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index 9419b944220f..a91aa85d80ff 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -8,7 +8,7 @@
#include "../perf.h"
#include "../util/util.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../util/header.h"
#include "../util/cloexec.h"
#include "bench.h"
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 492df2752a2d..5049d6357a46 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -7,7 +7,7 @@
#include "../perf.h"
#include "../builtin.h"
#include "../util/util.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../util/cloexec.h"
#include "bench.h"
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index d4ff1b539cfd..bfaf9503de8e 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -11,7 +11,7 @@
#include "../perf.h"
#include "../util/util.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../builtin.h"
#include "bench.h"
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 005cc283790c..1dc2d13cc272 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -10,7 +10,7 @@
*/
#include "../perf.h"
#include "../util/util.h"
-#include "../util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "../builtin.h"
#include "bench.h"
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 2bf9b3fd9e61..cc5c1267c738 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -21,7 +21,7 @@
#include "util/evsel.h"
#include "util/annotate.h"
#include "util/event.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/thread.h"
#include "util/sort.h"
@@ -47,7 +47,7 @@ struct perf_annotate {
};
static int perf_evsel__add_sample(struct perf_evsel *evsel,
- struct perf_sample *sample __maybe_unused,
+ struct perf_sample *sample,
struct addr_location *al,
struct perf_annotate *ann)
{
@@ -72,7 +72,10 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
return 0;
}
- he = __hists__add_entry(hists, al, NULL, NULL, NULL, 1, 1, 0, true);
+ sample->period = 1;
+ sample->weight = 1;
+
+ he = __hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
if (he == NULL)
return -ENOMEM;
@@ -343,18 +346,19 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
return ret;
argc = parse_options(argc, argv, options, annotate_usage, 0);
+ if (argc) {
+ /*
+ * Special case: if there's an argument left then assume that
+ * it's a symbol filter:
+ */
+ if (argc > 1)
+ usage_with_options(annotate_usage, options);
- if (annotate.use_stdio)
- use_browser = 0;
- else if (annotate.use_tui)
- use_browser = 1;
- else if (annotate.use_gtk)
- use_browser = 2;
+ annotate.sym_hist_filter = argv[0];
+ }
file.path = input_name;
- setup_browser(true);
-
annotate.session = perf_session__new(&file, false, &annotate.tool);
if (annotate.session == NULL)
return -1;
@@ -366,19 +370,17 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
if (ret < 0)
goto out_delete;
- if (setup_sorting() < 0)
+ if (setup_sorting(NULL) < 0)
usage_with_options(annotate_usage, options);
- if (argc) {
- /*
- * Special case: if there's an argument left then assume that
- * it's a symbol filter:
- */
- if (argc > 1)
- usage_with_options(annotate_usage, options);
+ if (annotate.use_stdio)
+ use_browser = 0;
+ else if (annotate.use_tui)
+ use_browser = 1;
+ else if (annotate.use_gtk)
+ use_browser = 2;
- annotate.sym_hist_filter = argv[0];
- }
+ setup_browser(true);
ret = __cmd_annotate(&annotate);
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index b17aed36ca16..a1cddc6bbf0f 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -16,7 +16,7 @@
*/
#include "perf.h"
#include "util/util.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "builtin.h"
#include "bench/bench.h"
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 7b8450cd33c2..d93bff7fc0e4 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -16,7 +16,7 @@
#include "util/cache.h"
#include "util/debug.h"
#include "util/header.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/build-id.h"
#include "util/session.h"
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 918b4de29de4..5e914ee79eb3 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -12,7 +12,7 @@
#include "util/build-id.h"
#include "util/cache.h"
#include "util/debug.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/session.h"
#include "util/symbol.h"
#include "util/data.h"
@@ -110,7 +110,7 @@ int cmd_buildid_list(int argc, const char **argv,
setup_pager();
if (show_kernel)
- return sysfs__fprintf_build_id(stdout);
+ return !(sysfs__fprintf_build_id(stdout) > 0);
return perf_session__list_build_ids(force, with_hits);
}
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
new file mode 100644
index 000000000000..f04e804a9fad
--- /dev/null
+++ b/tools/perf/builtin-config.c
@@ -0,0 +1,66 @@
+/*
+ * builtin-config.c
+ *
+ * Copyright (C) 2015, Taeung Song <treeze.taeung@gmail.com>
+ *
+ */
+#include "builtin.h"
+
+#include "perf.h"
+
+#include "util/cache.h"
+#include <subcmd/parse-options.h>
+#include "util/util.h"
+#include "util/debug.h"
+
+static const char * const config_usage[] = {
+ "perf config [options]",
+ NULL
+};
+
+enum actions {
+ ACTION_LIST = 1
+} actions;
+
+static struct option config_options[] = {
+ OPT_SET_UINT('l', "list", &actions,
+ "show current config variables", ACTION_LIST),
+ OPT_END()
+};
+
+static int show_config(const char *key, const char *value,
+ void *cb __maybe_unused)
+{
+ if (value)
+ printf("%s=%s\n", key, value);
+ else
+ printf("%s\n", key);
+
+ return 0;
+}
+
+int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+ int ret = 0;
+
+ argc = parse_options(argc, argv, config_options, config_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ switch (actions) {
+ case ACTION_LIST:
+ if (argc) {
+ pr_err("Error: takes no arguments\n");
+ parse_options_usage(config_usage, config_options, "l", 1);
+ } else {
+ ret = perf_config(show_config, NULL);
+ if (ret < 0)
+ pr_err("Nothing configured, "
+ "please check your ~/.perfconfig file\n");
+ }
+ break;
+ default:
+ usage_with_options(config_usage, config_options);
+ }
+
+ return ret;
+}
diff --git a/tools/perf/builtin-data.c b/tools/perf/builtin-data.c
index d6525bc54d13..b97bc1518b44 100644
--- a/tools/perf/builtin-data.c
+++ b/tools/perf/builtin-data.c
@@ -2,7 +2,7 @@
#include "builtin.h"
#include "perf.h"
#include "debug.h"
-#include "parse-options.h"
+#include <subcmd/parse-options.h>
#include "data-convert-bt.h"
typedef int (*data_cmd_fn_t)(int argc, const char **argv, const char *prefix);
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 0b180a885ba3..36ccc2b8827f 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -311,11 +311,11 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
}
static int hists__add_entry(struct hists *hists,
- struct addr_location *al, u64 period,
- u64 weight, u64 transaction)
+ struct addr_location *al,
+ struct perf_sample *sample)
{
- if (__hists__add_entry(hists, al, NULL, NULL, NULL, period, weight,
- transaction, true) != NULL)
+ if (__hists__add_entry(hists, al, NULL, NULL, NULL,
+ sample, true) != NULL)
return 0;
return -ENOMEM;
}
@@ -336,8 +336,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
- if (hists__add_entry(hists, &al, sample->period,
- sample->weight, sample->transaction)) {
+ if (hists__add_entry(hists, &al, sample)) {
pr_warning("problem incrementing symbol period, skipping event\n");
goto out_put;
}
@@ -1208,7 +1207,7 @@ static int ui_init(void)
BUG_ON(1);
}
- list_add(&fmt->sort_list, &perf_hpp__sort_list);
+ perf_hpp__register_sort_field(fmt);
return 0;
}
@@ -1280,7 +1279,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
sort__mode = SORT_MODE__DIFF;
- if (setup_sorting() < 0)
+ if (setup_sorting(NULL) < 0)
usage_with_options(diff_usage, options);
setup_pager();
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index f4d62510acbb..8a31f511e1a0 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -12,7 +12,7 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/parse-events.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/session.h"
#include "util/data.h"
#include "util/debug.h"
@@ -26,14 +26,22 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
+ bool has_tracepoint = false;
session = perf_session__new(&file, 0, NULL);
if (session == NULL)
return -1;
- evlist__for_each(session->evlist, pos)
+ evlist__for_each(session->evlist, pos) {
perf_evsel__fprintf(pos, details, stdout);
+ if (pos->attr.type == PERF_TYPE_TRACEPOINT)
+ has_tracepoint = true;
+ }
+
+ if (has_tracepoint && !details->trace_fields)
+ printf("# Tip: use 'perf evlist --trace-fields' to show fields for tracepoint events\n");
+
perf_session__delete(session);
return 0;
}
@@ -49,6 +57,7 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN('g', "group", &details.event_group,
"Show event group information"),
OPT_BOOLEAN('f', "force", &details.force, "don't complain, do it"),
+ OPT_BOOLEAN(0, "trace-fields", &details.trace_fields, "Show tracepoint fields"),
OPT_END()
};
const char * const evlist_usage[] = {
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index a7d588bf3cdd..96c1a4cfbbbf 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -6,11 +6,11 @@
#include "perf.h"
#include "util/cache.h"
#include "builtin.h"
-#include "util/exec_cmd.h"
+#include <subcmd/exec-cmd.h>
#include "common-cmds.h"
-#include "util/parse-options.h"
-#include "util/run-command.h"
-#include "util/help.h"
+#include <subcmd/parse-options.h>
+#include <subcmd/run-command.h>
+#include <subcmd/help.h>
#include "util/debug.h"
static struct man_viewer_list {
@@ -407,7 +407,7 @@ static int get_html_page_path(struct strbuf *page_path, const char *page)
#ifndef open_html
static void open_html(const char *path)
{
- execl_perf_cmd("web--browse", "-c", "help.browser", path, NULL);
+ execl_cmd("web--browse", "-c", "help.browser", path, NULL);
}
#endif
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 99d127fe9c35..0022e02ed31a 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -18,7 +18,7 @@
#include "util/data.h"
#include "util/auxtrace.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include <linux/list.h>
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 93ce665f976f..118010553d0c 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -12,7 +12,7 @@
#include "util/tool.h"
#include "util/callchain.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index dd94b4ca2213..4418d9214872 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -10,7 +10,7 @@
#include "util/header.h"
#include "util/session.h"
#include "util/intlist.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/tool.h"
@@ -1351,7 +1351,6 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
disable_buildid_cache();
use_browser = 0;
- setup_browser(false);
if (argc) {
argc = parse_options(argc, argv, live_options,
@@ -1409,8 +1408,6 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
err = kvm_events_live_report(kvm);
out:
- exit_browser(0);
-
if (kvm->session)
perf_session__delete(kvm->session);
kvm->session = NULL;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index bf679e2c978b..5e22db4684b8 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -14,7 +14,7 @@
#include "util/parse-events.h"
#include "util/cache.h"
#include "util/pmu.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
{
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index de16aaed516e..ce3bfb48b26f 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -9,7 +9,7 @@
#include "util/thread.h"
#include "util/header.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 80170aace5d4..390170041696 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -1,7 +1,7 @@
#include "builtin.h"
#include "perf.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/tool.h"
#include "util/session.h"
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 132afc97676c..9af859b28b15 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -37,7 +37,7 @@
#include "util/strfilter.h"
#include "util/symbol.h"
#include "util/debug.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/probe-finder.h"
#include "util/probe-event.h"
#include "util/probe-file.h"
@@ -249,6 +249,9 @@ static int opt_show_vars(const struct option *opt,
return ret;
}
+#else
+# define opt_show_lines NULL
+# define opt_show_vars NULL
#endif
static int opt_add_probe_event(const struct option *opt,
const char *str, int unset __maybe_unused)
@@ -473,7 +476,6 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
opt_add_probe_event),
OPT_BOOLEAN('f', "force", &probe_conf.force_add, "forcibly add events"
" with existing name"),
-#ifdef HAVE_DWARF_SUPPORT
OPT_CALLBACK('L', "line", NULL,
"FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
"Show source code lines.", opt_show_lines),
@@ -490,7 +492,6 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
"directory", "path to kernel source"),
OPT_BOOLEAN('\0', "no-inlines", &probe_conf.no_inlines,
"Don't search inlined functions"),
-#endif
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &probe_conf.max_probes,
"Set how many probe points can be found for a probe."),
@@ -521,6 +522,16 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
#ifdef HAVE_DWARF_SUPPORT
set_option_flag(options, 'L', "line", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 'V', "vars", PARSE_OPT_EXCLUSIVE);
+#else
+# define set_nobuild(s, l, c) set_option_nobuild(options, s, l, "NO_DWARF=1", c)
+ set_nobuild('L', "line", false);
+ set_nobuild('V', "vars", false);
+ set_nobuild('\0', "externs", false);
+ set_nobuild('\0', "range", false);
+ set_nobuild('k', "vmlinux", true);
+ set_nobuild('s', "source", true);
+ set_nobuild('\0', "no-inlines", true);
+# undef set_nobuild
#endif
set_option_flag(options, 'F', "funcs", PARSE_OPT_EXCLUSIVE);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 199fc31e3919..319712a4e02b 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -11,7 +11,7 @@
#include "util/build-id.h"
#include "util/util.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/callchain.h"
@@ -50,6 +50,7 @@ struct record {
int realtime_prio;
bool no_buildid;
bool no_buildid_cache;
+ bool buildid_all;
unsigned long long samples;
};
@@ -362,6 +363,13 @@ static int process_buildids(struct record *rec)
*/
symbol_conf.ignore_vmlinux_buildid = true;
+ /*
+ * If --buildid-all is given, it marks all DSOs regardless of hits,
+ * so no need to process samples.
+ */
+ if (rec->buildid_all)
+ rec->tool.sample = NULL;
+
return perf_session__process_events(session);
}
@@ -452,6 +460,8 @@ static void record__init_features(struct record *rec)
if (!rec->opts.full_auxtrace)
perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
+
+ perf_header__clear_feat(&session->header, HEADER_STAT);
}
static volatile int workload_exec_errno;
@@ -754,12 +764,8 @@ out_child:
if (!rec->no_buildid) {
process_buildids(rec);
- /*
- * We take all buildids when the file contains
- * AUX area tracing data because we do not decode the
- * trace because it would take too long.
- */
- if (rec->opts.full_auxtrace)
+
+ if (rec->buildid_all)
dsos__hit_all(rec->session);
}
perf_session__write_header(rec->session, rec->evlist, fd, true);
@@ -813,8 +819,12 @@ int record_parse_callchain_opt(const struct option *opt,
}
ret = parse_callchain_record_opt(arg, &callchain_param);
- if (!ret)
+ if (!ret) {
+ /* Enable data address sampling for DWARF unwind. */
+ if (callchain_param.record_mode == CALLCHAIN_DWARF)
+ record->sample_address = true;
callchain_debug();
+ }
return ret;
}
@@ -837,6 +847,19 @@ int record_callchain_opt(const struct option *opt,
static int perf_record_config(const char *var, const char *value, void *cb)
{
+ struct record *rec = cb;
+
+ if (!strcmp(var, "record.build-id")) {
+ if (!strcmp(value, "cache"))
+ rec->no_buildid_cache = false;
+ else if (!strcmp(value, "no-cache"))
+ rec->no_buildid_cache = true;
+ else if (!strcmp(value, "skip"))
+ rec->no_buildid = true;
+ else
+ return -1;
+ return 0;
+ }
if (!strcmp(var, "record.call-graph"))
var = "call-graph.record-mode"; /* fall-through */
@@ -1113,12 +1136,14 @@ struct option __record_options[] = {
"per thread proc mmap processing timeout in ms"),
OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
"Record context switch events"),
-#ifdef HAVE_LIBBPF_SUPPORT
OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
"clang binary to use for compiling BPF scriptlets"),
OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
"options passed to clang when compiling BPF scriptlets"),
-#endif
+ OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
+ "Record build-id of all DSOs regardless of hits"),
OPT_END()
};
@@ -1130,6 +1155,27 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
struct record *rec = &record;
char errbuf[BUFSIZ];
+#ifndef HAVE_LIBBPF_SUPPORT
+# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
+ set_nobuild('\0', "clang-path", true);
+ set_nobuild('\0', "clang-opt", true);
+# undef set_nobuild
+#endif
+
+#ifndef HAVE_BPF_PROLOGUE
+# if !defined (HAVE_DWARF_SUPPORT)
+# define REASON "NO_DWARF=1"
+# elif !defined (HAVE_LIBBPF_SUPPORT)
+# define REASON "NO_LIBBPF=1"
+# else
+# define REASON "this architecture doesn't support BPF prologue"
+# endif
+# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
+ set_nobuild('\0', "vmlinux", true);
+# undef set_nobuild
+# undef REASON
+#endif
+
rec->evlist = perf_evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
@@ -1215,6 +1261,14 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
if (err)
goto out_symbol_exit;
+ /*
+ * We take all buildids when the file contains
+ * AUX area tracing data because we do not decode the
+ * trace because it would take too long.
+ */
+ if (rec->opts.full_auxtrace)
+ rec->buildid_all = true;
+
if (record_opts__config(&rec->opts)) {
err = -EINVAL;
goto out_symbol_exit;
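
The perf_record_config() hunk above teaches perf record to honor a record.build-id config variable; the mapping of its three accepted values onto the two existing booleans is small enough to show standalone. A sketch under the assumption of a cut-down record_opts struct (not the real struct record):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct record_opts {
	bool no_buildid;	/* skip build-id processing entirely   */
	bool no_buildid_cache;	/* process, but don't add to the cache */
};

/* Same value mapping as the record.build-id handling above. */
static int apply_buildid_config(struct record_opts *rec, const char *value)
{
	if (!strcmp(value, "cache"))
		rec->no_buildid_cache = false;
	else if (!strcmp(value, "no-cache"))
		rec->no_buildid_cache = true;
	else if (!strcmp(value, "skip"))
		rec->no_buildid = true;
	else
		return -1;	/* unknown value: reject, as perf_record_config() does */
	return 0;
}

int main(void)
{
	const char *values[] = { "cache", "no-cache", "skip", "bogus" };
	unsigned int i;

	for (i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
		struct record_opts rec = { false, false };
		int err = apply_buildid_config(&rec, values[i]);

		printf("%-8s -> no_buildid=%d no_buildid_cache=%d err=%d\n",
		       values[i], rec.no_buildid, rec.no_buildid_cache, err);
	}
	return 0;
}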
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index f256fac1e722..2bf537f190a0 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -27,7 +27,8 @@
#include "util/session.h"
#include "util/tool.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
+#include <subcmd/exec-cmd.h>
#include "util/parse-events.h"
#include "util/thread.h"
@@ -45,7 +46,6 @@ struct report {
struct perf_tool tool;
struct perf_session *session;
bool use_tui, use_gtk, use_stdio;
- bool hide_unresolved;
bool dont_use_callchains;
bool show_full_info;
bool show_threads;
@@ -146,7 +146,7 @@ static int process_sample_event(struct perf_tool *tool,
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = sample,
- .hide_unresolved = rep->hide_unresolved,
+ .hide_unresolved = symbol_conf.hide_unresolved,
.add_entry_cb = hist_iter__report_callback,
};
int ret = 0;
@@ -157,7 +157,7 @@ static int process_sample_event(struct perf_tool *tool,
return -1;
}
- if (rep->hide_unresolved && al.sym == NULL)
+ if (symbol_conf.hide_unresolved && al.sym == NULL)
goto out_put;
if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
@@ -434,7 +434,14 @@ static int report__browse_hists(struct report *rep)
int ret;
struct perf_session *session = rep->session;
struct perf_evlist *evlist = session->evlist;
- const char *help = "For a higher level overview, try: perf report --sort comm,dso";
+ const char *help = perf_tip(system_path(TIPDIR));
+
+ if (help == NULL) {
+ /* fallback for people who don't install perf ;-) */
+ help = perf_tip(DOCDIR);
+ if (help == NULL)
+ help = "Cannot load tips.txt file, please install perf!";
+ }
switch (use_browser) {
case 1:
@@ -514,20 +521,26 @@ static int __cmd_report(struct report *rep)
if (rep->cpu_list) {
ret = perf_session__cpu_bitmap(session, rep->cpu_list,
rep->cpu_bitmap);
- if (ret)
+ if (ret) {
+ ui__error("failed to set cpu bitmap\n");
return ret;
+ }
}
if (rep->show_threads)
perf_read_values_init(&rep->show_threads_values);
ret = report__setup_sample_type(rep);
- if (ret)
+ if (ret) {
+ /* report__setup_sample_type() already showed error message */
return ret;
+ }
ret = perf_session__process_events(session);
- if (ret)
+ if (ret) {
+ ui__error("failed to process sample\n");
return ret;
+ }
report__warn_kptr_restrict(rep);
@@ -625,7 +638,7 @@ parse_percent_limit(const struct option *opt, const char *str,
return 0;
}
-#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function"
+#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
CALLCHAIN_REPORT_HELP
@@ -708,7 +721,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
"Only display entries with parent-match"),
OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
- "print_type,threshold[,print_limit],order,sort_key[,branch]",
+ "print_type,threshold[,print_limit],order,sort_key[,branch],value",
report_callchain_help, &report_parse_callchain_opt,
callchain_default_opt),
OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
@@ -740,7 +753,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
- OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved,
+ OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
"Only display entries resolved to a symbol"),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
@@ -783,6 +796,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"Show callgraph from reference event"),
OPT_INTEGER(0, "socket-filter", &report.socket_filter,
"only show processor socket that match with this filter"),
+ OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
+ "Show raw trace event output (do not use print fmt or plugins)"),
OPT_END()
};
struct perf_data_file file = {
@@ -796,6 +811,16 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
perf_config(report__config, &report);
argc = parse_options(argc, argv, options, report_usage, 0);
+ if (argc) {
+ /*
+ * Special case: if there's an argument left then assume that
+ * it's a symbol filter:
+ */
+ if (argc > 1)
+ usage_with_options(report_usage, options);
+
+ report.symbol_filter_str = argv[0];
+ }
if (symbol_conf.vmlinux_name &&
access(symbol_conf.vmlinux_name, R_OK)) {
@@ -882,7 +907,7 @@ repeat:
symbol_conf.cumulate_callchain = false;
}
- if (setup_sorting() < 0) {
+ if (setup_sorting(session->evlist) < 0) {
if (sort_order)
parse_options_usage(report_usage, options, "s", 1);
if (field_order)
@@ -941,17 +966,6 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
- if (argc) {
- /*
- * Special case: if there's an argument left then assume that
- * it's a symbol filter:
- */
- if (argc > 1)
- usage_with_options(report_usage, options);
-
- report.symbol_filter_str = argv[0];
- }
-
sort__setup_elide(stdout);
ret = __cmd_report(&report);
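
The report__browse_hists() hunk above replaces the fixed help string with a random tip from the installed tips.txt, falling back to the source-tree copy and finally to a hard-coded message. A minimal sketch of that fallback chain; load_tip() is a hypothetical stand-in for perf_tip(), and the directory paths are illustrative, not the configured TIPDIR/DOCDIR values:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Return a random line from <dirpath>/tips.txt, or NULL if it cannot be read. */
static char *load_tip(const char *dirpath)
{
	char path[4096], line[256];
	char *tip = NULL;
	FILE *fp;
	int n = 0;

	snprintf(path, sizeof(path), "%s/tips.txt", dirpath);
	fp = fopen(path, "r");
	if (!fp)
		return NULL;

	while (fgets(line, sizeof(line), fp)) {
		n++;
		if (rand() % n == 0) {	/* reservoir sampling: keep each line with probability 1/n */
			line[strcspn(line, "\n")] = '\0';
			free(tip);
			tip = strdup(line);
		}
	}
	fclose(fp);
	return tip;
}

int main(void)
{
	const char *help;

	srand(time(NULL));
	help = load_tip("/usr/share/doc/perf-tip");	/* installed location (assumed) */
	if (help == NULL)
		help = load_tip(".");			/* source-tree fallback */
	if (help == NULL)
		help = "Cannot load tips.txt file, please install perf!";

	printf("%s\n", help);
	return 0;
}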
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index e3d3e32c0a93..871b55ae22a4 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -12,7 +12,7 @@
#include "util/tool.h"
#include "util/cloexec.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 72b5deb4bd79..c691214d820f 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -3,9 +3,9 @@
#include "perf.h"
#include "util/cache.h"
#include "util/debug.h"
-#include "util/exec_cmd.h"
+#include <subcmd/exec-cmd.h>
#include "util/header.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/perf_regs.h"
#include "util/session.h"
#include "util/tool.h"
@@ -18,7 +18,11 @@
#include "util/sort.h"
#include "util/data.h"
#include "util/auxtrace.h"
+#include "util/cpumap.h"
+#include "util/thread_map.h"
+#include "util/stat.h"
#include <linux/bitmap.h>
+#include "asm/bug.h"
static char const *script_name;
static char const *generate_script_lang;
@@ -32,6 +36,7 @@ static bool print_flags;
static bool nanosecs;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+static struct perf_stat_config stat_config;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
@@ -130,6 +135,18 @@ static struct {
.invalid_fields = PERF_OUTPUT_TRACE,
},
+
+ [PERF_TYPE_BREAKPOINT] = {
+ .user_set = false,
+
+ .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+ PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+ PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+ PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
+ PERF_OUTPUT_PERIOD,
+
+ .invalid_fields = PERF_OUTPUT_TRACE,
+ },
};
static bool output_set_by_user(void)
@@ -204,6 +221,9 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr;
bool allow_user_set;
+ if (perf_header__has_feat(&session->header, HEADER_STAT))
+ return 0;
+
allow_user_set = perf_header__has_feat(&session->header,
HEADER_AUXTRACE);
@@ -588,8 +608,35 @@ static void print_sample_flags(u32 flags)
printf(" %-4s ", str);
}
-static void process_event(union perf_event *event, struct perf_sample *sample,
- struct perf_evsel *evsel, struct addr_location *al)
+struct perf_script {
+ struct perf_tool tool;
+ struct perf_session *session;
+ bool show_task_events;
+ bool show_mmap_events;
+ bool show_switch_events;
+ bool allocated;
+ struct cpu_map *cpus;
+ struct thread_map *threads;
+ int name_width;
+};
+
+static int perf_evlist__max_name_len(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int max = 0;
+
+ evlist__for_each(evlist, evsel) {
+ int len = strlen(perf_evsel__name(evsel));
+
+ max = MAX(len, max);
+ }
+
+ return max;
+}
+
+static void process_event(struct perf_script *script, union perf_event *event,
+ struct perf_sample *sample, struct perf_evsel *evsel,
+ struct addr_location *al)
{
struct thread *thread = al->thread;
struct perf_event_attr *attr = &evsel->attr;
@@ -604,7 +651,12 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
if (PRINT_FIELD(EVNAME)) {
const char *evname = perf_evsel__name(evsel);
- printf("%s: ", evname ? evname : "[unknown]");
+
+ if (!script->name_width)
+ script->name_width = perf_evlist__max_name_len(script->session->evlist);
+
+ printf("%*s: ", script->name_width,
+ evname ? evname : "[unknown]");
}
if (print_flags)
@@ -643,65 +695,81 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
printf("\n");
}
-static int default_start_script(const char *script __maybe_unused,
- int argc __maybe_unused,
- const char **argv __maybe_unused)
-{
- return 0;
-}
+static struct scripting_ops *scripting_ops;
-static int default_flush_script(void)
+static void __process_stat(struct perf_evsel *counter, u64 tstamp)
{
- return 0;
+ int nthreads = thread_map__nr(counter->threads);
+ int ncpus = perf_evsel__nr_cpus(counter);
+ int cpu, thread;
+ static int header_printed;
+
+ if (counter->system_wide)
+ nthreads = 1;
+
+ if (!header_printed) {
+ printf("%3s %8s %15s %15s %15s %15s %s\n",
+ "CPU", "THREAD", "VAL", "ENA", "RUN", "TIME", "EVENT");
+ header_printed = 1;
+ }
+
+ for (thread = 0; thread < nthreads; thread++) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ struct perf_counts_values *counts;
+
+ counts = perf_counts(counter->counts, cpu, thread);
+
+ printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n",
+ counter->cpus->map[cpu],
+ thread_map__pid(counter->threads, thread),
+ counts->val,
+ counts->ena,
+ counts->run,
+ tstamp,
+ perf_evsel__name(counter));
+ }
+ }
}
-static int default_stop_script(void)
+static void process_stat(struct perf_evsel *counter, u64 tstamp)
{
- return 0;
+ if (scripting_ops && scripting_ops->process_stat)
+ scripting_ops->process_stat(&stat_config, counter, tstamp);
+ else
+ __process_stat(counter, tstamp);
}
-static int default_generate_script(struct pevent *pevent __maybe_unused,
- const char *outfile __maybe_unused)
+static void process_stat_interval(u64 tstamp)
{
- return 0;
+ if (scripting_ops && scripting_ops->process_stat_interval)
+ scripting_ops->process_stat_interval(tstamp);
}
-static struct scripting_ops default_scripting_ops = {
- .start_script = default_start_script,
- .flush_script = default_flush_script,
- .stop_script = default_stop_script,
- .process_event = process_event,
- .generate_script = default_generate_script,
-};
-
-static struct scripting_ops *scripting_ops;
-
static void setup_scripting(void)
{
setup_perl_scripting();
setup_python_scripting();
-
- scripting_ops = &default_scripting_ops;
}
static int flush_scripting(void)
{
- return scripting_ops->flush_script();
+ return scripting_ops ? scripting_ops->flush_script() : 0;
}
static int cleanup_scripting(void)
{
pr_debug("\nperf script stopped\n");
- return scripting_ops->stop_script();
+ return scripting_ops ? scripting_ops->stop_script() : 0;
}
-static int process_sample_event(struct perf_tool *tool __maybe_unused,
+static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
+ struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
if (debug_mode) {
@@ -727,20 +795,16 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
goto out_put;
- scripting_ops->process_event(event, sample, evsel, &al);
+ if (scripting_ops)
+ scripting_ops->process_event(event, sample, evsel, &al);
+ else
+ process_event(scr, event, sample, evsel, &al);
+
out_put:
addr_location__put(&al);
return 0;
}
-struct perf_script {
- struct perf_tool tool;
- struct perf_session *session;
- bool show_task_events;
- bool show_mmap_events;
- bool show_switch_events;
-};
-
static int process_attr(struct perf_tool *tool, union perf_event *event,
struct perf_evlist **pevlist)
{
@@ -1156,6 +1220,8 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
type = PERF_TYPE_TRACEPOINT;
else if (!strcmp(str, "raw"))
type = PERF_TYPE_RAW;
+ else if (!strcmp(str, "break"))
+ type = PERF_TYPE_BREAKPOINT;
else {
fprintf(stderr, "Invalid event type in field string.\n");
rc = -EINVAL;
@@ -1421,7 +1487,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
char first_half[BUFSIZ];
char *script_root;
- snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
scripts_dir = opendir(scripts_path);
if (!scripts_dir)
@@ -1542,7 +1608,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
if (!session)
return -1;
- snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
scripts_dir = opendir(scripts_path);
if (!scripts_dir) {
@@ -1600,7 +1666,7 @@ static char *get_script_path(const char *script_root, const char *suffix)
char lang_path[MAXPATHLEN];
char *__script_root;
- snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
scripts_dir = opendir(scripts_path);
if (!scripts_dir)
@@ -1695,6 +1761,87 @@ static void script__setup_sample_type(struct perf_script *script)
}
}
+static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ struct stat_round_event *round = &event->stat_round;
+ struct perf_evsel *counter;
+
+ evlist__for_each(session->evlist, counter) {
+ perf_stat_process_counter(&stat_config, counter);
+ process_stat(counter, round->time);
+ }
+
+ process_stat_interval(round->time);
+ return 0;
+}
+
+static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session __maybe_unused)
+{
+ perf_event__read_stat_config(&stat_config, &event->stat_config);
+ return 0;
+}
+
+static int set_maps(struct perf_script *script)
+{
+ struct perf_evlist *evlist = script->session->evlist;
+
+ if (!script->cpus || !script->threads)
+ return 0;
+
+ if (WARN_ONCE(script->allocated, "stats double allocation\n"))
+ return -EINVAL;
+
+ perf_evlist__set_maps(evlist, script->cpus, script->threads);
+
+ if (perf_evlist__alloc_stats(evlist, true))
+ return -ENOMEM;
+
+ script->allocated = true;
+ return 0;
+}
+
+static
+int process_thread_map_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session __maybe_unused)
+{
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+
+ if (script->threads) {
+ pr_warning("Extra thread map event, ignoring.\n");
+ return 0;
+ }
+
+ script->threads = thread_map__new_event(&event->thread_map);
+ if (!script->threads)
+ return -ENOMEM;
+
+ return set_maps(script);
+}
+
+static
+int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session __maybe_unused)
+{
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+
+ if (script->cpus) {
+ pr_warning("Extra cpu map event, ignoring.\n");
+ return 0;
+ }
+
+ script->cpus = cpu_map__new_data(&event->cpu_map.data);
+ if (!script->cpus)
+ return -ENOMEM;
+
+ return set_maps(script);
+}
+
int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
{
bool show_full_info = false;
@@ -1723,6 +1870,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.auxtrace_error = perf_event__process_auxtrace_error,
+ .stat = perf_event__process_stat_event,
+ .stat_round = process_stat_round_event,
+ .stat_config = process_stat_config_event,
+ .thread_map = process_thread_map_event,
+ .cpu_map = process_cpu_map_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
@@ -1836,7 +1988,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
scripting_max_stack = itrace_synth_opts.callchain_sz;
/* make sure PERF_EXEC_PATH is set for scripts */
- perf_set_argv_exec_path(perf_exec_path());
+ set_argv_exec_path(get_argv_exec_path());
if (argc && !script_name && !rec_script_path && !rep_script_path) {
int live_pipe[2];
@@ -2076,6 +2228,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
flush_scripting();
out_delete:
+ perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
if (script_started)
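
The process_event() change above right-aligns event names to the width of the longest name in the session's evlist, so multi-event script output stays in columns. A minimal sketch of the same computation and of the %*s padding it feeds, using a plain string array instead of a perf_evlist:

#include <stdio.h>
#include <string.h>

/* Width of the longest name, as perf_evlist__max_name_len() computes above. */
static int max_name_len(const char * const *names, int n)
{
	int i, max = 0;

	for (i = 0; i < n; i++) {
		int len = (int)strlen(names[i]);

		if (len > max)
			max = len;
	}
	return max;
}

int main(void)
{
	const char * const names[] = { "cycles", "instructions", "branch-misses" };
	const int n = sizeof(names) / sizeof(names[0]);
	int i, width = max_name_len(names, n);

	/* "%*s" pads to a runtime-chosen width, keeping the ':' columns aligned. */
	for (i = 0; i < n; i++)
		printf("%*s: %d samples\n", width, names[i], (i + 1) * 100);
	return 0;
}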
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e77880b5094d..038e877081b6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -45,7 +45,7 @@
#include "builtin.h"
#include "util/cgroup.h"
#include "util/util.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
@@ -59,6 +59,9 @@
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/counts.h"
+#include "util/session.h"
+#include "util/tool.h"
+#include "asm/bug.h"
#include <stdlib.h>
#include <sys/prctl.h>
@@ -126,6 +129,21 @@ static bool append_file;
static const char *output_name;
static int output_fd;
+struct perf_stat {
+ bool record;
+ struct perf_data_file file;
+ struct perf_session *session;
+ u64 bytes_written;
+ struct perf_tool tool;
+ bool maps_allocated;
+ struct cpu_map *cpus;
+ struct thread_map *threads;
+ enum aggr_mode aggr_mode;
+};
+
+static struct perf_stat perf_stat;
+#define STAT_RECORD perf_stat.record
+
static volatile int done = 0;
static struct perf_stat_config stat_config = {
@@ -161,15 +179,43 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->inherit = !no_inherit;
- if (target__has_cpu(&target))
- return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+ /*
+ * Some events get initialized with sample_(period/type) set,
+ * like tracepoints. Clear it up for counting.
+ */
+ attr->sample_period = 0;
+
+ /*
+ * But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
+ * while keeping older tools from showing confusing messages.
+ *
+ * However for pipe sessions we need to keep it zero,
+ * because script's perf_evsel__check_attr is triggered
+ * by attr->sample_type != 0, and we can't run it on
+ * stat sessions.
+ */
+ if (!(STAT_RECORD && perf_stat.file.is_pipe))
+ attr->sample_type = PERF_SAMPLE_IDENTIFIER;
- if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
+ /*
+ * Disabling all counters initially, they will be enabled
+ * either manually by us or by kernel via enable_on_exec
+ * set later.
+ */
+ if (perf_evsel__is_group_leader(evsel)) {
attr->disabled = 1;
- if (!initial_delay)
+
+ /*
+ * In case of initial_delay we enable tracee
+ * events manually.
+ */
+ if (target__none(&target) && !initial_delay)
attr->enable_on_exec = 1;
}
+ if (target__has_cpu(&target))
+ return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
@@ -185,6 +231,42 @@ static inline int nsec_counter(struct perf_evsel *evsel)
return 0;
}
+static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) {
+ pr_err("failed to write perf data, error: %m\n");
+ return -1;
+ }
+
+ perf_stat.bytes_written += event->header.size;
+ return 0;
+}
+
+static int write_stat_round_event(u64 tm, u64 type)
+{
+ return perf_event__synthesize_stat_round(NULL, tm, type,
+ process_synthesized_event,
+ NULL);
+}
+
+#define WRITE_STAT_ROUND_EVENT(time, interval) \
+ write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
+
+#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+
+static int
+perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
+ struct perf_counts_values *count)
+{
+ struct perf_sample_id *sid = SID(counter, cpu, thread);
+
+ return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
+ process_synthesized_event, NULL);
+}
+
/*
* Read out the results of a single counter:
* do not aggregate counts across CPUs in system-wide mode
@@ -208,6 +290,13 @@ static int read_counter(struct perf_evsel *counter)
count = perf_counts(counter->counts, cpu, thread);
if (perf_evsel__read(counter, cpu, thread, count))
return -1;
+
+ if (STAT_RECORD) {
+ if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
+ pr_err("failed to write stat event\n");
+ return -1;
+ }
+ }
}
}
@@ -241,21 +330,26 @@ static void process_interval(void)
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
+ if (STAT_RECORD) {
+ if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSECS_PER_SEC + rs.tv_nsec, INTERVAL))
+ pr_err("failed to write stat round event\n");
+ }
+
print_counters(&rs, 0, NULL);
}
-static void handle_initial_delay(void)
+static void enable_counters(void)
{
- struct perf_evsel *counter;
-
- if (initial_delay) {
- const int ncpus = cpu_map__nr(evsel_list->cpus),
- nthreads = thread_map__nr(evsel_list->threads);
-
+ if (initial_delay)
usleep(initial_delay * 1000);
- evlist__for_each(evsel_list, counter)
- perf_evsel__enable(counter, ncpus, nthreads);
- }
+
+ /*
+ * We need to enable counters manually only if:
+ * - we don't have a forked tracee (i.e. we are attaching to an existing task or cpu), or
+ * - we have an initial delay configured
+ */
+ if (!target__none(&target) || initial_delay)
+ perf_evlist__enable(evsel_list);
}
static volatile int workload_exec_errno;
@@ -271,6 +365,135 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
workload_exec_errno = info->si_value.sival_int;
}
+static bool has_unit(struct perf_evsel *counter)
+{
+ return counter->unit && *counter->unit;
+}
+
+static bool has_scale(struct perf_evsel *counter)
+{
+ return counter->scale != 1;
+}
+
+static int perf_stat_synthesize_config(bool is_pipe)
+{
+ struct perf_evsel *counter;
+ int err;
+
+ if (is_pipe) {
+ err = perf_event__synthesize_attrs(NULL, perf_stat.session,
+ process_synthesized_event);
+ if (err < 0) {
+ pr_err("Couldn't synthesize attrs.\n");
+ return err;
+ }
+ }
+
+ /*
+ * Synthesize other events stuff not carried within
+ * attr event - unit, scale, name
+ */
+ evlist__for_each(evsel_list, counter) {
+ if (!counter->supported)
+ continue;
+
+ /*
+ * Synthesize unit and scale only if it's defined.
+ */
+ if (has_unit(counter)) {
+ err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel unit.\n");
+ return err;
+ }
+ }
+
+ if (has_scale(counter)) {
+ err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel scale.\n");
+ return err;
+ }
+ }
+
+ if (counter->own_cpus) {
+ err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel scale.\n");
+ return err;
+ }
+ }
+
+ /*
+ * Name is needed only for pipe output,
+ * perf.data carries event names.
+ */
+ if (is_pipe) {
+ err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel name.\n");
+ return err;
+ }
+ }
+ }
+
+ err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
+ process_synthesized_event,
+ NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
+ process_synthesized_event, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_stat_config(NULL, &stat_config,
+ process_synthesized_event, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize config.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+
+static int __store_counter_ids(struct perf_evsel *counter,
+ struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ int cpu, thread;
+
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ for (thread = 0; thread < threads->nr; thread++) {
+ int fd = FD(counter, cpu, thread);
+
+ if (perf_evlist__id_add_fd(evsel_list, counter,
+ cpu, thread, fd) < 0)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int store_counter_ids(struct perf_evsel *counter)
+{
+ struct cpu_map *cpus = counter->cpus;
+ struct thread_map *threads = counter->threads;
+
+ if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
+ return -ENOMEM;
+
+ return __store_counter_ids(counter, cpus, threads);
+}
+
static int __run_perf_stat(int argc, const char **argv)
{
int interval = stat_config.interval;
@@ -281,6 +504,7 @@ static int __run_perf_stat(int argc, const char **argv)
size_t l;
int status = 0;
const bool forks = (argc > 0);
+ bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false;
if (interval) {
ts.tv_sec = interval / 1000;
@@ -291,7 +515,7 @@ static int __run_perf_stat(int argc, const char **argv)
}
if (forks) {
- if (perf_evlist__prepare_workload(evsel_list, &target, argv, false,
+ if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
workload_exec_failed_signal) < 0) {
perror("failed to prepare workload");
return -1;
@@ -335,6 +559,9 @@ static int __run_perf_stat(int argc, const char **argv)
l = strlen(counter->unit);
if (l > unit_width)
unit_width = l;
+
+ if (STAT_RECORD && store_counter_ids(counter))
+ return -1;
}
if (perf_evlist__apply_filters(evsel_list, &counter)) {
@@ -344,6 +571,24 @@ static int __run_perf_stat(int argc, const char **argv)
return -1;
}
+ if (STAT_RECORD) {
+ int err, fd = perf_data_file__fd(&perf_stat.file);
+
+ if (is_pipe) {
+ err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file));
+ } else {
+ err = perf_session__write_header(perf_stat.session, evsel_list,
+ fd, false);
+ }
+
+ if (err < 0)
+ return err;
+
+ err = perf_stat_synthesize_config(is_pipe);
+ if (err < 0)
+ return err;
+ }
+
/*
* Enable counters and exec the command:
*/
@@ -352,7 +597,7 @@ static int __run_perf_stat(int argc, const char **argv)
if (forks) {
perf_evlist__start_workload(evsel_list);
- handle_initial_delay();
+ enable_counters();
if (interval) {
while (!waitpid(child_pid, &status, WNOHANG)) {
@@ -371,7 +616,7 @@ static int __run_perf_stat(int argc, const char **argv)
if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]);
} else {
- handle_initial_delay();
+ enable_counters();
while (!done) {
nanosleep(&ts, NULL);
if (interval)
@@ -810,8 +1055,8 @@ static void print_header(int argc, const char **argv)
else if (target.cpu_list)
fprintf(output, "\'CPU(s) %s", target.cpu_list);
else if (!target__has_task(&target)) {
- fprintf(output, "\'%s", argv[0]);
- for (i = 1; i < argc; i++)
+ fprintf(output, "\'%s", argv ? argv[0] : "pipe");
+ for (i = 1; argv && (i < argc); i++)
fprintf(output, " %s", argv[i]);
} else if (target.pid)
fprintf(output, "process id \'%s", target.pid);
@@ -847,6 +1092,10 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
struct perf_evsel *counter;
char buf[64], *prefix = NULL;
+ /* Do not print anything if we record to the pipe. */
+ if (STAT_RECORD && perf_stat.file.is_pipe)
+ return;
+
if (interval)
print_interval(prefix = buf, ts);
else
@@ -1077,6 +1326,109 @@ static int perf_stat_init_aggr_mode(void)
return cpus_aggr_map ? 0 : -ENOMEM;
}
+static void perf_stat__exit_aggr_mode(void)
+{
+ cpu_map__put(aggr_map);
+ cpu_map__put(cpus_aggr_map);
+ aggr_map = NULL;
+ cpus_aggr_map = NULL;
+}
+
+static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
+{
+ int cpu;
+
+ if (idx > map->nr)
+ return -1;
+
+ cpu = map->map[idx];
+
+ if (cpu >= env->nr_cpus_online)
+ return -1;
+
+ return cpu;
+}
+
+static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
+{
+ struct perf_env *env = data;
+ int cpu = perf_env__get_cpu(env, map, idx);
+
+ return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
+}
+
+static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
+{
+ struct perf_env *env = data;
+ int core = -1, cpu = perf_env__get_cpu(env, map, idx);
+
+ if (cpu != -1) {
+ int socket_id = env->cpu[cpu].socket_id;
+
+ /*
+ * Encode socket in upper 16 bits
+ * core_id is relative to socket, and
+ * we need a global id. So we combine
+ * socket + core id.
+ */
+ core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
+ }
+
+ return core;
+}
+
+static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
+ struct cpu_map **sockp)
+{
+ return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
+}
+
+static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
+ struct cpu_map **corep)
+{
+ return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
+}
+
+static int perf_stat__get_socket_file(struct cpu_map *map, int idx)
+{
+ return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
+}
+
+static int perf_stat__get_core_file(struct cpu_map *map, int idx)
+{
+ return perf_env__get_core(map, idx, &perf_stat.session->header.env);
+}
+
+static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
+{
+ struct perf_env *env = &st->session->header.env;
+
+ switch (stat_config.aggr_mode) {
+ case AGGR_SOCKET:
+ if (perf_env__build_socket_map(env, evsel_list->cpus, &aggr_map)) {
+ perror("cannot build socket map");
+ return -1;
+ }
+ aggr_get_id = perf_stat__get_socket_file;
+ break;
+ case AGGR_CORE:
+ if (perf_env__build_core_map(env, evsel_list->cpus, &aggr_map)) {
+ perror("cannot build core map");
+ return -1;
+ }
+ aggr_get_id = perf_stat__get_core_file;
+ break;
+ case AGGR_NONE:
+ case AGGR_GLOBAL:
+ case AGGR_THREAD:
+ case AGGR_UNSET:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/*
* Add default attributes, if there were no attributes specified or
* if -d/--detailed, -d -d or -d -d -d is used:
@@ -1236,6 +1588,225 @@ static int add_default_attributes(void)
return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
+static const char * const stat_record_usage[] = {
+ "perf stat record [<options>]",
+ NULL,
+};
+
+static void init_features(struct perf_session *session)
+{
+ int feat;
+
+ for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
+ perf_header__set_feat(&session->header, feat);
+
+ perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
+ perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
+ perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
+ perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
+}
+
+static int __cmd_record(int argc, const char **argv)
+{
+ struct perf_session *session;
+ struct perf_data_file *file = &perf_stat.file;
+
+ argc = parse_options(argc, argv, stat_options, stat_record_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (output_name)
+ file->path = output_name;
+
+ if (run_count != 1 || forever) {
+ pr_err("Cannot use -r option with perf stat record.\n");
+ return -1;
+ }
+
+ session = perf_session__new(file, false, NULL);
+ if (session == NULL) {
+ pr_err("Perf session creation failed.\n");
+ return -1;
+ }
+
+ init_features(session);
+
+ session->evlist = evsel_list;
+ perf_stat.session = session;
+ perf_stat.record = true;
+ return argc;
+}
+
+static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ struct stat_round_event *round = &event->stat_round;
+ struct perf_evsel *counter;
+ struct timespec tsh, *ts = NULL;
+ const char **argv = session->header.env.cmdline_argv;
+ int argc = session->header.env.nr_cmdline;
+
+ evlist__for_each(evsel_list, counter)
+ perf_stat_process_counter(&stat_config, counter);
+
+ if (round->type == PERF_STAT_ROUND_TYPE__FINAL)
+ update_stats(&walltime_nsecs_stats, round->time);
+
+ if (stat_config.interval && round->time) {
+ tsh.tv_sec = round->time / NSECS_PER_SEC;
+ tsh.tv_nsec = round->time % NSECS_PER_SEC;
+ ts = &tsh;
+ }
+
+ print_counters(ts, argc, argv);
+ return 0;
+}
+
+static
+int process_stat_config_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session __maybe_unused)
+{
+ struct perf_stat *st = container_of(tool, struct perf_stat, tool);
+
+ perf_event__read_stat_config(&stat_config, &event->stat_config);
+
+ if (cpu_map__empty(st->cpus)) {
+ if (st->aggr_mode != AGGR_UNSET)
+ pr_warning("warning: processing task data, aggregation mode not set\n");
+ return 0;
+ }
+
+ if (st->aggr_mode != AGGR_UNSET)
+ stat_config.aggr_mode = st->aggr_mode;
+
+ if (perf_stat.file.is_pipe)
+ perf_stat_init_aggr_mode();
+ else
+ perf_stat_init_aggr_mode_file(st);
+
+ return 0;
+}
+
+static int set_maps(struct perf_stat *st)
+{
+ if (!st->cpus || !st->threads)
+ return 0;
+
+ if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
+ return -EINVAL;
+
+ perf_evlist__set_maps(evsel_list, st->cpus, st->threads);
+
+ if (perf_evlist__alloc_stats(evsel_list, true))
+ return -ENOMEM;
+
+ st->maps_allocated = true;
+ return 0;
+}
+
+static
+int process_thread_map_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session __maybe_unused)
+{
+ struct perf_stat *st = container_of(tool, struct perf_stat, tool);
+
+ if (st->threads) {
+ pr_warning("Extra thread map event, ignoring.\n");
+ return 0;
+ }
+
+ st->threads = thread_map__new_event(&event->thread_map);
+ if (!st->threads)
+ return -ENOMEM;
+
+ return set_maps(st);
+}
+
+static
+int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session __maybe_unused)
+{
+ struct perf_stat *st = container_of(tool, struct perf_stat, tool);
+ struct cpu_map *cpus;
+
+ if (st->cpus) {
+ pr_warning("Extra cpu map event, ignoring.\n");
+ return 0;
+ }
+
+ cpus = cpu_map__new_data(&event->cpu_map.data);
+ if (!cpus)
+ return -ENOMEM;
+
+ st->cpus = cpus;
+ return set_maps(st);
+}
+
+static const char * const stat_report_usage[] = {
+ "perf stat report [<options>]",
+ NULL,
+};
+
+static struct perf_stat perf_stat = {
+ .tool = {
+ .attr = perf_event__process_attr,
+ .event_update = perf_event__process_event_update,
+ .thread_map = process_thread_map_event,
+ .cpu_map = process_cpu_map_event,
+ .stat_config = process_stat_config_event,
+ .stat = perf_event__process_stat_event,
+ .stat_round = process_stat_round_event,
+ },
+ .aggr_mode = AGGR_UNSET,
+};
+
+static int __cmd_report(int argc, const char **argv)
+{
+ struct perf_session *session;
+ const struct option options[] = {
+ OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
+ "aggregate counts per processor socket", AGGR_SOCKET),
+ OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
+ "aggregate counts per physical processor core", AGGR_CORE),
+ OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
+ "disable CPU count aggregation", AGGR_NONE),
+ OPT_END()
+ };
+ struct stat st;
+ int ret;
+
+ argc = parse_options(argc, argv, options, stat_report_usage, 0);
+
+ if (!input_name || !strlen(input_name)) {
+ if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
+ input_name = "-";
+ else
+ input_name = "perf.data";
+ }
+
+ perf_stat.file.path = input_name;
+ perf_stat.file.mode = PERF_DATA_MODE_READ;
+
+ session = perf_session__new(&perf_stat.file, false, &perf_stat.tool);
+ if (session == NULL)
+ return -1;
+
+ perf_stat.session = session;
+ stat_config.output = stderr;
+ evsel_list = session->evlist;
+
+ ret = perf_session__process_events(session);
+ if (ret)
+ return ret;
+
+ perf_session__delete(session);
+ return 0;
+}
+
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char * const stat_usage[] = {
@@ -1246,6 +1817,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
const char *mode;
FILE *output = stderr;
unsigned int interval;
+ const char * const stat_subcommands[] = { "record", "report" };
setlocale(LC_ALL, "");
@@ -1253,12 +1825,30 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (evsel_list == NULL)
return -ENOMEM;
- argc = parse_options(argc, argv, stat_options, stat_usage,
- PARSE_OPT_STOP_AT_NON_OPTION);
+ argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
+ (const char **) stat_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (csv_sep) {
+ csv_output = true;
+ if (!strcmp(csv_sep, "\\t"))
+ csv_sep = "\t";
+ } else
+ csv_sep = DEFAULT_SEPARATOR;
+
+ if (argc && !strncmp(argv[0], "rec", 3)) {
+ argc = __cmd_record(argc, argv);
+ if (argc < 0)
+ return -1;
+ } else if (argc && !strncmp(argv[0], "rep", 3))
+ return __cmd_report(argc, argv);
interval = stat_config.interval;
- if (output_name && strcmp(output_name, "-"))
+ /*
+ * For the record command, -o is already taken care of.
+ */
+ if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
output = NULL;
if (output_name && output_fd) {
@@ -1296,13 +1886,6 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
stat_config.output = output;
- if (csv_sep) {
- csv_output = true;
- if (!strcmp(csv_sep, "\\t"))
- csv_sep = "\t";
- } else
- csv_sep = DEFAULT_SEPARATOR;
-
/*
* let the spreadsheet do the pretty-printing
*/
@@ -1425,6 +2008,42 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (!forever && status != -1 && !interval)
print_counters(NULL, argc, argv);
+ if (STAT_RECORD) {
+ /*
+ * We synthesize the kernel mmap record just so that older tools
+ * don't emit warnings about not being able to resolve symbols
+ * due to /proc/sys/kernel/kptr_restrict settings and instead provide
+ * a saner message about no samples being in the perf.data file.
+ *
+ * This also serves to suppress a warning about f_header.data.size == 0
+ * in header.c at the moment 'perf stat record' gets introduced, which
+ * is not really needed once we start adding the stat specific PERF_RECORD_
+ * records, but the need to suppress the kptr_restrict messages in older
+ * tools remains -acme
+ */
+ int fd = perf_data_file__fd(&perf_stat.file);
+ int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
+ process_synthesized_event,
+ &perf_stat.session->machines.host);
+ if (err) {
+ pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
+ "older tools may produce warnings about this file\n.");
+ }
+
+ if (!interval) {
+ if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
+ pr_err("failed to write stat round event\n");
+ }
+
+ if (!perf_stat.file.is_pipe) {
+ perf_stat.session->header.data_size += perf_stat.bytes_written;
+ perf_session__write_header(perf_stat.session, evsel_list, fd, true);
+ }
+
+ perf_session__delete(perf_stat.session);
+ }
+
+ perf_stat__exit_aggr_mode();
perf_evlist__free_stats(evsel_list);
out:
perf_evlist__delete(evsel_list);
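
perf_env__get_core() above packs the socket id into the upper 16 bits of the returned id, because core_id is only unique within a socket and per-core aggregation needs a globally unique key. A worked sketch of that encode/decode arithmetic (the helper names are illustrative, not perf functions):

#include <stdio.h>

/* Same packing as perf_env__get_core() above: socket id in the upper 16 bits. */
static int pack_core_id(int socket_id, int core_id)
{
	return (socket_id << 16) | (core_id & 0xffff);
}

static void unpack_core_id(int id, int *socket_id, int *core_id)
{
	*socket_id = id >> 16;
	*core_id = id & 0xffff;
}

int main(void)
{
	/* core 3 on socket 1 must not collide with core 3 on socket 0 */
	int a = pack_core_id(1, 3);
	int b = pack_core_id(0, 3);
	int s, c;

	unpack_core_id(a, &s, &c);
	printf("socket 1, core 3 -> 0x%08x (socket %d, core %d)\n", (unsigned)a, s, c);
	printf("socket 0, core 3 -> 0x%08x\n", (unsigned)b);
	return 0;
}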
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 30e59620179d..bd7a7757176f 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -30,7 +30,7 @@
#include "perf.h"
#include "util/header.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7e2e72e6d9d1..bf01cbb0ef23 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -34,7 +34,7 @@
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
@@ -175,42 +175,40 @@ static void perf_top__record_precise_ip(struct perf_top *top,
int counter, u64 ip)
{
struct annotation *notes;
- struct symbol *sym;
+ struct symbol *sym = he->ms.sym;
int err = 0;
- if (he == NULL || he->ms.sym == NULL ||
- ((top->sym_filter_entry == NULL ||
- top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
+ if (sym == NULL || (use_browser == 0 &&
+ (top->sym_filter_entry == NULL ||
+ top->sym_filter_entry->ms.sym != sym)))
return;
- sym = he->ms.sym;
notes = symbol__annotation(sym);
if (pthread_mutex_trylock(&notes->lock))
return;
- ip = he->ms.map->map_ip(he->ms.map, ip);
-
- if (ui__has_annotation())
- err = hist_entry__inc_addr_samples(he, counter, ip);
+ err = hist_entry__inc_addr_samples(he, counter, ip);
pthread_mutex_unlock(&notes->lock);
- /*
- * This function is now called with he->hists->lock held.
- * Release it before going to sleep.
- */
- pthread_mutex_unlock(&he->hists->lock);
+ if (unlikely(err)) {
+ /*
+ * This function is now called with he->hists->lock held.
+ * Release it before going to sleep.
+ */
+ pthread_mutex_unlock(&he->hists->lock);
+
+ if (err == -ERANGE && !he->ms.map->erange_warned)
+ ui__warn_map_erange(he->ms.map, sym, ip);
+ else if (err == -ENOMEM) {
+ pr_err("Not enough memory for annotating '%s' symbol!\n",
+ sym->name);
+ sleep(1);
+ }
- if (err == -ERANGE && !he->ms.map->erange_warned)
- ui__warn_map_erange(he->ms.map, sym, ip);
- else if (err == -ENOMEM) {
- pr_err("Not enough memory for annotating '%s' symbol!\n",
- sym->name);
- sleep(1);
+ pthread_mutex_lock(&he->hists->lock);
}
-
- pthread_mutex_lock(&he->hists->lock);
}
static void perf_top__show_details(struct perf_top *top)
@@ -687,14 +685,8 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
struct hist_entry *he = iter->he;
struct perf_evsel *evsel = iter->evsel;
- if (sort__has_sym && single) {
- u64 ip = al->addr;
-
- if (al->map)
- ip = al->map->unmap_ip(al->map, ip);
-
- perf_top__record_precise_ip(top, he, evsel->idx, ip);
- }
+ if (sort__has_sym && single)
+ perf_top__record_precise_ip(top, he, evsel->idx, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
!(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
@@ -964,7 +956,7 @@ static int __cmd_top(struct perf_top *top)
if (ret)
goto out_delete;
- if (perf_session__register_idle_thread(top->session) == NULL)
+ if (perf_session__register_idle_thread(top->session) < 0)
goto out_delete;
machine__synthesize_threads(&top->session->machines.host, &opts->target,
@@ -1218,6 +1210,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
"branch filter mask", "branch stack filter modes",
parse_branch_stack),
+ OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
+ "Show raw trace event output (do not use print fmt or plugins)"),
OPT_END()
};
const char * const top_usage[] = {
@@ -1239,11 +1233,17 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
if (argc)
usage_with_options(top_usage, options);
+ if (!top.evlist->nr_entries &&
+ perf_evlist__add_default(top.evlist) < 0) {
+ pr_err("Not enough memory for event selector list\n");
+ goto out_delete_evlist;
+ }
+
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
sort__need_collapse = 1;
- if (setup_sorting() < 0) {
+ if (setup_sorting(top.evlist) < 0) {
if (sort_order)
parse_options_usage(top_usage, options, "s", 1);
if (field_order)
@@ -1279,12 +1279,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
if (target__none(target))
target->system_wide = true;
- if (perf_evlist__create_maps(top.evlist, target) < 0)
- usage_with_options(top_usage, options);
-
- if (!top.evlist->nr_entries &&
- perf_evlist__add_default(top.evlist) < 0) {
- ui__error("Not enough memory for event selector list\n");
+ if (perf_evlist__create_maps(top.evlist, target) < 0) {
+ ui__error("Couldn't create thread/CPU maps: %s\n",
+ errno == ENOENT ? "No such process" : strerror_r(errno, errbuf, sizeof(errbuf)));
goto out_delete_evlist;
}
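
perf_top__record_precise_ip() above only annotates when it can take notes->lock without blocking, so a busy annotation browser never stalls the sampling path. A tiny pthreads sketch of that trylock-and-skip pattern (the counters and the lock-holding in main are illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t notes_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long updated, skipped;

/* Opportunistic update: never block the sampling path on a contended lock. */
static void record_sample(void)
{
	if (pthread_mutex_trylock(&notes_lock)) {
		skipped++;	/* lock is busy: drop this update */
		return;
	}
	updated++;		/* ... update annotation state here ... */
	pthread_mutex_unlock(&notes_lock);
}

int main(void)
{
	record_sample();			/* lock free: update happens  */

	pthread_mutex_lock(&notes_lock);	/* simulate a busy annotator  */
	record_sample();			/* contended: update skipped  */
	pthread_mutex_unlock(&notes_lock);

	printf("updated=%llu skipped=%llu\n", updated, skipped);
	return 0;
}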
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index c783d8fd3a80..20916dd77aac 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -22,11 +22,11 @@
#include "util/color.h"
#include "util/debug.h"
#include "util/evlist.h"
-#include "util/exec_cmd.h"
+#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/session.h"
#include "util/thread.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
new file mode 100644
index 000000000000..9b10cda6b6dc
--- /dev/null
+++ b/tools/perf/builtin-version.c
@@ -0,0 +1,10 @@
+#include "util/util.h"
+#include "builtin.h"
+#include "perf.h"
+
+int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused,
+ const char *prefix __maybe_unused)
+{
+ printf("perf version %s\n", perf_version_string);
+ return 0;
+}
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 3688ad29085f..3f871b54e261 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -17,6 +17,7 @@ extern int cmd_annotate(int argc, const char **argv, const char *prefix);
extern int cmd_bench(int argc, const char **argv, const char *prefix);
extern int cmd_buildid_cache(int argc, const char **argv, const char *prefix);
extern int cmd_buildid_list(int argc, const char **argv, const char *prefix);
+extern int cmd_config(int argc, const char **argv, const char *prefix);
extern int cmd_diff(int argc, const char **argv, const char *prefix);
extern int cmd_evlist(int argc, const char **argv, const char *prefix);
extern int cmd_help(int argc, const char **argv, const char *prefix);
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 00fcaf8a5b8d..ab5cbaa170d0 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -9,6 +9,7 @@ perf-buildid-cache mainporcelain common
perf-buildid-list mainporcelain common
perf-data mainporcelain common
perf-diff mainporcelain common
+perf-config mainporcelain common
perf-evlist mainporcelain common
perf-inject mainporcelain common
perf-kmem mainporcelain common
@@ -25,4 +26,4 @@ perf-stat mainporcelain common
perf-test mainporcelain common
perf-timechart mainporcelain common
perf-top mainporcelain common
-perf-trace mainporcelain common
+perf-trace mainporcelain audit
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index de89ec574361..511141b102e8 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -17,7 +17,7 @@ detected_var = $(shell echo "$(1)=$($(1))" >> $(OUTPUT).config-detected)
CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
-include $(src-perf)/config/Makefile.arch
+include $(srctree)/tools/scripts/Makefile.arch
$(call detected_var,ARCH)
@@ -135,8 +135,6 @@ endif
ifeq ($(DEBUG),0)
CFLAGS += -O6
-else
- CFLAGS += $(call cc-option,-Og,-O0)
endif
ifdef PARSER_DEBUG
@@ -183,7 +181,11 @@ LDFLAGS += -Wl,-z,noexecstack
EXTLIBS = -lpthread -lrt -lm -ldl
+ifeq ($(FEATURES_DUMP),)
include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
ifeq ($(feature-stackprotector-all), 1)
CFLAGS += -fstack-protector-all
@@ -318,6 +320,18 @@ ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBBPF_SUPPORT
$(call detected,CONFIG_LIBBPF)
endif
+
+ ifndef NO_DWARF
+ ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
+ CFLAGS += -DHAVE_BPF_PROLOGUE
+ $(call detected,CONFIG_BPF_PROLOGUE)
+ else
+ msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
+ endif
+ else
+ msg := $(warning DWARF support is off, BPF prologue is disabled);
+ endif
+
endif # NO_LIBBPF
endif # NO_LIBELF
@@ -483,7 +497,7 @@ else
PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
- PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
+ PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
@@ -681,6 +695,8 @@ sharedir = $(prefix)/share
template_dir = share/perf-core/templates
STRACE_GROUPS_DIR = share/perf-core/strace/groups
htmldir = share/doc/perf-doc
+tipdir = share/doc/perf-tip
+srcdir = $(srctree)/tools/perf
ifeq ($(prefix),/usr)
sysconfdir = /etc
ETC_PERFCONFIG = $(sysconfdir)/perfconfig
@@ -707,19 +723,24 @@ infodir_SQ = $(subst ','\'',$(infodir))
perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
template_dir_SQ = $(subst ','\'',$(template_dir))
htmldir_SQ = $(subst ','\'',$(htmldir))
+tipdir_SQ = $(subst ','\'',$(tipdir))
prefix_SQ = $(subst ','\'',$(prefix))
sysconfdir_SQ = $(subst ','\'',$(sysconfdir))
libdir_SQ = $(subst ','\'',$(libdir))
+srcdir_SQ = $(subst ','\'',$(srcdir))
ifneq ($(filter /%,$(firstword $(perfexecdir))),)
perfexec_instdir = $(perfexecdir)
STRACE_GROUPS_INSTDIR = $(STRACE_GROUPS_DIR)
+tip_instdir = $(tipdir)
else
perfexec_instdir = $(prefix)/$(perfexecdir)
STRACE_GROUPS_INSTDIR = $(prefix)/$(STRACE_GROUPS_DIR)
+tip_instdir = $(prefix)/$(tipdir)
endif
perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
STRACE_GROUPS_INSTDIR_SQ = $(subst ','\'',$(STRACE_GROUPS_INSTDIR))
+tip_instdir_SQ = $(subst ','\'',$(tip_instdir))
# If we install to $(HOME) we keep the traceevent default:
# $(HOME)/.traceevent/plugins
@@ -741,6 +762,10 @@ ifeq ($(VF),1)
$(call print_var,sysconfdir)
$(call print_var,LIBUNWIND_DIR)
$(call print_var,LIBDW_DIR)
+
+ ifeq ($(dwarf-post-unwind),1)
+ $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
+ endif
$(info )
endif
@@ -756,6 +781,8 @@ $(call detected_var,ETC_PERFCONFIG_SQ)
$(call detected_var,STRACE_GROUPS_DIR_SQ)
$(call detected_var,prefix_SQ)
$(call detected_var,perfexecdir_SQ)
+$(call detected_var,tipdir_SQ)
+$(call detected_var,srcdir_SQ)
$(call detected_var,LIBDIR)
$(call detected_var,GTK_CFLAGS)
$(call detected_var,PERL_EMBED_CCOPTS)
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index 0ebef09c0842..c16ce833079c 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -177,22 +177,3 @@ $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
endef
_ge_attempt = $(if $(get-executable),$(get-executable),$(call _gea_err,$(2)))
_gea_err = $(if $(1),$(error Please set '$(1)' appropriately))
-
-# try-run
-# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
-# Exit code chooses option. "$$TMP" is can be used as temporary file and
-# is automatically cleaned up.
-try-run = $(shell set -e; \
- TMP="$(TMPOUT).$$$$.tmp"; \
- TMPO="$(TMPOUT).$$$$.o"; \
- if ($(1)) >/dev/null 2>&1; \
- then echo "$(2)"; \
- else echo "$(3)"; \
- fi; \
- rm -f "$$TMP" "$$TMPO")
-
-# cc-option
-# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
-
-cc-option = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 3d4c7c09adea..a929618b8eb6 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -9,16 +9,18 @@
#include "builtin.h"
#include "util/env.h"
-#include "util/exec_cmd.h"
+#include <subcmd/exec-cmd.h>
#include "util/cache.h"
#include "util/quote.h"
-#include "util/run-command.h"
+#include <subcmd/run-command.h>
#include "util/parse-events.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/bpf-loader.h"
#include "util/debug.h"
#include <api/fs/tracing_path.h>
#include <pthread.h>
+#include <stdlib.h>
+#include <time.h>
const char perf_usage_string[] =
"perf [--version] [--help] [OPTIONS] COMMAND [ARGS]";
@@ -39,6 +41,7 @@ struct cmd_struct {
static struct cmd_struct commands[] = {
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
+ { "config", cmd_config, 0 },
{ "diff", cmd_diff, 0 },
{ "evlist", cmd_evlist, 0 },
{ "help", cmd_help, 0 },
@@ -118,7 +121,7 @@ static void commit_pager_choice(void)
{
switch (use_pager) {
case 0:
- setenv("PERF_PAGER", "cat", 1);
+ setenv(PERF_PAGER_ENVIRONMENT, "cat", 1);
break;
case 1:
/* setup_pager(); */
@@ -182,9 +185,9 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
if (!prefixcmp(cmd, CMD_EXEC_PATH)) {
cmd += strlen(CMD_EXEC_PATH);
if (*cmd == '=')
- perf_set_argv_exec_path(cmd + 1);
+ set_argv_exec_path(cmd + 1);
else {
- puts(perf_exec_path());
+ puts(get_argv_exec_path());
exit(0);
}
} else if (!strcmp(cmd, "--html-path")) {
@@ -383,6 +386,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
use_pager = 1;
commit_pager_choice();
+ perf_env__set_cmdline(&perf_env, argc, argv);
status = p->fn(argc, argv, prefix);
exit_browser(status);
perf_env__exit(&perf_env);
@@ -528,14 +532,20 @@ int main(int argc, const char **argv)
const char *cmd;
char sbuf[STRERR_BUFSIZE];
+ /* libsubcmd init */
+ exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
+ pager_init(PERF_PAGER_ENVIRONMENT);
+
/* The page_size is placed in util object. */
page_size = sysconf(_SC_PAGE_SIZE);
cacheline_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
- cmd = perf_extract_argv0_path(argv[0]);
+ cmd = extract_argv0_path(argv[0]);
if (!cmd)
cmd = "perf-help";
+ srandom(time(NULL));
+
/* get debugfs/tracefs mount point from /proc/mounts */
tracing_path_mount();
diff --git a/tools/perf/scripts/python/stat-cpi.py b/tools/perf/scripts/python/stat-cpi.py
new file mode 100644
index 000000000000..8b60f343dd07
--- /dev/null
+++ b/tools/perf/scripts/python/stat-cpi.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+data = {}
+times = []
+threads = []
+cpus = []
+
+def get_key(time, event, cpu, thread):
+ return "%d-%s-%d-%d" % (time, event, cpu, thread)
+
+def store_key(time, cpu, thread):
+ if (time not in times):
+ times.append(time)
+
+ if (cpu not in cpus):
+ cpus.append(cpu)
+
+ if (thread not in threads):
+ threads.append(thread)
+
+def store(time, event, cpu, thread, val, ena, run):
+ #print "event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % \
+ # (event, cpu, thread, time, val, ena, run)
+
+ store_key(time, cpu, thread)
+ key = get_key(time, event, cpu, thread)
+ data[key] = [ val, ena, run]
+
+def get(time, event, cpu, thread):
+ key = get_key(time, event, cpu, thread)
+ return data[key][0]
+
+def stat__cycles_k(cpu, thread, time, val, ena, run):
+ store(time, "cycles", cpu, thread, val, ena, run);
+
+def stat__instructions_k(cpu, thread, time, val, ena, run):
+ store(time, "instructions", cpu, thread, val, ena, run);
+
+def stat__cycles_u(cpu, thread, time, val, ena, run):
+ store(time, "cycles", cpu, thread, val, ena, run);
+
+def stat__instructions_u(cpu, thread, time, val, ena, run):
+ store(time, "instructions", cpu, thread, val, ena, run);
+
+def stat__cycles(cpu, thread, time, val, ena, run):
+ store(time, "cycles", cpu, thread, val, ena, run);
+
+def stat__instructions(cpu, thread, time, val, ena, run):
+ store(time, "instructions", cpu, thread, val, ena, run);
+
+def stat__interval(time):
+ for cpu in cpus:
+ for thread in threads:
+ cyc = get(time, "cycles", cpu, thread)
+ ins = get(time, "instructions", cpu, thread)
+ cpi = 0
+
+ if ins != 0:
+ cpi = cyc/float(ins)
+
+ print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
+
+def trace_end():
+ pass
+# XXX trace_end callback could be used as an alternative place
+# to compute same values as in the script above:
+#
+# for time in times:
+# for cpu in cpus:
+# for thread in threads:
+# cyc = get(time, "cycles", cpu, thread)
+# ins = get(time, "instructions", cpu, thread)
+#
+# if ins != 0:
+# cpi = cyc/float(ins)
+#
+# print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)
diff --git a/tools/perf/tests/.gitignore b/tools/perf/tests/.gitignore
index 489fc9ffbcb0..bf016c439fbd 100644
--- a/tools/perf/tests/.gitignore
+++ b/tools/perf/tests/.gitignore
@@ -1,2 +1,3 @@
llvm-src-base.c
llvm-src-kbuild.c
+llvm-src-prologue.c
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index f41ebf8849fe..614899b88b37 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -31,24 +31,34 @@ perf-y += sample-parsing.o
perf-y += parse-no-sample-id-all.o
perf-y += kmod-path.o
perf-y += thread-map.o
-perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o
+perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o
perf-y += bpf.o
perf-y += topology.o
+perf-y += cpumap.o
+perf-y += stat.o
+perf-y += event_update.o
-$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c
+$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)
$(Q)echo '#include <tests/llvm.h>' > $@
$(Q)echo 'const char test_llvm__bpf_base_prog[] =' >> $@
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
-$(OUTPUT)tests/llvm-src-kbuild.c: tests/bpf-script-test-kbuild.c
+$(OUTPUT)tests/llvm-src-kbuild.c: tests/bpf-script-test-kbuild.c tests/Build
$(call rule_mkdir)
$(Q)echo '#include <tests/llvm.h>' > $@
$(Q)echo 'const char test_llvm__bpf_test_kbuild_prog[] =' >> $@
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
+$(OUTPUT)tests/llvm-src-prologue.c: tests/bpf-script-test-prologue.c tests/Build
+ $(call rule_mkdir)
+ $(Q)echo '#include <tests/llvm.h>' > $@
+ $(Q)echo 'const char test_llvm__bpf_test_prologue_prog[] =' >> $@
+ $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
+ $(Q)echo ';' >> $@
+
ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 638875a0960a..28d1605b0338 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -24,7 +24,7 @@
#include <linux/kernel.h>
#include "../perf.h"
#include "util.h"
-#include "exec_cmd.h"
+#include <subcmd/exec-cmd.h>
#include "tests.h"
#define ENV "PERF_TEST_ATTR"
@@ -153,7 +153,7 @@ static int run_dir(const char *d, const char *perf)
return system(cmd);
}
-int test__attr(void)
+int test__attr(int subtest __maybe_unused)
{
struct stat st;
char path_perf[PATH_MAX];
@@ -164,7 +164,7 @@ int test__attr(void)
return run_dir("./tests", "./perf");
/* Then installed path. */
- snprintf(path_dir, PATH_MAX, "%s/tests", perf_exec_path());
+ snprintf(path_dir, PATH_MAX, "%s/tests", get_argv_exec_path());
snprintf(path_perf, PATH_MAX, "%s/perf", BINDIR);
if (!lstat(path_dir, &st) &&
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index a02b035fd5aa..fb80c9eb6a95 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -111,7 +111,7 @@ static long long bp_count(int fd)
return count;
}
-int test__bp_signal(void)
+int test__bp_signal(int subtest __maybe_unused)
{
struct sigaction sa;
long long count1, count2;
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
index e76537724491..89f92fa67cc4 100644
--- a/tools/perf/tests/bp_signal_overflow.c
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -58,7 +58,7 @@ static long long bp_count(int fd)
#define EXECUTIONS 10000
#define THRESHOLD 100
-int test__bp_signal_overflow(void)
+int test__bp_signal_overflow(int subtest __maybe_unused)
{
struct perf_event_attr pe;
struct sigaction sa;
diff --git a/tools/perf/tests/bpf-script-test-prologue.c b/tools/perf/tests/bpf-script-test-prologue.c
new file mode 100644
index 000000000000..7230e62c70fc
--- /dev/null
+++ b/tools/perf/tests/bpf-script-test-prologue.c
@@ -0,0 +1,35 @@
+/*
+ * bpf-script-test-prologue.c
+ * Test BPF prologue
+ */
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
+#endif
+#define SEC(NAME) __attribute__((section(NAME), used))
+
+#include <uapi/linux/fs.h>
+
+#define FMODE_READ 0x1
+#define FMODE_WRITE 0x2
+
+static void (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
+ (void *) 6;
+
+SEC("func=null_lseek file->f_mode offset orig")
+int bpf_func__null_lseek(void *ctx, int err, unsigned long f_mode,
+ unsigned long offset, unsigned long orig)
+{
+ if (err)
+ return 0;
+ if (f_mode & FMODE_WRITE)
+ return 0;
+ if (offset & 1)
+ return 0;
+ if (orig == SEEK_CUR)
+ return 0;
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index ec16f7812c8b..33689a0cf821 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -19,6 +19,29 @@ static int epoll_pwait_loop(void)
return 0;
}
+#ifdef HAVE_BPF_PROLOGUE
+
+static int llseek_loop(void)
+{
+ int fds[2], i;
+
+ fds[0] = open("/dev/null", O_RDONLY);
+ fds[1] = open("/dev/null", O_RDWR);
+
+ if (fds[0] < 0 || fds[1] < 0)
+ return -1;
+
+ for (i = 0; i < NR_ITERS; i++) {
+ lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
+ lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
+ }
+ close(fds[0]);
+ close(fds[1]);
+ return 0;
+}
+
+#endif
+
static struct {
enum test_llvm__testcase prog_id;
const char *desc;
@@ -37,6 +60,17 @@ static struct {
&epoll_pwait_loop,
(NR_ITERS + 1) / 2,
},
+#ifdef HAVE_BPF_PROLOGUE
+ {
+ LLVM_TESTCASE_BPF_PROLOGUE,
+ "Test BPF prologue generation",
+ "[bpf_prologue_test]",
+ "fix kbuild first",
+ "check your vmlinux setting?",
+ &llseek_loop,
+ (NR_ITERS + 1) / 4,
+ },
+#endif
};
static int do_test(struct bpf_object *obj, int (*func)(void),
@@ -68,8 +102,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj);
if (err || list_empty(&parse_evlist.list)) {
pr_debug("Failed to add events selected by BPF\n");
- if (!err)
- return TEST_FAIL;
+ return TEST_FAIL;
}
snprintf(pid, sizeof(pid), "%d", getpid());
@@ -123,8 +156,10 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
}
}
- if (count != expect)
+ if (count != expect) {
pr_debug("BPF filter result incorrect\n");
+ goto out_delete_evlist;
+ }
ret = TEST_OK;
@@ -146,7 +181,7 @@ prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
return obj;
}
-static int __test__bpf(int index)
+static int __test__bpf(int idx)
{
int ret;
void *obj_buf;
@@ -154,54 +189,72 @@ static int __test__bpf(int index)
struct bpf_object *obj;
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
- bpf_testcase_table[index].prog_id,
+ bpf_testcase_table[idx].prog_id,
true);
if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
pr_debug("Unable to get BPF object, %s\n",
- bpf_testcase_table[index].msg_compile_fail);
- if (index == 0)
+ bpf_testcase_table[idx].msg_compile_fail);
+ if (idx == 0)
return TEST_SKIP;
else
return TEST_FAIL;
}
obj = prepare_bpf(obj_buf, obj_buf_sz,
- bpf_testcase_table[index].name);
+ bpf_testcase_table[idx].name);
if (!obj) {
ret = TEST_FAIL;
goto out;
}
ret = do_test(obj,
- bpf_testcase_table[index].target_func,
- bpf_testcase_table[index].expect_result);
+ bpf_testcase_table[idx].target_func,
+ bpf_testcase_table[idx].expect_result);
out:
bpf__clear();
return ret;
}
-int test__bpf(void)
+int test__bpf_subtest_get_nr(void)
+{
+ return (int)ARRAY_SIZE(bpf_testcase_table);
+}
+
+const char *test__bpf_subtest_get_desc(int i)
+{
+ if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
+ return NULL;
+ return bpf_testcase_table[i].desc;
+}
+
+int test__bpf(int i)
{
- unsigned int i;
int err;
+ if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
+ return TEST_FAIL;
+
if (geteuid() != 0) {
pr_debug("Only root can run BPF test\n");
return TEST_SKIP;
}
- for (i = 0; i < ARRAY_SIZE(bpf_testcase_table); i++) {
- err = __test__bpf(i);
+ err = __test__bpf(i);
+ return err;
+}
- if (err != TEST_OK)
- return err;
- }
+#else
+int test__bpf_subtest_get_nr(void)
+{
+ return 0;
+}
- return TEST_OK;
+const char *test__bpf_subtest_get_desc(int i __maybe_unused)
+{
+ return NULL;
}
-#else
-int test__bpf(void)
+int test__bpf(int i __maybe_unused)
{
pr_debug("Skip BPF test because BPF support is not compiled\n");
return TEST_SKIP;
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 80c442eab767..f2b1dcac45d3 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -11,7 +11,7 @@
#include "tests.h"
#include "debug.h"
#include "color.h"
-#include "parse-options.h"
+#include <subcmd/parse-options.h>
#include "symbol.h"
struct test __weak arch_tests[] = {
@@ -160,6 +160,11 @@ static struct test generic_tests[] = {
{
.desc = "Test LLVM searching and compiling",
.func = test__llvm,
+ .subtest = {
+ .skip_if_fail = true,
+ .get_nr = test__llvm_subtest_get_nr,
+ .get_desc = test__llvm_subtest_get_desc,
+ },
},
{
.desc = "Test topology in session",
@@ -168,6 +173,35 @@ static struct test generic_tests[] = {
{
.desc = "Test BPF filter",
.func = test__bpf,
+ .subtest = {
+ .skip_if_fail = true,
+ .get_nr = test__bpf_subtest_get_nr,
+ .get_desc = test__bpf_subtest_get_desc,
+ },
+ },
+ {
+ .desc = "Test thread map synthesize",
+ .func = test__thread_map_synthesize,
+ },
+ {
+ .desc = "Test cpu map synthesize",
+ .func = test__cpu_map_synthesize,
+ },
+ {
+ .desc = "Test stat config synthesize",
+ .func = test__synthesize_stat_config,
+ },
+ {
+ .desc = "Test stat synthesize",
+ .func = test__synthesize_stat,
+ },
+ {
+ .desc = "Test stat round synthesize",
+ .func = test__synthesize_stat_round,
+ },
+ {
+ .desc = "Test attr update synthesize",
+ .func = test__event_update,
},
{
.func = NULL,
@@ -203,7 +237,7 @@ static bool perf_test__matches(struct test *test, int curr, int argc, const char
return false;
}
-static int run_test(struct test *test)
+static int run_test(struct test *test, int subtest)
{
int status, err = -1, child = fork();
char sbuf[STRERR_BUFSIZE];
@@ -216,7 +250,22 @@ static int run_test(struct test *test)
if (!child) {
pr_debug("test child forked, pid %d\n", getpid());
- err = test->func();
+ if (!verbose) {
+ int nullfd = open("/dev/null", O_WRONLY);
+ if (nullfd >= 0) {
+ close(STDERR_FILENO);
+ close(STDOUT_FILENO);
+
+ dup2(nullfd, STDOUT_FILENO);
+ dup2(STDOUT_FILENO, STDERR_FILENO);
+ close(nullfd);
+ }
+ } else {
+ signal(SIGSEGV, sighandler_dump_stack);
+ signal(SIGFPE, sighandler_dump_stack);
+ }
+
+ err = test->func(subtest);
exit(err);
}
@@ -237,6 +286,40 @@ static int run_test(struct test *test)
for (j = 0; j < ARRAY_SIZE(tests); j++) \
for (t = &tests[j][0]; t->func; t++)
+static int test_and_print(struct test *t, bool force_skip, int subtest)
+{
+ int err;
+
+ if (!force_skip) {
+ pr_debug("\n--- start ---\n");
+ err = run_test(t, subtest);
+ pr_debug("---- end ----\n");
+ } else {
+ pr_debug("\n--- force skipped ---\n");
+ err = TEST_SKIP;
+ }
+
+ if (!t->subtest.get_nr)
+ pr_debug("%s:", t->desc);
+ else
+ pr_debug("%s subtest %d:", t->desc, subtest);
+
+ switch (err) {
+ case TEST_OK:
+ pr_info(" Ok\n");
+ break;
+ case TEST_SKIP:
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
+ break;
+ case TEST_FAIL:
+ default:
+ color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
+ break;
+ }
+
+ return err;
+}
+
static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
struct test *t;
@@ -264,21 +347,43 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
continue;
}
- pr_debug("\n--- start ---\n");
- err = run_test(t);
- pr_debug("---- end ----\n%s:", t->desc);
-
- switch (err) {
- case TEST_OK:
- pr_info(" Ok\n");
- break;
- case TEST_SKIP:
- color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
- break;
- case TEST_FAIL:
- default:
- color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
- break;
+ if (!t->subtest.get_nr) {
+ test_and_print(t, false, -1);
+ } else {
+ int subn = t->subtest.get_nr();
+ /*
+ * Minus 2 to align with normal testcases.
+ * For subtests we print an additional '.x' in the number,
+ * for example:
+ *
+ * 35: Test LLVM searching and compiling :
+ * 35.1: Basic BPF llvm compiling test : Ok
+ */
+ int subw = width > 2 ? width - 2 : width;
+ bool skip = false;
+ int subi;
+
+ if (subn <= 0) {
+ color_fprintf(stderr, PERF_COLOR_YELLOW,
+ " Skip (not compiled in)\n");
+ continue;
+ }
+ pr_info("\n");
+
+ for (subi = 0; subi < subn; subi++) {
+ int len = strlen(t->subtest.get_desc(subi));
+
+ if (subw < len)
+ subw = len;
+ }
+
+ for (subi = 0; subi < subn; subi++) {
+ pr_info("%2d.%1d: %-*s:", i, subi + 1, subw,
+ t->subtest.get_desc(subi));
+ err = test_and_print(t, skip, subi);
+ if (err != TEST_OK && t->subtest.skip_if_fail)
+ skip = true;
+ }
}
}
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index a767a6400c5c..313a48c6b2bc 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -433,7 +433,6 @@ enum {
static int do_test_code_reading(bool try_kcore)
{
- struct machines machines;
struct machine *machine;
struct thread *thread;
struct record_opts opts = {
@@ -459,8 +458,7 @@ static int do_test_code_reading(bool try_kcore)
pid = getpid();
- machines__init(&machines);
- machine = &machines.host;
+ machine = machine__new_host();
ret = machine__create_kernel_maps(machine);
if (ret < 0) {
@@ -549,6 +547,13 @@ static int do_test_code_reading(bool try_kcore)
if (ret < 0) {
if (!excl_kernel) {
excl_kernel = true;
+ /*
+ * Both cpus and threads are now owned by evlist
+ * and will be freed by the following perf_evlist__set_maps
+ * call. Get references to keep them alive.
+ */
+ cpu_map__get(cpus);
+ thread_map__get(threads);
perf_evlist__set_maps(evlist, NULL, NULL);
perf_evlist__delete(evlist);
evlist = NULL;
@@ -594,14 +599,13 @@ out_err:
cpu_map__put(cpus);
thread_map__put(threads);
}
- machines__destroy_kernel_maps(&machines);
machine__delete_threads(machine);
- machines__exit(&machines);
+ machine__delete(machine);
return err;
}
-int test__code_reading(void)
+int test__code_reading(int subtest __maybe_unused)
{
int ret;
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
new file mode 100644
index 000000000000..4cb6418a8ffc
--- /dev/null
+++ b/tools/perf/tests/cpumap.c
@@ -0,0 +1,88 @@
+#include "tests.h"
+#include "cpumap.h"
+
+static int process_event_mask(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct cpu_map_event *map_event = &event->cpu_map;
+ struct cpu_map_mask *mask;
+ struct cpu_map_data *data;
+ struct cpu_map *map;
+ int i;
+
+ data = &map_event->data;
+
+ TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK);
+
+ mask = (struct cpu_map_mask *)data->data;
+
+ TEST_ASSERT_VAL("wrong nr", mask->nr == 1);
+
+ for (i = 0; i < 20; i++) {
+ TEST_ASSERT_VAL("wrong cpu", test_bit(i, mask->mask));
+ }
+
+ map = cpu_map__new_data(data);
+ TEST_ASSERT_VAL("wrong nr", map->nr == 20);
+
+ for (i = 0; i < 20; i++) {
+ TEST_ASSERT_VAL("wrong cpu", map->map[i] == i);
+ }
+
+ cpu_map__put(map);
+ return 0;
+}
+
+static int process_event_cpus(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct cpu_map_event *map_event = &event->cpu_map;
+ struct cpu_map_entries *cpus;
+ struct cpu_map_data *data;
+ struct cpu_map *map;
+
+ data = &map_event->data;
+
+ TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__CPUS);
+
+ cpus = (struct cpu_map_entries *)data->data;
+
+ TEST_ASSERT_VAL("wrong nr", cpus->nr == 2);
+ TEST_ASSERT_VAL("wrong cpu", cpus->cpu[0] == 1);
+ TEST_ASSERT_VAL("wrong cpu", cpus->cpu[1] == 256);
+
+ map = cpu_map__new_data(data);
+ TEST_ASSERT_VAL("wrong nr", map->nr == 2);
+ TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1);
+ TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256);
+ TEST_ASSERT_VAL("wrong refcnt", atomic_read(&map->refcnt) == 1);
+ cpu_map__put(map);
+ return 0;
+}
+
+
+int test__cpu_map_synthesize(int subtest __maybe_unused)
+{
+ struct cpu_map *cpus;
+
+ /* This one is better stored as a mask. */
+ cpus = cpu_map__new("0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
+
+ TEST_ASSERT_VAL("failed to synthesize map",
+ !perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));
+
+ cpu_map__put(cpus);
+
+ /* This one is better stored as cpu values. */
+ cpus = cpu_map__new("1,256");
+
+ TEST_ASSERT_VAL("failed to synthesize map",
+ !perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));
+
+ cpu_map__put(cpus);
+ return 0;
+}
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index a218aeaf56a0..dc673ff7c437 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -110,7 +110,7 @@ static int dso__data_fd(struct dso *dso, struct machine *machine)
return fd;
}
-int test__dso_data(void)
+int test__dso_data(int subtest __maybe_unused)
{
struct machine machine;
struct dso *dso;
@@ -245,7 +245,7 @@ static int set_fd_limit(int n)
return setrlimit(RLIMIT_NOFILE, &rlim);
}
-int test__dso_data_cache(void)
+int test__dso_data_cache(int subtest __maybe_unused)
{
struct machine machine;
long nr_end, nr = open_files_cnt();
@@ -302,7 +302,7 @@ int test__dso_data_cache(void)
return 0;
}
-int test__dso_data_reopen(void)
+int test__dso_data_reopen(int subtest __maybe_unused)
{
struct machine machine;
long nr_end, nr = open_files_cnt();
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 07221793a3ac..1c5c0221cea2 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -51,6 +51,12 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
"krava_1",
"test__dwarf_unwind"
};
+ /*
+ * The funcs[MAX_STACK] array index, based on the
+ * callchain order setup.
+ */
+ int idx = callchain_param.order == ORDER_CALLER ?
+ MAX_STACK - *cnt - 1 : *cnt;
if (*cnt >= MAX_STACK) {
pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
@@ -63,8 +69,10 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
return -1;
}
- pr_debug("got: %s 0x%" PRIx64 "\n", symbol, entry->ip);
- return strcmp((const char *) symbol, funcs[(*cnt)++]);
+ (*cnt)++;
+ pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
+ symbol, entry->ip, funcs[idx]);
+ return strcmp((const char *) symbol, funcs[idx]);
}
__attribute__ ((noinline))
@@ -105,8 +113,16 @@ static int compare(void *p1, void *p2)
/* Any possible value should be 'thread' */
struct thread *thread = *(struct thread **)p1;
- if (global_unwind_retval == -INT_MAX)
+ if (global_unwind_retval == -INT_MAX) {
+ /* Call unwinder twice for both callchain orders. */
+ callchain_param.order = ORDER_CALLER;
+
global_unwind_retval = unwind_thread(thread);
+ if (!global_unwind_retval) {
+ callchain_param.order = ORDER_CALLEE;
+ global_unwind_retval = unwind_thread(thread);
+ }
+ }
return p1 - p2;
}
@@ -142,21 +158,23 @@ static int krava_1(struct thread *thread)
return krava_2(thread);
}
-int test__dwarf_unwind(void)
+int test__dwarf_unwind(int subtest __maybe_unused)
{
- struct machines machines;
struct machine *machine;
struct thread *thread;
int err = -1;
- machines__init(&machines);
-
- machine = machines__find(&machines, HOST_KERNEL_ID);
+ machine = machine__new_host();
if (!machine) {
pr_err("Could not get machine\n");
return -1;
}
+ if (machine__create_kernel_maps(machine)) {
+ pr_err("Failed to create kernel maps\n");
+ return -1;
+ }
+
callchain_param.record_mode = CALLCHAIN_DWARF;
if (init_live_machine(machine)) {
@@ -178,7 +196,6 @@ int test__dwarf_unwind(void)
out:
machine__delete_threads(machine);
- machine__exit(machine);
- machines__exit(&machines);
+ machine__delete(machine);
return err;
}
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
new file mode 100644
index 000000000000..012eab5d1df1
--- /dev/null
+++ b/tools/perf/tests/event_update.c
@@ -0,0 +1,117 @@
+#include <linux/compiler.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "machine.h"
+#include "tests.h"
+#include "debug.h"
+
+static int process_event_unit(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct event_update_event *ev = (struct event_update_event *) event;
+
+ TEST_ASSERT_VAL("wrong id", ev->id == 123);
+ TEST_ASSERT_VAL("wrong id", ev->type == PERF_EVENT_UPDATE__UNIT);
+ TEST_ASSERT_VAL("wrong unit", !strcmp(ev->data, "KRAVA"));
+ return 0;
+}
+
+static int process_event_scale(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct event_update_event *ev = (struct event_update_event *) event;
+ struct event_update_event_scale *ev_data;
+
+ ev_data = (struct event_update_event_scale *) ev->data;
+
+ TEST_ASSERT_VAL("wrong id", ev->id == 123);
+ TEST_ASSERT_VAL("wrong id", ev->type == PERF_EVENT_UPDATE__SCALE);
+ TEST_ASSERT_VAL("wrong scale", ev_data->scale = 0.123);
+ return 0;
+}
+
+struct event_name {
+ struct perf_tool tool;
+ const char *name;
+};
+
+static int process_event_name(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct event_name *tmp = container_of(tool, struct event_name, tool);
+ struct event_update_event *ev = (struct event_update_event*) event;
+
+ TEST_ASSERT_VAL("wrong id", ev->id == 123);
+ TEST_ASSERT_VAL("wrong id", ev->type == PERF_EVENT_UPDATE__NAME);
+ TEST_ASSERT_VAL("wrong name", !strcmp(ev->data, tmp->name));
+ return 0;
+}
+
+static int process_event_cpus(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct event_update_event *ev = (struct event_update_event*) event;
+ struct event_update_event_cpus *ev_data;
+ struct cpu_map *map;
+
+ ev_data = (struct event_update_event_cpus*) ev->data;
+
+ map = cpu_map__new_data(&ev_data->cpus);
+
+ TEST_ASSERT_VAL("wrong id", ev->id == 123);
+ TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__CPUS);
+ TEST_ASSERT_VAL("wrong cpus", map->nr == 3);
+ TEST_ASSERT_VAL("wrong cpus", map->map[0] == 1);
+ TEST_ASSERT_VAL("wrong cpus", map->map[1] == 2);
+ TEST_ASSERT_VAL("wrong cpus", map->map[2] == 3);
+ cpu_map__put(map);
+ return 0;
+}
+
+int test__event_update(int subtest __maybe_unused)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct event_name tmp;
+
+ evlist = perf_evlist__new_default();
+ TEST_ASSERT_VAL("failed to get evlist", evlist);
+
+ evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("failed to allos ids",
+ !perf_evsel__alloc_id(evsel, 1, 1));
+
+ perf_evlist__id_add(evlist, evsel, 0, 0, 123);
+
+ evsel->unit = strdup("KRAVA");
+
+ TEST_ASSERT_VAL("failed to synthesize attr update unit",
+ !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit));
+
+ evsel->scale = 0.123;
+
+ TEST_ASSERT_VAL("failed to synthesize attr update scale",
+ !perf_event__synthesize_event_update_scale(NULL, evsel, process_event_scale));
+
+ tmp.name = perf_evsel__name(evsel);
+
+ TEST_ASSERT_VAL("failed to synthesize attr update name",
+ !perf_event__synthesize_event_update_name(&tmp.tool, evsel, process_event_name));
+
+ evsel->own_cpus = cpu_map__new("1,2,3");
+
+ TEST_ASSERT_VAL("failed to synthesize attr update cpus",
+ !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
+
+ cpu_map__put(evsel->own_cpus);
+ return 0;
+}
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 3fa715987a5e..2de4a4f2c3ed 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -95,7 +95,7 @@ out_delete_evlist:
#define perf_evsel__name_array_test(names) \
__perf_evsel__name_array_test(names, ARRAY_SIZE(names))
-int test__perf_evsel__roundtrip_name_test(void)
+int test__perf_evsel__roundtrip_name_test(int subtest __maybe_unused)
{
int err = 0, ret = 0;
@@ -103,7 +103,8 @@ int test__perf_evsel__roundtrip_name_test(void)
if (err)
ret = err;
- err = perf_evsel__name_array_test(perf_evsel__sw_names);
+ err = __perf_evsel__name_array_test(perf_evsel__sw_names,
+ PERF_COUNT_SW_DUMMY + 1);
if (err)
ret = err;
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 790e413d9a1f..1984b3bbfe15 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -32,7 +32,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
return ret;
}
-int test__perf_evsel__tp_sched_test(void)
+int test__perf_evsel__tp_sched_test(int subtest __maybe_unused)
{
struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
int ret = 0;
diff --git a/tools/perf/tests/fdarray.c b/tools/perf/tests/fdarray.c
index d24b837951d4..c809463edbe5 100644
--- a/tools/perf/tests/fdarray.c
+++ b/tools/perf/tests/fdarray.c
@@ -25,7 +25,7 @@ static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE
return printed + fdarray__fprintf(fda, fp);
}
-int test__fdarray__filter(void)
+int test__fdarray__filter(int subtest __maybe_unused)
{
int nr_fds, expected_fd[2], fd, err = TEST_FAIL;
struct fdarray *fda = fdarray__new(5, 5);
@@ -103,7 +103,7 @@ out:
return err;
}
-int test__fdarray__add(void)
+int test__fdarray__add(int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct fdarray *fda = fdarray__new(2, 2);
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index ce80b274b097..071a8b5f5232 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -150,7 +150,6 @@ struct machine *setup_fake_machine(struct machines *machines)
out:
pr_debug("Not enough memory for machine setup\n");
machine__delete_threads(machine);
- machine__delete(machine);
return NULL;
}
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 7ed737019de7..5e6a86e50fb9 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -281,7 +281,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = false;
perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
- setup_sorting();
+ setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@@ -428,7 +428,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = false;
perf_evsel__set_sample_bit(evsel, CALLCHAIN);
- setup_sorting();
+ setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@@ -486,7 +486,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = true;
perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
- setup_sorting();
+ setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@@ -670,7 +670,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = true;
perf_evsel__set_sample_bit(evsel, CALLCHAIN);
- setup_sorting();
+ setup_sorting(NULL);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@@ -686,7 +686,7 @@ out:
return err;
}
-int test__hists_cumulate(void)
+int test__hists_cumulate(int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct machines machines;
@@ -706,6 +706,7 @@ int test__hists_cumulate(void)
err = parse_events(evlist, "cpu-clock", NULL);
if (err)
goto out;
+ err = TEST_FAIL;
machines__init(&machines);
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 818acf875dd0..351a42463444 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -104,7 +104,7 @@ out:
return TEST_FAIL;
}
-int test__hists_filter(void)
+int test__hists_filter(int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct machines machines;
@@ -120,9 +120,10 @@ int test__hists_filter(void)
err = parse_events(evlist, "task-clock", NULL);
if (err)
goto out;
+ err = TEST_FAIL;
/* default sort order (comm,dso,sym) will be used */
- if (setup_sorting() < 0)
+ if (setup_sorting(NULL) < 0)
goto out;
machines__init(&machines);
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 8c102b011424..64b257d8d557 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -64,7 +64,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
struct perf_evsel *evsel;
struct addr_location al;
struct hist_entry *he;
- struct perf_sample sample = { .period = 1, };
+ struct perf_sample sample = { .period = 1, .weight = 1, };
size_t i = 0, k;
/*
@@ -90,7 +90,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
goto out;
he = __hists__add_entry(hists, &al, NULL,
- NULL, NULL, 1, 1, 0, true);
+ NULL, NULL, &sample, true);
if (he == NULL) {
addr_location__put(&al);
goto out;
@@ -116,7 +116,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
goto out;
he = __hists__add_entry(hists, &al, NULL,
- NULL, NULL, 1, 1, 0, true);
+ NULL, NULL, &sample, true);
if (he == NULL) {
addr_location__put(&al);
goto out;
@@ -274,7 +274,7 @@ static int validate_link(struct hists *leader, struct hists *other)
return __validate_link(leader, 0) || __validate_link(other, 1);
}
-int test__hists_link(void)
+int test__hists_link(int subtest __maybe_unused)
{
int err = -1;
struct hists *hists, *first_hists;
@@ -293,8 +293,9 @@ int test__hists_link(void)
if (err)
goto out;
+ err = TEST_FAIL;
/* default sort order (comm,dso,sym) will be used */
- if (setup_sorting() < 0)
+ if (setup_sorting(NULL) < 0)
goto out;
machines__init(&machines);
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index adbebc852cc8..b231265148d8 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -134,7 +134,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
field_order = NULL;
sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */
- setup_sorting();
+ setup_sorting(NULL);
/*
* expected output:
@@ -236,7 +236,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
field_order = "overhead,cpu";
sort_order = "pid";
- setup_sorting();
+ setup_sorting(NULL);
/*
* expected output:
@@ -292,7 +292,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
field_order = "comm,overhead,dso";
sort_order = NULL;
- setup_sorting();
+ setup_sorting(NULL);
/*
* expected output:
@@ -366,7 +366,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
field_order = "dso,sym,comm,overhead,dso";
sort_order = "sym";
- setup_sorting();
+ setup_sorting(NULL);
/*
* expected output:
@@ -468,7 +468,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
field_order = "cpu,pid,comm,dso,sym";
sort_order = "dso,pid";
- setup_sorting();
+ setup_sorting(NULL);
/*
* expected output:
@@ -576,7 +576,7 @@ out:
return err;
}
-int test__hists_output(void)
+int test__hists_output(int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct machines machines;
@@ -597,6 +597,7 @@ int test__hists_output(void)
err = parse_events(evlist, "cpu-clock", NULL);
if (err)
goto out;
+ err = TEST_FAIL;
machines__init(&machines);
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index a2e2269aa093..ddb78fae064a 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -49,13 +49,12 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
* when an event is disabled but a dummy software event is not disabled. If the
* test passes %0 is returned, otherwise %-1 is returned.
*/
-int test__keep_tracking(void)
+int test__keep_tracking(int subtest __maybe_unused)
{
struct record_opts opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
- .freq = 4000,
.target = {
.uses_mmap = true,
},
@@ -124,7 +123,7 @@ int test__keep_tracking(void)
evsel = perf_evlist__last(evlist);
- CHECK__(perf_evlist__disable_event(evlist, evsel));
+ CHECK__(perf_evsel__disable(evsel));
comm = "Test COMM 2";
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
index 08c433b4bf4f..d2af78193153 100644
--- a/tools/perf/tests/kmod-path.c
+++ b/tools/perf/tests/kmod-path.c
@@ -49,7 +49,7 @@ static int test_is_kernel_module(const char *path, int cpumode, bool expect)
#define M(path, c, e) \
TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
-int test__kmod_path__parse(void)
+int test__kmod_path__parse(int subtest __maybe_unused)
{
/* path alloc_name alloc_ext kmod comp name ext */
T("/xxxx/xxxx/x-x.ko", true , true , true, false, "[x_x]", NULL);
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
index bc4cf507cde5..06f45c1d4256 100644
--- a/tools/perf/tests/llvm.c
+++ b/tools/perf/tests/llvm.c
@@ -44,13 +44,17 @@ static struct {
.source = test_llvm__bpf_test_kbuild_prog,
.desc = "Test kbuild searching",
},
+ [LLVM_TESTCASE_BPF_PROLOGUE] = {
+ .source = test_llvm__bpf_test_prologue_prog,
+ .desc = "Compile source for BPF prologue generation test",
+ },
};
int
test_llvm__fetch_bpf_obj(void **p_obj_buf,
size_t *p_obj_buf_sz,
- enum test_llvm__testcase index,
+ enum test_llvm__testcase idx,
bool force)
{
const char *source;
@@ -59,11 +63,11 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf,
char *tmpl_new = NULL, *clang_opt_new = NULL;
int err, old_verbose, ret = TEST_FAIL;
- if (index >= __LLVM_TESTCASE_MAX)
+ if (idx >= __LLVM_TESTCASE_MAX)
return TEST_FAIL;
- source = bpf_source_table[index].source;
- desc = bpf_source_table[index].desc;
+ source = bpf_source_table[idx].source;
+ desc = bpf_source_table[idx].desc;
perf_config(perf_config_cb, NULL);
@@ -127,44 +131,39 @@ out:
return ret;
}
-int test__llvm(void)
+int test__llvm(int subtest)
{
- enum test_llvm__testcase i;
+ int ret;
+ void *obj_buf = NULL;
+ size_t obj_buf_sz = 0;
- for (i = 0; i < __LLVM_TESTCASE_MAX; i++) {
- int ret;
- void *obj_buf = NULL;
- size_t obj_buf_sz = 0;
+ if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
+ return TEST_FAIL;
- ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
- i, false);
+ ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
+ subtest, false);
- if (ret == TEST_OK) {
- ret = test__bpf_parsing(obj_buf, obj_buf_sz);
- if (ret != TEST_OK)
- pr_debug("Failed to parse test case '%s'\n",
- bpf_source_table[i].desc);
- }
- free(obj_buf);
-
- switch (ret) {
- case TEST_SKIP:
- return TEST_SKIP;
- case TEST_OK:
- break;
- default:
- /*
- * Test 0 is the basic LLVM test. If test 0
- * fail, the basic LLVM support not functional
- * so the whole test should fail. If other test
- * case fail, it can be fixed by adjusting
- * config so don't report error.
- */
- if (i == 0)
- return TEST_FAIL;
- else
- return TEST_SKIP;
+ if (ret == TEST_OK) {
+ ret = test__bpf_parsing(obj_buf, obj_buf_sz);
+ if (ret != TEST_OK) {
+ pr_debug("Failed to parse test case '%s'\n",
+ bpf_source_table[subtest].desc);
}
}
- return TEST_OK;
+ free(obj_buf);
+
+ return ret;
+}
+
+int test__llvm_subtest_get_nr(void)
+{
+ return __LLVM_TESTCASE_MAX;
+}
+
+const char *test__llvm_subtest_get_desc(int subtest)
+{
+ if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
+ return NULL;
+
+ return bpf_source_table[subtest].desc;
}
diff --git a/tools/perf/tests/llvm.h b/tools/perf/tests/llvm.h
index d91d8f44efee..5150b4d6ef50 100644
--- a/tools/perf/tests/llvm.h
+++ b/tools/perf/tests/llvm.h
@@ -6,10 +6,12 @@
extern const char test_llvm__bpf_base_prog[];
extern const char test_llvm__bpf_test_kbuild_prog[];
+extern const char test_llvm__bpf_test_prologue_prog[];
enum test_llvm__testcase {
LLVM_TESTCASE_BASE,
LLVM_TESTCASE_KBUILD,
+ LLVM_TESTCASE_BPF_PROLOGUE,
__LLVM_TESTCASE_MAX,
};
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 8ea3dffc5065..f918015512af 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -1,9 +1,11 @@
+include ../scripts/Makefile.include
+
ifndef MK
ifeq ($(MAKECMDGOALS),)
# no target specified, trigger the whole suite
all:
@echo "Testing Makefile"; $(MAKE) -sf tests/make MK=Makefile
- @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf
+ @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf SET_PARALLEL=1 SET_O=1
else
# run only specific test over 'Makefile'
%:
@@ -11,8 +13,40 @@ else
endif
else
PERF := .
+PERF_O := $(PERF)
+O_OPT :=
+
+ifneq ($(O),)
+ FULL_O := $(shell readlink -f $(O) || echo $(O))
+ PERF_O := $(FULL_O)
+ ifeq ($(SET_O),1)
+ O_OPT := 'O=$(FULL_O)'
+ endif
+ K_O_OPT := 'O=$(FULL_O)'
+endif
+
+PARALLEL_OPT=
+ifeq ($(SET_PARALLEL),1)
+ cores := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ ifeq ($(cores),0)
+ cores := 1
+ endif
+ PARALLEL_OPT="-j$(cores)"
+endif
+
+# As per kernel Makefile, avoid funny character set dependencies
+unexport LC_ALL
+LC_COLLATE=C
+LC_NUMERIC=C
+export LC_COLLATE LC_NUMERIC
-include config/Makefile.arch
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+include $(srctree)/tools/scripts/Makefile.arch
# FIXME looks like x86 is the only arch running tests ;-)
# we need some IS_(32/64) flag to make this generic
@@ -142,11 +176,11 @@ test_make_doc := $(test_ok)
test_make_help_O := $(test_ok)
test_make_doc_O := $(test_ok)
-test_make_python_perf_so := test -f $(PERF)/python/perf.so
+test_make_python_perf_so := test -f $(PERF_O)/python/perf.so
-test_make_perf_o := test -f $(PERF)/perf.o
-test_make_util_map_o := test -f $(PERF)/util/map.o
-test_make_util_pmu_bison_o := test -f $(PERF)/util/pmu-bison.o
+test_make_perf_o := test -f $(PERF_O)/perf.o
+test_make_util_map_o := test -f $(PERF_O)/util/map.o
+test_make_util_pmu_bison_o := test -f $(PERF_O)/util/pmu-bison.o
define test_dest_files
for file in $(1); do \
@@ -213,7 +247,7 @@ test_make_perf_o_O := test -f $$TMP_O/perf.o
test_make_util_map_o_O := test -f $$TMP_O/util/map.o
test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o
-test_default = test -x $(PERF)/perf
+test_default = test -x $(PERF_O)/perf
test = $(if $(test_$1),$(test_$1),$(test_default))
test_default_O = test -x $$TMP_O/perf
@@ -233,12 +267,12 @@ endif
MAKEFLAGS := --no-print-directory
-clean := @(cd $(PERF); make -s -f $(MK) clean >/dev/null)
+clean := @(cd $(PERF); make -s -f $(MK) $(O_OPT) clean >/dev/null)
$(run):
$(call clean)
@TMP_DEST=$$(mktemp -d); \
- cmd="cd $(PERF) && make -f $(MK) DESTDIR=$$TMP_DEST $($@)"; \
+ cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST $($@)"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1; \
echo " test: $(call test,$@)" >> $@ 2>&1; \
@@ -249,7 +283,7 @@ $(run_O):
$(call clean)
@TMP_O=$$(mktemp -d); \
TMP_DEST=$$(mktemp -d); \
- cmd="cd $(PERF) && make -f $(MK) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
+ cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1 && \
echo " test: $(call test_O,$@)" >> $@ 2>&1; \
@@ -259,19 +293,25 @@ $(run_O):
tarpkg:
@cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
- ( eval $$cmd ) >> $@ 2>&1
+ ( eval $$cmd ) >> $@ 2>&1 && \
+ rm -f $@
+
+KERNEL_O := ../..
+ifneq ($(O),)
+ KERNEL_O := $(O)
+endif
make_kernelsrc:
- @echo "- make -C <kernelsrc> tools/perf"
+ @echo "- make -C <kernelsrc> $(PARALLEL_OPT) $(K_O_OPT) tools/perf"
$(call clean); \
- (make -C ../.. tools/perf) > $@ 2>&1 && \
- test -x perf && rm -f $@ || (cat $@ ; false)
+ (make -C ../.. $(PARALLEL_OPT) $(K_O_OPT) tools/perf) > $@ 2>&1 && \
+ test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
make_kernelsrc_tools:
- @echo "- make -C <kernelsrc>/tools perf"
+ @echo "- make -C <kernelsrc>/tools $(PARALLEL_OPT) $(K_O_OPT) perf"
$(call clean); \
- (make -C ../../tools perf) > $@ 2>&1 && \
- test -x perf && rm -f $@ || (cat $@ ; false)
+ (make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \
+ test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
@echo OK
@@ -279,5 +319,5 @@ all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
out: $(run_O)
@echo OK
-.PHONY: all $(run) $(run_O) tarpkg clean
+.PHONY: all $(run) $(run_O) tarpkg clean make_kernelsrc make_kernelsrc_tools
endif # ifndef MK
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 4495493c9431..359e98fcd94c 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -16,7 +16,7 @@
* Then it checks if the number of syscalls reported as perf events by
* the kernel corresponds to the number of syscalls made.
*/
-int test__basic_mmap(void)
+int test__basic_mmap(int subtest __maybe_unused)
{
int err = -1;
union perf_event *event;
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 145050e2e544..0c5ce44f723f 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -149,7 +149,6 @@ static int synth_process(struct machine *machine)
static int mmap_events(synth_cb synth)
{
- struct machines machines;
struct machine *machine;
int err, i;
@@ -162,8 +161,7 @@ static int mmap_events(synth_cb synth)
*/
TEST_ASSERT_VAL("failed to create threads", !threads_create());
- machines__init(&machines);
- machine = &machines.host;
+ machine = machine__new_host();
dump_trace = verbose > 1 ? 1 : 0;
@@ -203,7 +201,7 @@ static int mmap_events(synth_cb synth)
}
machine__delete_threads(machine);
- machines__exit(&machines);
+ machine__delete(machine);
return err;
}
@@ -221,7 +219,7 @@ static int mmap_events(synth_cb synth)
*
* by using all thread objects.
*/
-int test__mmap_thread_lookup(void)
+int test__mmap_thread_lookup(int subtest __maybe_unused)
{
/* perf_event__synthesize_threads synthesize */
TEST_ASSERT_VAL("failed with sythesizing all",
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index 2006485a2859..53c2273e8859 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -7,7 +7,7 @@
#include "debug.h"
#include "stat.h"
-int test__openat_syscall_event_on_all_cpus(void)
+int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
int err = -1, fd, cpu;
struct cpu_map *cpus;
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index 5e811cd8f1c3..eb99a105f31c 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -6,7 +6,7 @@
#include "tests.h"
#include "debug.h"
-int test__syscall_openat_tp_fields(void)
+int test__syscall_openat_tp_fields(int subtest __maybe_unused)
{
struct record_opts opts = {
.target = {
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
index 033b54797b8a..1184f9ba6499 100644
--- a/tools/perf/tests/openat-syscall.c
+++ b/tools/perf/tests/openat-syscall.c
@@ -5,7 +5,7 @@
#include "debug.h"
#include "tests.h"
-int test__openat_syscall_event(void)
+int test__openat_syscall_event(int subtest __maybe_unused)
{
int err = -1, fd;
struct perf_evsel *evsel;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 636d7b42d844..abe8849d1d70 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1765,7 +1765,7 @@ static void debug_warn(const char *warn, va_list params)
fprintf(stderr, " Warning: %s\n", msg);
}
-int test__parse_events(void)
+int test__parse_events(int subtest __maybe_unused)
{
int ret1, ret2 = 0;
diff --git a/tools/perf/tests/parse-no-sample-id-all.c b/tools/perf/tests/parse-no-sample-id-all.c
index 2c63ea658541..294c76b01b41 100644
--- a/tools/perf/tests/parse-no-sample-id-all.c
+++ b/tools/perf/tests/parse-no-sample-id-all.c
@@ -67,7 +67,7 @@ struct test_attr_event {
*
* Return: %0 on success, %-1 if the test fails.
*/
-int test__parse_no_sample_id_all(void)
+int test__parse_no_sample_id_all(int subtest __maybe_unused)
{
int err;
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 7a228a2a070b..1cc78cefe399 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -32,7 +32,7 @@ realloc:
return cpu;
}
-int test__PERF_RECORD(void)
+int test__PERF_RECORD(int subtest __maybe_unused)
{
struct record_opts opts = {
.target = {
@@ -40,12 +40,11 @@ int test__PERF_RECORD(void)
.uses_mmap = true,
},
.no_buffering = true,
- .freq = 10,
.mmap_pages = 256,
};
cpu_set_t cpu_mask;
size_t cpu_mask_size = sizeof(cpu_mask);
- struct perf_evlist *evlist = perf_evlist__new_default();
+ struct perf_evlist *evlist = perf_evlist__new_dummy();
struct perf_evsel *evsel;
struct perf_sample sample;
const char *cmd = "sleep";
@@ -61,6 +60,9 @@ int test__PERF_RECORD(void)
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
char sbuf[STRERR_BUFSIZE];
+ if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
+ evlist = perf_evlist__new_default();
+
if (evlist == NULL || argv == NULL) {
pr_debug("Not enough memory to create evlist\n");
goto out;
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index faa04e9d5d5f..1e2ba2602930 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -133,7 +133,7 @@ static struct list_head *test_terms_list(void)
return &terms;
}
-int test__pmu(void)
+int test__pmu(int subtest __maybe_unused)
{
char *format = test_format_dir_get();
LIST_HEAD(formats);
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
index 7760277c6def..7a52834ee0d0 100644
--- a/tools/perf/tests/python-use.c
+++ b/tools/perf/tests/python-use.c
@@ -4,11 +4,12 @@
#include <stdio.h>
#include <stdlib.h>
+#include <linux/compiler.h>
#include "tests.h"
extern int verbose;
-int test__python_use(void)
+int test__python_use(int subtest __maybe_unused)
{
char *cmd;
int ret;
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 30c02181e78b..5f23710b9fee 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -290,7 +290,7 @@ out_free:
* checks sample format bits separately and together. If the test passes %0 is
* returned, otherwise %-1 is returned.
*/
-int test__sample_parsing(void)
+int test__sample_parsing(int subtest __maybe_unused)
{
const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
u64 sample_type;
diff --git a/tools/perf/tests/stat.c b/tools/perf/tests/stat.c
new file mode 100644
index 000000000000..6a20ff2326bb
--- /dev/null
+++ b/tools/perf/tests/stat.c
@@ -0,0 +1,111 @@
+#include <linux/compiler.h>
+#include "event.h"
+#include "tests.h"
+#include "stat.h"
+#include "counts.h"
+#include "debug.h"
+
+static bool has_term(struct stat_config_event *config,
+ u64 tag, u64 val)
+{
+ unsigned i;
+
+ for (i = 0; i < config->nr; i++) {
+ if ((config->data[i].tag == tag) &&
+ (config->data[i].val == val))
+ return true;
+ }
+
+ return false;
+}
+
+static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct stat_config_event *config = &event->stat_config;
+ struct perf_stat_config stat_config;
+
+#define HAS(term, val) \
+ has_term(config, PERF_STAT_CONFIG_TERM__##term, val)
+
+ TEST_ASSERT_VAL("wrong nr", config->nr == PERF_STAT_CONFIG_TERM__MAX);
+ TEST_ASSERT_VAL("wrong aggr_mode", HAS(AGGR_MODE, AGGR_CORE));
+ TEST_ASSERT_VAL("wrong scale", HAS(SCALE, 1));
+ TEST_ASSERT_VAL("wrong interval", HAS(INTERVAL, 1));
+
+#undef HAS
+
+ perf_event__read_stat_config(&stat_config, config);
+
+ TEST_ASSERT_VAL("wrong aggr_mode", stat_config.aggr_mode == AGGR_CORE);
+ TEST_ASSERT_VAL("wrong scale", stat_config.scale == 1);
+ TEST_ASSERT_VAL("wrong interval", stat_config.interval == 1);
+ return 0;
+}
+
+int test__synthesize_stat_config(int subtest __maybe_unused)
+{
+ struct perf_stat_config stat_config = {
+ .aggr_mode = AGGR_CORE,
+ .scale = 1,
+ .interval = 1,
+ };
+
+ TEST_ASSERT_VAL("failed to synthesize stat_config",
+ !perf_event__synthesize_stat_config(NULL, &stat_config, process_stat_config_event, NULL));
+
+ return 0;
+}
+
+static int process_stat_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct stat_event *st = &event->stat;
+
+ TEST_ASSERT_VAL("wrong cpu", st->cpu == 1);
+ TEST_ASSERT_VAL("wrong thread", st->thread == 2);
+ TEST_ASSERT_VAL("wrong id", st->id == 3);
+ TEST_ASSERT_VAL("wrong val", st->val == 100);
+ TEST_ASSERT_VAL("wrong run", st->ena == 200);
+ TEST_ASSERT_VAL("wrong ena", st->run == 300);
+ return 0;
+}
+
+int test__synthesize_stat(int subtest __maybe_unused)
+{
+ struct perf_counts_values count;
+
+ count.val = 100;
+ count.ena = 200;
+ count.run = 300;
+
+ TEST_ASSERT_VAL("failed to synthesize stat_config",
+ !perf_event__synthesize_stat(NULL, 1, 2, 3, &count, process_stat_event, NULL));
+
+ return 0;
+}
+
+static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct stat_round_event *stat_round = &event->stat_round;
+
+ TEST_ASSERT_VAL("wrong time", stat_round->time == 0xdeadbeef);
+ TEST_ASSERT_VAL("wrong type", stat_round->type == PERF_STAT_ROUND_TYPE__INTERVAL);
+ return 0;
+}
+
+int test__synthesize_stat_round(int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("failed to synthesize stat_config",
+ !perf_event__synthesize_stat_round(NULL, 0xdeadbeef, PERF_STAT_ROUND_TYPE__INTERVAL,
+ process_stat_round_event, NULL));
+
+ return 0;
+}
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 5b83f56a3b6f..36e8ce1550e3 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -122,7 +122,7 @@ out_delete_evlist:
return err;
}
-int test__sw_clock_freq(void)
+int test__sw_clock_freq(int subtest __maybe_unused)
{
int ret;
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index a02af503100c..ebd80168d51e 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -305,7 +305,7 @@ out_free_nodes:
* evsel->system_wide and evsel->tracking flags (respectively) with other events
* sometimes enabled or disabled.
*/
-int test__switch_tracking(void)
+int test__switch_tracking(int subtest __maybe_unused)
{
const char *sched_switch = "sched:sched_switch";
struct switch_tracking switch_tracking = { .tids = NULL, };
@@ -455,7 +455,7 @@ int test__switch_tracking(void)
perf_evlist__enable(evlist);
- err = perf_evlist__disable_event(evlist, cpu_clocks_evsel);
+ err = perf_evsel__disable(cpu_clocks_evsel);
if (err) {
pr_debug("perf_evlist__disable_event failed!\n");
goto out_err;
@@ -474,7 +474,7 @@ int test__switch_tracking(void)
goto out_err;
}
- err = perf_evlist__disable_event(evlist, cycles_evsel);
+ err = perf_evsel__disable(cycles_evsel);
if (err) {
pr_debug("perf_evlist__disable_event failed!\n");
goto out_err;
@@ -500,7 +500,7 @@ int test__switch_tracking(void)
goto out_err;
}
- err = perf_evlist__enable_event(evlist, cycles_evsel);
+ err = perf_evsel__enable(cycles_evsel);
if (err) {
pr_debug("perf_evlist__disable_event failed!\n");
goto out_err;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index add16385f13e..2dfff7ac8ef3 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -31,7 +31,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
* if the number of exit event reported by the kernel is 1 or not
* in order to check the kernel returns correct number of event.
*/
-int test__task_exit(void)
+int test__task_exit(int subtest __maybe_unused)
{
int err = -1;
union perf_event *event;
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 3c8734a3abbc..82b2b5e6ba7c 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -1,6 +1,8 @@
#ifndef TESTS_H
#define TESTS_H
+#include <stdbool.h>
+
#define TEST_ASSERT_VAL(text, cond) \
do { \
if (!(cond)) { \
@@ -26,48 +28,63 @@ enum {
struct test {
const char *desc;
- int (*func)(void);
+ int (*func)(int subtest);
+ struct {
+ bool skip_if_fail;
+ int (*get_nr)(void);
+ const char *(*get_desc)(int subtest);
+ } subtest;
};
/* Tests */
-int test__vmlinux_matches_kallsyms(void);
-int test__openat_syscall_event(void);
-int test__openat_syscall_event_on_all_cpus(void);
-int test__basic_mmap(void);
-int test__PERF_RECORD(void);
-int test__perf_evsel__roundtrip_name_test(void);
-int test__perf_evsel__tp_sched_test(void);
-int test__syscall_openat_tp_fields(void);
-int test__pmu(void);
-int test__attr(void);
-int test__dso_data(void);
-int test__dso_data_cache(void);
-int test__dso_data_reopen(void);
-int test__parse_events(void);
-int test__hists_link(void);
-int test__python_use(void);
-int test__bp_signal(void);
-int test__bp_signal_overflow(void);
-int test__task_exit(void);
-int test__sw_clock_freq(void);
-int test__code_reading(void);
-int test__sample_parsing(void);
-int test__keep_tracking(void);
-int test__parse_no_sample_id_all(void);
-int test__dwarf_unwind(void);
-int test__hists_filter(void);
-int test__mmap_thread_lookup(void);
-int test__thread_mg_share(void);
-int test__hists_output(void);
-int test__hists_cumulate(void);
-int test__switch_tracking(void);
-int test__fdarray__filter(void);
-int test__fdarray__add(void);
-int test__kmod_path__parse(void);
-int test__thread_map(void);
-int test__llvm(void);
-int test__bpf(void);
-int test_session_topology(void);
+int test__vmlinux_matches_kallsyms(int subtest);
+int test__openat_syscall_event(int subtest);
+int test__openat_syscall_event_on_all_cpus(int subtest);
+int test__basic_mmap(int subtest);
+int test__PERF_RECORD(int subtest);
+int test__perf_evsel__roundtrip_name_test(int subtest);
+int test__perf_evsel__tp_sched_test(int subtest);
+int test__syscall_openat_tp_fields(int subtest);
+int test__pmu(int subtest);
+int test__attr(int subtest);
+int test__dso_data(int subtest);
+int test__dso_data_cache(int subtest);
+int test__dso_data_reopen(int subtest);
+int test__parse_events(int subtest);
+int test__hists_link(int subtest);
+int test__python_use(int subtest);
+int test__bp_signal(int subtest);
+int test__bp_signal_overflow(int subtest);
+int test__task_exit(int subtest);
+int test__sw_clock_freq(int subtest);
+int test__code_reading(int subtest);
+int test__sample_parsing(int subtest);
+int test__keep_tracking(int subtest);
+int test__parse_no_sample_id_all(int subtest);
+int test__dwarf_unwind(int subtest);
+int test__hists_filter(int subtest);
+int test__mmap_thread_lookup(int subtest);
+int test__thread_mg_share(int subtest);
+int test__hists_output(int subtest);
+int test__hists_cumulate(int subtest);
+int test__switch_tracking(int subtest);
+int test__fdarray__filter(int subtest);
+int test__fdarray__add(int subtest);
+int test__kmod_path__parse(int subtest);
+int test__thread_map(int subtest);
+int test__llvm(int subtest);
+const char *test__llvm_subtest_get_desc(int subtest);
+int test__llvm_subtest_get_nr(void);
+int test__bpf(int subtest);
+const char *test__bpf_subtest_get_desc(int subtest);
+int test__bpf_subtest_get_nr(void);
+int test_session_topology(int subtest);
+int test__thread_map_synthesize(int subtest);
+int test__cpu_map_synthesize(int subtest);
+int test__synthesize_stat_config(int subtest);
+int test__synthesize_stat(int subtest);
+int test__synthesize_stat_round(int subtest);
+int test__event_update(int subtest);
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
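
The subtest block added to struct test above is what the generic test driver (not shown in this hunk) iterates over. A hedged sketch of how a table entry and a driver loop could consume it; the run_subtests() helper and its output format are illustrative assumptions, only the field names and the test__llvm_* accessors come from the header (assumes "tests.h" plus <stdio.h>):

	static struct test llvm_entry = {
		.desc = "LLVM search and compile",
		.func = test__llvm,
		.subtest = {
			.skip_if_fail	= true,
			.get_nr		= test__llvm_subtest_get_nr,
			.get_desc	= test__llvm_subtest_get_desc,
		},
	};

	/* Illustrative driver: run each subtest, or the whole test when there are none. */
	static void run_subtests(struct test *t)
	{
		int i, nr = t->subtest.get_nr ? t->subtest.get_nr() : 0;

		if (!nr) {
			t->func(-1);	/* no subtests: the index is unused */
			return;
		}

		for (i = 0; i < nr; i++)
			printf("%s / %s: %s\n", t->desc, t->subtest.get_desc(i),
			       t->func(i) ? "FAILED" : "Ok");
	}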
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index 138a0e3431fa..fccde848fe9c 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -4,7 +4,7 @@
#include "thread_map.h"
#include "debug.h"
-int test__thread_map(void)
+int test__thread_map(int subtest __maybe_unused)
{
struct thread_map *map;
@@ -40,3 +40,46 @@ int test__thread_map(void)
thread_map__put(map);
return 0;
}
+
+static int process_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct thread_map_event *map = &event->thread_map;
+ struct thread_map *threads;
+
+ TEST_ASSERT_VAL("wrong nr", map->nr == 1);
+ TEST_ASSERT_VAL("wrong pid", map->entries[0].pid == (u64) getpid());
+ TEST_ASSERT_VAL("wrong comm", !strcmp(map->entries[0].comm, "perf"));
+
+ threads = thread_map__new_event(&event->thread_map);
+ TEST_ASSERT_VAL("failed to alloc map", threads);
+
+ TEST_ASSERT_VAL("wrong nr", threads->nr == 1);
+ TEST_ASSERT_VAL("wrong pid",
+ thread_map__pid(threads, 0) == getpid());
+ TEST_ASSERT_VAL("wrong comm",
+ thread_map__comm(threads, 0) &&
+ !strcmp(thread_map__comm(threads, 0), "perf"));
+ TEST_ASSERT_VAL("wrong refcnt",
+ atomic_read(&threads->refcnt) == 1);
+ thread_map__put(threads);
+ return 0;
+}
+
+int test__thread_map_synthesize(int subtest __maybe_unused)
+{
+ struct thread_map *threads;
+
+ /* test map on current pid */
+ threads = thread_map__new_by_pid(getpid());
+ TEST_ASSERT_VAL("failed to alloc map", threads);
+
+ thread_map__read_comms(threads);
+
+ TEST_ASSERT_VAL("failed to synthesize map",
+ !perf_event__synthesize_thread_map2(NULL, threads, process_event, NULL));
+
+ return 0;
+}
diff --git a/tools/perf/tests/thread-mg-share.c b/tools/perf/tests/thread-mg-share.c
index 01fabb19d746..188b63140fc8 100644
--- a/tools/perf/tests/thread-mg-share.c
+++ b/tools/perf/tests/thread-mg-share.c
@@ -4,7 +4,7 @@
#include "map.h"
#include "debug.h"
-int test__thread_mg_share(void)
+int test__thread_mg_share(int subtest __maybe_unused)
{
struct machines machines;
struct machine *machine;
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index f5bb096c3bd9..98fe69ac553c 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -84,7 +84,7 @@ static int check_cpu_topology(char *path, struct cpu_map *map)
return 0;
}
-int test_session_topology(void)
+int test_session_topology(int subtest __maybe_unused)
{
char path[PATH_MAX];
struct cpu_map *map;
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index d677e018e504..f0bfc9e8fd9f 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -18,7 +18,7 @@ static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
-int test__vmlinux_matches_kallsyms(void)
+int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
{
int err = -1;
struct rb_node *nd;
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index e9703c0829f1..d37202121689 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -528,7 +528,7 @@ static struct ui_browser_colorset {
.colorset = HE_COLORSET_SELECTED,
.name = "selected",
.fg = "black",
- .bg = "lightgray",
+ .bg = "yellow",
},
{
.colorset = HE_COLORSET_CODE,
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index d4d7cc27252f..718bd46d47fa 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -755,11 +755,11 @@ static int annotate_browser__run(struct annotate_browser *browser,
nd = browser->curr_hot;
break;
case K_UNTAB:
- if (nd != NULL)
+ if (nd != NULL) {
nd = rb_next(nd);
if (nd == NULL)
nd = rb_first(&browser->entries);
- else
+ } else
nd = browser->curr_hot;
break;
case K_F1:
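
The braces added in the K_UNTAB case fix a misleading-indentation bug: without them the wrap-around check and the else are siblings of the first if rather than nested inside it, so a valid rb_next() result gets overwritten with curr_hot, and an initially NULL nd never falls back to it. A stripped-down restatement of the two readings, using only names from the hunk above:

	/* What the compiler saw before the fix (two sibling statements): */
	if (nd != NULL)
		nd = rb_next(nd);
	if (nd == NULL)			/* also taken when nd started out NULL */
		nd = rb_first(&browser->entries);
	else
		nd = browser->curr_hot;	/* clobbers a valid rb_next() result */

	/* The intended logic, restored by the added braces: */
	if (nd != NULL) {
		nd = rb_next(nd);
		if (nd == NULL)
			nd = rb_first(&browser->entries);
	} else
		nd = browser->curr_hot;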
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index fa9eb92c9e24..08c09ad755d2 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -178,12 +178,51 @@ static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
return n;
}
+static int callchain_node__count_flat_rows(struct callchain_node *node)
+{
+ struct callchain_list *chain;
+ char folded_sign = 0;
+ int n = 0;
+
+ list_for_each_entry(chain, &node->parent_val, list) {
+ if (!folded_sign) {
+ /* only check first chain list entry */
+ folded_sign = callchain_list__folded(chain);
+ if (folded_sign == '+')
+ return 1;
+ }
+ n++;
+ }
+
+ list_for_each_entry(chain, &node->val, list) {
+ if (!folded_sign) {
+ /* node->parent_val list might be empty */
+ folded_sign = callchain_list__folded(chain);
+ if (folded_sign == '+')
+ return 1;
+ }
+ n++;
+ }
+
+ return n;
+}
+
+static int callchain_node__count_folded_rows(struct callchain_node *node __maybe_unused)
+{
+ return 1;
+}
+
static int callchain_node__count_rows(struct callchain_node *node)
{
struct callchain_list *chain;
bool unfolded = false;
int n = 0;
+ if (callchain_param.mode == CHAIN_FLAT)
+ return callchain_node__count_flat_rows(node);
+ else if (callchain_param.mode == CHAIN_FOLDED)
+ return callchain_node__count_folded_rows(node);
+
list_for_each_entry(chain, &node->val, list) {
++n;
unfolded = chain->unfolded;
@@ -263,7 +302,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
chain = list_entry(node->val.next, struct callchain_list, list);
chain->has_children = has_sibling;
- if (!list_empty(&node->val)) {
+ if (node->val.next != node->val.prev) {
chain = list_entry(node->val.prev, struct callchain_list, list);
chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
}
@@ -279,6 +318,9 @@ static void callchain__init_have_children(struct rb_root *root)
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
callchain_node__init_have_children(node, has_sibling);
+ if (callchain_param.mode == CHAIN_FLAT ||
+ callchain_param.mode == CHAIN_FOLDED)
+ callchain_node__make_parent_list(node);
}
}
@@ -298,6 +340,9 @@ static bool hist_browser__toggle_fold(struct hist_browser *browser)
struct callchain_list *cl = container_of(ms, struct callchain_list, ms);
bool has_children;
+ if (!he || !ms)
+ return false;
+
if (ms == &he->ms)
has_children = hist_entry__toggle_fold(he);
else
@@ -435,7 +480,7 @@ static int hist_browser__run(struct hist_browser *browser, const char *help)
hists__browser_title(browser->hists, hbt, title, sizeof(title));
- if (ui_browser__show(&browser->b, title, help) < 0)
+ if (ui_browser__show(&browser->b, title, "%s", help) < 0)
return -1;
while (1) {
@@ -574,6 +619,231 @@ static bool hist_browser__check_dump_full(struct hist_browser *browser __maybe_u
#define LEVEL_OFFSET_STEP 3
+static int hist_browser__show_callchain_list(struct hist_browser *browser,
+ struct callchain_node *node,
+ struct callchain_list *chain,
+ unsigned short row, u64 total,
+ bool need_percent, int offset,
+ print_callchain_entry_fn print,
+ struct callchain_print_arg *arg)
+{
+ char bf[1024], *alloc_str;
+ const char *str;
+
+ if (arg->row_offset != 0) {
+ arg->row_offset--;
+ return 0;
+ }
+
+ alloc_str = NULL;
+ str = callchain_list__sym_name(chain, bf, sizeof(bf),
+ browser->show_dso);
+
+ if (need_percent) {
+ char buf[64];
+
+ callchain_node__scnprintf_value(node, buf, sizeof(buf),
+ total);
+
+ if (asprintf(&alloc_str, "%s %s", buf, str) < 0)
+ str = "Not enough memory!";
+ else
+ str = alloc_str;
+ }
+
+ print(browser, chain, str, offset, row, arg);
+
+ free(alloc_str);
+ return 1;
+}
+
+static int hist_browser__show_callchain_flat(struct hist_browser *browser,
+ struct rb_root *root,
+ unsigned short row, u64 total,
+ print_callchain_entry_fn print,
+ struct callchain_print_arg *arg,
+ check_output_full_fn is_output_full)
+{
+ struct rb_node *node;
+ int first_row = row, offset = LEVEL_OFFSET_STEP;
+ bool need_percent;
+
+ node = rb_first(root);
+ need_percent = node && rb_next(node);
+
+ while (node) {
+ struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+ struct rb_node *next = rb_next(node);
+ struct callchain_list *chain;
+ char folded_sign = ' ';
+ int first = true;
+ int extra_offset = 0;
+
+ list_for_each_entry(chain, &child->parent_val, list) {
+ bool was_first = first;
+
+ if (first)
+ first = false;
+ else if (need_percent)
+ extra_offset = LEVEL_OFFSET_STEP;
+
+ folded_sign = callchain_list__folded(chain);
+
+ row += hist_browser__show_callchain_list(browser, child,
+ chain, row, total,
+ was_first && need_percent,
+ offset + extra_offset,
+ print, arg);
+
+ if (is_output_full(browser, row))
+ goto out;
+
+ if (folded_sign == '+')
+ goto next;
+ }
+
+ list_for_each_entry(chain, &child->val, list) {
+ bool was_first = first;
+
+ if (first)
+ first = false;
+ else if (need_percent)
+ extra_offset = LEVEL_OFFSET_STEP;
+
+ folded_sign = callchain_list__folded(chain);
+
+ row += hist_browser__show_callchain_list(browser, child,
+ chain, row, total,
+ was_first && need_percent,
+ offset + extra_offset,
+ print, arg);
+
+ if (is_output_full(browser, row))
+ goto out;
+
+ if (folded_sign == '+')
+ break;
+ }
+
+next:
+ if (is_output_full(browser, row))
+ break;
+ node = next;
+ }
+out:
+ return row - first_row;
+}
+
+static char *hist_browser__folded_callchain_str(struct hist_browser *browser,
+ struct callchain_list *chain,
+ char *value_str, char *old_str)
+{
+ char bf[1024];
+ const char *str;
+ char *new;
+
+ str = callchain_list__sym_name(chain, bf, sizeof(bf),
+ browser->show_dso);
+ if (old_str) {
+ if (asprintf(&new, "%s%s%s", old_str,
+ symbol_conf.field_sep ?: ";", str) < 0)
+ new = NULL;
+ } else {
+ if (value_str) {
+ if (asprintf(&new, "%s %s", value_str, str) < 0)
+ new = NULL;
+ } else {
+ if (asprintf(&new, "%s", str) < 0)
+ new = NULL;
+ }
+ }
+ return new;
+}
+
+static int hist_browser__show_callchain_folded(struct hist_browser *browser,
+ struct rb_root *root,
+ unsigned short row, u64 total,
+ print_callchain_entry_fn print,
+ struct callchain_print_arg *arg,
+ check_output_full_fn is_output_full)
+{
+ struct rb_node *node;
+ int first_row = row, offset = LEVEL_OFFSET_STEP;
+ bool need_percent;
+
+ node = rb_first(root);
+ need_percent = node && rb_next(node);
+
+ while (node) {
+ struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+ struct rb_node *next = rb_next(node);
+ struct callchain_list *chain, *first_chain = NULL;
+ int first = true;
+ char *value_str = NULL, *value_str_alloc = NULL;
+ char *chain_str = NULL, *chain_str_alloc = NULL;
+
+ if (arg->row_offset != 0) {
+ arg->row_offset--;
+ goto next;
+ }
+
+ if (need_percent) {
+ char buf[64];
+
+ callchain_node__scnprintf_value(child, buf, sizeof(buf), total);
+ if (asprintf(&value_str, "%s", buf) < 0) {
+ value_str = (char *)"<...>";
+ goto do_print;
+ }
+ value_str_alloc = value_str;
+ }
+
+ list_for_each_entry(chain, &child->parent_val, list) {
+ chain_str = hist_browser__folded_callchain_str(browser,
+ chain, value_str, chain_str);
+ if (first) {
+ first = false;
+ first_chain = chain;
+ }
+
+ if (chain_str == NULL) {
+ chain_str = (char *)"Not enough memory!";
+ goto do_print;
+ }
+
+ chain_str_alloc = chain_str;
+ }
+
+ list_for_each_entry(chain, &child->val, list) {
+ chain_str = hist_browser__folded_callchain_str(browser,
+ chain, value_str, chain_str);
+ if (first) {
+ first = false;
+ first_chain = chain;
+ }
+
+ if (chain_str == NULL) {
+ chain_str = (char *)"Not enough memory!";
+ goto do_print;
+ }
+
+ chain_str_alloc = chain_str;
+ }
+
+do_print:
+ print(browser, first_chain, chain_str, offset, row++, arg);
+ free(value_str_alloc);
+ free(chain_str_alloc);
+
+next:
+ if (is_output_full(browser, row))
+ break;
+ node = next;
+ }
+
+ return row - first_row;
+}
+
static int hist_browser__show_callchain(struct hist_browser *browser,
struct rb_root *root, int level,
unsigned short row, u64 total,
@@ -592,15 +862,12 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
struct rb_node *next = rb_next(node);
- u64 cumul = callchain_cumul_hits(child);
struct callchain_list *chain;
char folded_sign = ' ';
int first = true;
int extra_offset = 0;
list_for_each_entry(chain, &child->val, list) {
- char bf[1024], *alloc_str;
- const char *str;
bool was_first = first;
if (first)
@@ -609,31 +876,16 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
extra_offset = LEVEL_OFFSET_STEP;
folded_sign = callchain_list__folded(chain);
- if (arg->row_offset != 0) {
- arg->row_offset--;
- goto do_next;
- }
-
- alloc_str = NULL;
- str = callchain_list__sym_name(chain, bf, sizeof(bf),
- browser->show_dso);
-
- if (was_first && need_percent) {
- double percent = cumul * 100.0 / total;
- if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
- str = "Not enough memory!";
- else
- str = alloc_str;
- }
-
- print(browser, chain, str, offset + extra_offset, row, arg);
+ row += hist_browser__show_callchain_list(browser, child,
+ chain, row, total,
+ was_first && need_percent,
+ offset + extra_offset,
+ print, arg);
- free(alloc_str);
-
- if (is_output_full(browser, ++row))
+ if (is_output_full(browser, row))
goto out;
-do_next:
+
if (folded_sign == '+')
break;
}
@@ -789,7 +1041,8 @@ static int hist_browser__show_entry(struct hist_browser *browser,
hist_browser__gotorc(browser, row, 0);
perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
+ if (perf_hpp__should_skip(fmt, entry->hists) ||
+ column++ < browser->b.horiz_scroll)
continue;
if (current_entry && browser->b.navkeypressed) {
@@ -844,10 +1097,22 @@ static int hist_browser__show_entry(struct hist_browser *browser,
total = entry->stat.period;
}
- printed += hist_browser__show_callchain(browser,
+ if (callchain_param.mode == CHAIN_FLAT) {
+ printed += hist_browser__show_callchain_flat(browser,
+ &entry->sorted_chain, row, total,
+ hist_browser__show_callchain_entry, &arg,
+ hist_browser__check_output_full);
+ } else if (callchain_param.mode == CHAIN_FOLDED) {
+ printed += hist_browser__show_callchain_folded(browser,
+ &entry->sorted_chain, row, total,
+ hist_browser__show_callchain_entry, &arg,
+ hist_browser__check_output_full);
+ } else {
+ printed += hist_browser__show_callchain(browser,
&entry->sorted_chain, 1, row, total,
hist_browser__show_callchain_entry, &arg,
hist_browser__check_output_full);
+ }
if (arg.is_current_entry)
browser->he_selection = entry;
@@ -880,7 +1145,7 @@ static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *
}
perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
+ if (perf_hpp__should_skip(fmt, hists) || column++ < browser->b.horiz_scroll)
continue;
ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
@@ -928,6 +1193,8 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser)
}
ui_browser__hists_init_top(browser);
+ hb->he_selection = NULL;
+ hb->selection = NULL;
for (nd = browser->top; nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
@@ -1033,6 +1300,9 @@ static void ui_browser__hists_seek(struct ui_browser *browser,
* and stop when we printed enough lines to fill the screen.
*/
do_offset:
+ if (!nd)
+ return;
+
if (offset > 0) {
do {
h = rb_entry(nd, struct hist_entry, rb_node);
@@ -1145,7 +1415,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
printed += fprintf(fp, "%c ", folded_sign);
perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt))
+ if (perf_hpp__should_skip(fmt, he->hists))
continue;
if (!first) {
@@ -1794,10 +2064,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
- if (min_pcnt) {
+ if (min_pcnt)
browser->min_pcnt = min_pcnt;
- hist_browser__update_nr_entries(browser);
- }
+ hist_browser__update_nr_entries(browser);
browser->pstack = pstack__new(3);
if (browser->pstack == NULL)
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 4b3585eed1e8..0f8dcfdfb10f 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -89,8 +89,8 @@ void perf_gtk__init_hpp(void)
perf_gtk__hpp_color_overhead_acc;
}
-static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
- GtkTreeIter *parent, int col, u64 total)
+static void perf_gtk__add_callchain_flat(struct rb_root *root, GtkTreeStore *store,
+ GtkTreeIter *parent, int col, u64 total)
{
struct rb_node *nd;
bool has_single_node = (rb_first(root) == rb_last(root));
@@ -100,13 +100,132 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
struct callchain_list *chain;
GtkTreeIter iter, new_parent;
bool need_new_parent;
- double percent;
- u64 hits, child_total;
node = rb_entry(nd, struct callchain_node, rb_node);
- hits = callchain_cumul_hits(node);
- percent = 100.0 * hits / total;
+ new_parent = *parent;
+ need_new_parent = !has_single_node;
+
+ callchain_node__make_parent_list(node);
+
+ list_for_each_entry(chain, &node->parent_val, list) {
+ char buf[128];
+
+ gtk_tree_store_append(store, &iter, &new_parent);
+
+ callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
+ gtk_tree_store_set(store, &iter, 0, buf, -1);
+
+ callchain_list__sym_name(chain, buf, sizeof(buf), false);
+ gtk_tree_store_set(store, &iter, col, buf, -1);
+
+ if (need_new_parent) {
+ /*
+ * Only show the top-most symbol in a callchain
+ * if it's not the only callchain.
+ */
+ new_parent = iter;
+ need_new_parent = false;
+ }
+ }
+
+ list_for_each_entry(chain, &node->val, list) {
+ char buf[128];
+
+ gtk_tree_store_append(store, &iter, &new_parent);
+
+ callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
+ gtk_tree_store_set(store, &iter, 0, buf, -1);
+
+ callchain_list__sym_name(chain, buf, sizeof(buf), false);
+ gtk_tree_store_set(store, &iter, col, buf, -1);
+
+ if (need_new_parent) {
+ /*
+ * Only show the top-most symbol in a callchain
+ * if it's not the only callchain.
+ */
+ new_parent = iter;
+ need_new_parent = false;
+ }
+ }
+ }
+}
+
+static void perf_gtk__add_callchain_folded(struct rb_root *root, GtkTreeStore *store,
+ GtkTreeIter *parent, int col, u64 total)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ struct callchain_node *node;
+ struct callchain_list *chain;
+ GtkTreeIter iter;
+ char buf[64];
+ char *str, *str_alloc = NULL;
+ bool first = true;
+
+ node = rb_entry(nd, struct callchain_node, rb_node);
+
+ callchain_node__make_parent_list(node);
+
+ list_for_each_entry(chain, &node->parent_val, list) {
+ char name[1024];
+
+ callchain_list__sym_name(chain, name, sizeof(name), false);
+
+ if (asprintf(&str, "%s%s%s",
+ first ? "" : str_alloc,
+ first ? "" : symbol_conf.field_sep ?: "; ",
+ name) < 0)
+ return;
+
+ first = false;
+ free(str_alloc);
+ str_alloc = str;
+ }
+
+ list_for_each_entry(chain, &node->val, list) {
+ char name[1024];
+
+ callchain_list__sym_name(chain, name, sizeof(name), false);
+
+ if (asprintf(&str, "%s%s%s",
+ first ? "" : str_alloc,
+ first ? "" : symbol_conf.field_sep ?: "; ",
+ name) < 0)
+ return;
+
+ first = false;
+ free(str_alloc);
+ str_alloc = str;
+ }
+
+ gtk_tree_store_append(store, &iter, parent);
+
+ callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
+ gtk_tree_store_set(store, &iter, 0, buf, -1);
+
+ gtk_tree_store_set(store, &iter, col, str, -1);
+
+ free(str_alloc);
+ }
+}
+
+static void perf_gtk__add_callchain_graph(struct rb_root *root, GtkTreeStore *store,
+ GtkTreeIter *parent, int col, u64 total)
+{
+ struct rb_node *nd;
+ bool has_single_node = (rb_first(root) == rb_last(root));
+
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ struct callchain_node *node;
+ struct callchain_list *chain;
+ GtkTreeIter iter, new_parent;
+ bool need_new_parent;
+ u64 child_total;
+
+ node = rb_entry(nd, struct callchain_node, rb_node);
new_parent = *parent;
need_new_parent = !has_single_node && (node->val_nr > 1);
@@ -116,7 +235,7 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
gtk_tree_store_append(store, &iter, &new_parent);
- scnprintf(buf, sizeof(buf), "%5.2f%%", percent);
+ callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1);
callchain_list__sym_name(chain, buf, sizeof(buf), false);
@@ -138,11 +257,22 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
child_total = total;
/* Now 'iter' contains info of the last callchain_list */
- perf_gtk__add_callchain(&node->rb_root, store, &iter, col,
- child_total);
+ perf_gtk__add_callchain_graph(&node->rb_root, store, &iter, col,
+ child_total);
}
}
+static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
+ GtkTreeIter *parent, int col, u64 total)
+{
+ if (callchain_param.mode == CHAIN_FLAT)
+ perf_gtk__add_callchain_flat(root, store, parent, col, total);
+ else if (callchain_param.mode == CHAIN_FOLDED)
+ perf_gtk__add_callchain_folded(root, store, parent, col, total);
+ else
+ perf_gtk__add_callchain_graph(root, store, parent, col, total);
+}
+
static void on_row_activated(GtkTreeView *view, GtkTreePath *path,
GtkTreeViewColumn *col __maybe_unused,
gpointer user_data __maybe_unused)
@@ -188,7 +318,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx = 0;
perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt))
+ if (perf_hpp__should_skip(fmt, hists))
continue;
/*
@@ -238,7 +368,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx = 0;
perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt))
+ if (perf_hpp__should_skip(fmt, h->hists))
continue;
if (fmt->color)
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 5029ba2b55af..bf2a66e254ea 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -443,7 +443,6 @@ LIST_HEAD(perf_hpp__sort_list);
void perf_hpp__init(void)
{
- struct list_head *list;
int i;
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
@@ -484,17 +483,6 @@ void perf_hpp__init(void)
if (symbol_conf.show_total_period)
hpp_dimension__add_output(PERF_HPP__PERIOD);
-
- /* prepend overhead field for backward compatiblity. */
- list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
- if (list_empty(list))
- list_add(list, &perf_hpp__sort_list);
-
- if (symbol_conf.cumulate_callchain) {
- list = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC].sort_list;
- if (list_empty(list))
- list_add(list, &perf_hpp__sort_list);
- }
}
void perf_hpp__column_register(struct perf_hpp_fmt *format)
@@ -619,7 +607,7 @@ unsigned int hists__sort_list_width(struct hists *hists)
struct perf_hpp dummy_hpp;
perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt))
+ if (perf_hpp__should_skip(fmt, hists))
continue;
if (first)
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index dfcbc90146ef..387110d50b00 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -34,10 +34,10 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
return ret;
}
-static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
+static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
+ struct callchain_list *chain,
int depth, int depth_mask, int period,
- u64 total_samples, u64 hits,
- int left_margin)
+ u64 total_samples, int left_margin)
{
int i;
size_t ret = 0;
@@ -50,10 +50,9 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
else
ret += fprintf(fp, " ");
if (!period && i == depth - 1) {
- double percent;
-
- percent = hits * 100.0 / total_samples;
- ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
+ ret += fprintf(fp, "--");
+ ret += callchain_node__fprintf_value(node, fp, total_samples);
+ ret += fprintf(fp, "--");
} else
ret += fprintf(fp, "%s", " ");
}
@@ -82,13 +81,14 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
int depth_mask, int left_margin)
{
struct rb_node *node, *next;
- struct callchain_node *child;
+ struct callchain_node *child = NULL;
struct callchain_list *chain;
int new_depth_mask = depth_mask;
u64 remaining;
size_t ret = 0;
int i;
uint entries_printed = 0;
+ int cumul_count = 0;
remaining = total_samples;
@@ -100,6 +100,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
child = rb_entry(node, struct callchain_node, rb_node);
cumul = callchain_cumul_hits(child);
remaining -= cumul;
+ cumul_count += callchain_cumul_counts(child);
/*
* The depth mask manages the output of pipes that show
@@ -120,10 +121,9 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
left_margin);
i = 0;
list_for_each_entry(chain, &child->val, list) {
- ret += ipchain__fprintf_graph(fp, chain, depth,
+ ret += ipchain__fprintf_graph(fp, child, chain, depth,
new_depth_mask, i++,
total_samples,
- cumul,
left_margin);
}
@@ -143,14 +143,23 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
if (callchain_param.mode == CHAIN_GRAPH_REL &&
remaining && remaining != total_samples) {
+ struct callchain_node rem_node = {
+ .hit = remaining,
+ };
if (!rem_sq_bracket)
return ret;
+ if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
+ rem_node.count = child->parent->children_count - cumul_count;
+ if (rem_node.count <= 0)
+ return ret;
+ }
+
new_depth_mask &= ~(1 << (depth - 1));
- ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
+ ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
new_depth_mask, 0, total_samples,
- remaining, left_margin);
+ left_margin);
}
return ret;
@@ -243,12 +252,11 @@ static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
struct rb_node *rb_node = rb_first(tree);
while (rb_node) {
- double percent;
-
chain = rb_entry(rb_node, struct callchain_node, rb_node);
- percent = chain->hit * 100.0 / total_samples;
- ret = percent_color_fprintf(fp, " %6.2f%%\n", percent);
+ ret += fprintf(fp, " ");
+ ret += callchain_node__fprintf_value(chain, fp, total_samples);
+ ret += fprintf(fp, "\n");
ret += __callchain__fprintf_flat(fp, chain, total_samples);
ret += fprintf(fp, "\n");
if (++entries_printed == callchain_param.print_limit)
@@ -260,6 +268,57 @@ static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
return ret;
}
+static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
+{
+ const char *sep = symbol_conf.field_sep ?: ";";
+ struct callchain_list *chain;
+ size_t ret = 0;
+ char bf[1024];
+ bool first;
+
+ if (!node)
+ return 0;
+
+ ret += __callchain__fprintf_folded(fp, node->parent);
+
+ first = (ret == 0);
+ list_for_each_entry(chain, &node->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ ret += fprintf(fp, "%s%s", first ? "" : sep,
+ callchain_list__sym_name(chain,
+ bf, sizeof(bf), false));
+ first = false;
+ }
+
+ return ret;
+}
+
+static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
+ u64 total_samples)
+{
+ size_t ret = 0;
+ u32 entries_printed = 0;
+ struct callchain_node *chain;
+ struct rb_node *rb_node = rb_first(tree);
+
+ while (rb_node) {
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+
+ ret += callchain_node__fprintf_value(chain, fp, total_samples);
+ ret += fprintf(fp, " ");
+ ret += __callchain__fprintf_folded(fp, chain);
+ ret += fprintf(fp, "\n");
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+
+ rb_node = rb_next(rb_node);
+ }
+
+ return ret;
+}
+
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
u64 total_samples, int left_margin,
FILE *fp)
@@ -278,6 +337,9 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
case CHAIN_FLAT:
return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
break;
+ case CHAIN_FOLDED:
+ return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
+ break;
case CHAIN_NONE:
break;
default:
@@ -323,7 +385,7 @@ static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
return 0;
perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt))
+ if (perf_hpp__should_skip(fmt, he->hists))
continue;
/*
@@ -402,7 +464,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
fprintf(fp, "# ");
perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt))
+ if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first)
@@ -428,7 +490,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
perf_hpp__for_each_format(fmt) {
unsigned int i;
- if (perf_hpp__should_skip(fmt))
+ if (perf_hpp__should_skip(fmt, hists))
continue;
if (!first)
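
The new CHAIN_FOLDED printer above emits one line per callchain: first the value (a percentage, period or count, depending on callchain_param.value), then every symbol of the chain joined with symbol_conf.field_sep (";" by default). A toy, self-contained sketch of that join, with made-up symbol names, to show the flame-graph-style output it produces:

	#include <stdio.h>

	/* Purely illustrative: print "<percent> sym1;sym2;sym3" like the folded mode. */
	static void print_folded(const char **syms, int nr, double percent)
	{
		int i;

		printf("%.2f%% ", percent);
		for (i = 0; i < nr; i++)
			printf("%s%s", i ? ";" : "", syms[i]);
		printf("\n");
	}

	int main(void)
	{
		const char *chain[] = { "main", "cmd_record", "write", "entry_SYSCALL_64" };

		/* -> "42.00% main;cmd_record;write;entry_SYSCALL_64" */
		print_folded(chain, 4, 42.0);
		return 0;
	}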
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 591b3fe3ed49..5eec53a3f4ac 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -6,24 +6,20 @@ libperf-y += config.o
libperf-y += ctype.o
libperf-y += db-export.o
libperf-y += env.o
-libperf-y += environment.o
libperf-y += event.o
libperf-y += evlist.o
libperf-y += evsel.o
-libperf-y += exec_cmd.o
-libperf-y += find_next_bit.o
-libperf-y += help.o
+libperf-y += find_bit.o
libperf-y += kallsyms.o
libperf-y += levenshtein.o
libperf-y += llvm-utils.o
-libperf-y += parse-options.o
libperf-y += parse-events.o
libperf-y += perf_regs.o
libperf-y += path.o
libperf-y += rbtree.o
+libperf-y += libstring.o
libperf-y += bitmap.o
libperf-y += hweight.o
-libperf-y += run-command.o
libperf-y += quote.o
libperf-y += strbuf.o
libperf-y += string.o
@@ -32,11 +28,9 @@ libperf-y += strfilter.o
libperf-y += top.o
libperf-y += usage.o
libperf-y += wrapper.o
-libperf-y += sigchain.o
libperf-y += dso.o
libperf-y += symbol.o
libperf-y += color.o
-libperf-y += pager.o
libperf-y += header.o
libperf-y += callchain.o
libperf-y += values.o
@@ -86,8 +80,11 @@ libperf-$(CONFIG_AUXTRACE) += intel-pt.o
libperf-$(CONFIG_AUXTRACE) += intel-bts.o
libperf-y += parse-branch-options.o
libperf-y += parse-regs-options.o
+libperf-y += term.o
+libperf-y += help-unknown-cmd.o
libperf-$(CONFIG_LIBBPF) += bpf-loader.o
+libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
libperf-$(CONFIG_LIBELF) += symbol-elf.o
libperf-$(CONFIG_LIBELF) += probe-file.o
libperf-$(CONFIG_LIBELF) += probe-event.o
@@ -110,7 +107,6 @@ libperf-$(CONFIG_ZLIB) += zlib.o
libperf-$(CONFIG_LZMA) += lzma.o
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
-CFLAGS_exec_cmd.o += -DPERF_EXEC_PATH="BUILD_STR($(perfexecdir_SQ))" -DPREFIX="BUILD_STR($(prefix_SQ))"
$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
$(call rule_mkdir)
@@ -136,8 +132,10 @@ CFLAGS_pmu-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
-CFLAGS_find_next_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_bitmap.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_find_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_parse-events.o += -Wno-redundant-decls
@@ -145,7 +143,11 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
-$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
+$(OUTPUT)util/bitmap.o: ../lib/bitmap.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)util/find_bit.o: ../lib/find_bit.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
@@ -153,6 +155,10 @@ $(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
+$(OUTPUT)util/libstring.o: ../lib/string.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
$(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 1dd1949b0e79..b795b6994144 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -65,6 +65,11 @@ static int call__parse(struct ins_operands *ops)
name++;
+#ifdef __arm__
+ if (strchr(name, '+'))
+ return -1;
+#endif
+
tok = strchr(name, '>');
if (tok == NULL)
return -1;
@@ -246,7 +251,11 @@ static int mov__parse(struct ins_operands *ops)
return -1;
target = ++s;
+#ifdef __arm__
+ comment = strchr(s, ';');
+#else
comment = strchr(s, '#');
+#endif
if (comment != NULL)
s = comment - 1;
@@ -354,6 +363,20 @@ static struct ins instructions[] = {
{ .name = "addq", .ops = &mov_ops, },
{ .name = "addw", .ops = &mov_ops, },
{ .name = "and", .ops = &mov_ops, },
+#ifdef __arm__
+ { .name = "b", .ops = &jump_ops, }, // might also be a call
+ { .name = "bcc", .ops = &jump_ops, },
+ { .name = "bcs", .ops = &jump_ops, },
+ { .name = "beq", .ops = &jump_ops, },
+ { .name = "bge", .ops = &jump_ops, },
+ { .name = "bgt", .ops = &jump_ops, },
+ { .name = "bhi", .ops = &jump_ops, },
+ { .name = "bl", .ops = &call_ops, },
+ { .name = "blt", .ops = &jump_ops, },
+ { .name = "bls", .ops = &jump_ops, },
+ { .name = "blx", .ops = &call_ops, },
+ { .name = "bne", .ops = &jump_ops, },
+#endif
{ .name = "bts", .ops = &mov_ops, },
{ .name = "call", .ops = &call_ops, },
{ .name = "callq", .ops = &call_ops, },
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 7f10430af39c..360fda01f3b0 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -45,7 +45,7 @@
#include "event.h"
#include "session.h"
#include "debug.h"
-#include "parse-options.h"
+#include <subcmd/parse-options.h>
#include "intel-pt.h"
#include "intel-bts.h"
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 4c50411371db..540a7efa657e 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -5,11 +5,15 @@
* Copyright (C) 2015 Huawei Inc.
*/
+#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <linux/err.h>
+#include <linux/string.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
+#include "bpf-prologue.h"
+#include "llvm-utils.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "llvm-utils.h"
@@ -32,6 +36,10 @@ DEFINE_PRINT_FN(debug, 1)
struct bpf_prog_priv {
struct perf_probe_event pev;
+ bool need_prologue;
+ struct bpf_insn *insns_buf;
+ int nr_types;
+ int *type_mapping;
};
static bool libbpf_initialized;
@@ -106,10 +114,178 @@ bpf_prog_priv__clear(struct bpf_program *prog __maybe_unused,
struct bpf_prog_priv *priv = _priv;
cleanup_perf_probe_events(&priv->pev, 1);
+ zfree(&priv->insns_buf);
+ zfree(&priv->type_mapping);
free(priv);
}
static int
+prog_config__exec(const char *value, struct perf_probe_event *pev)
+{
+ pev->uprobes = true;
+ pev->target = strdup(value);
+ if (!pev->target)
+ return -ENOMEM;
+ return 0;
+}
+
+static int
+prog_config__module(const char *value, struct perf_probe_event *pev)
+{
+ pev->uprobes = false;
+ pev->target = strdup(value);
+ if (!pev->target)
+ return -ENOMEM;
+ return 0;
+}
+
+static int
+prog_config__bool(const char *value, bool *pbool, bool invert)
+{
+ int err;
+ bool bool_value;
+
+ if (!pbool)
+ return -EINVAL;
+
+ err = strtobool(value, &bool_value);
+ if (err)
+ return err;
+
+ *pbool = invert ? !bool_value : bool_value;
+ return 0;
+}
+
+static int
+prog_config__inlines(const char *value,
+ struct perf_probe_event *pev __maybe_unused)
+{
+ return prog_config__bool(value, &probe_conf.no_inlines, true);
+}
+
+static int
+prog_config__force(const char *value,
+ struct perf_probe_event *pev __maybe_unused)
+{
+ return prog_config__bool(value, &probe_conf.force_add, false);
+}
+
+static struct {
+ const char *key;
+ const char *usage;
+ const char *desc;
+ int (*func)(const char *, struct perf_probe_event *);
+} bpf_prog_config_terms[] = {
+ {
+ .key = "exec",
+ .usage = "exec=<full path of file>",
+ .desc = "Set uprobe target",
+ .func = prog_config__exec,
+ },
+ {
+ .key = "module",
+ .usage = "module=<module name> ",
+ .desc = "Set kprobe module",
+ .func = prog_config__module,
+ },
+ {
+ .key = "inlines",
+ .usage = "inlines=[yes|no] ",
+ .desc = "Probe at inline symbol",
+ .func = prog_config__inlines,
+ },
+ {
+ .key = "force",
+ .usage = "force=[yes|no] ",
+ .desc = "Forcibly add events with existing name",
+ .func = prog_config__force,
+ },
+};
+
+static int
+do_prog_config(const char *key, const char *value,
+ struct perf_probe_event *pev)
+{
+ unsigned int i;
+
+ pr_debug("config bpf program: %s=%s\n", key, value);
+ for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
+ if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
+ return bpf_prog_config_terms[i].func(value, pev);
+
+ pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
+ key, value);
+
+ pr_debug("\nHint: Valid options are:\n");
+ for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
+ pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
+ bpf_prog_config_terms[i].desc);
+ pr_debug("\n");
+
+ return -BPF_LOADER_ERRNO__PROGCONF_TERM;
+}
+
+static const char *
+parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
+{
+ char *text = strdup(config_str);
+ char *sep, *line;
+ const char *main_str = NULL;
+ int err = 0;
+
+ if (!text) {
+ pr_debug("No enough memory: dup config_str failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ line = text;
+ while ((sep = strchr(line, ';'))) {
+ char *equ;
+
+ *sep = '\0';
+ equ = strchr(line, '=');
+ if (!equ) {
+ pr_warning("WARNING: invalid config in BPF object: %s\n",
+ line);
+ pr_warning("\tShould be 'key=value'.\n");
+ goto nextline;
+ }
+ *equ = '\0';
+
+ err = do_prog_config(line, equ + 1, pev);
+ if (err)
+ break;
+nextline:
+ line = sep + 1;
+ }
+
+ if (!err)
+ main_str = config_str + (line - text);
+ free(text);
+
+ return err ? ERR_PTR(err) : main_str;
+}
+
+static int
+parse_prog_config(const char *config_str, struct perf_probe_event *pev)
+{
+ int err;
+ const char *main_str = parse_prog_config_kvpair(config_str, pev);
+
+ if (IS_ERR(main_str))
+ return PTR_ERR(main_str);
+
+ err = parse_perf_probe_command(main_str, pev);
+ if (err < 0) {
+ pr_debug("bpf: '%s' is not a valid config string\n",
+ config_str);
+ /* parse failed, no need to clear pev. */
+ return -BPF_LOADER_ERRNO__CONFIG;
+ }
+ return 0;
+}
+
+static int
config_bpf_program(struct bpf_program *prog)
{
struct perf_probe_event *pev = NULL;
@@ -117,6 +293,10 @@ config_bpf_program(struct bpf_program *prog)
const char *config_str;
int err;
+ /* Initialize per-program probing setting */
+ probe_conf.no_inlines = false;
+ probe_conf.force_add = false;
+
config_str = bpf_program__title(prog, false);
if (IS_ERR(config_str)) {
pr_debug("bpf: unable to get title for program\n");
@@ -131,13 +311,9 @@ config_bpf_program(struct bpf_program *prog)
pev = &priv->pev;
pr_debug("bpf: config program '%s'\n", config_str);
- err = parse_perf_probe_command(config_str, pev);
- if (err < 0) {
- pr_debug("bpf: '%s' is not a valid config string\n",
- config_str);
- err = -BPF_LOADER_ERRNO__CONFIG;
+ err = parse_prog_config(config_str, pev);
+ if (err)
goto errout;
- }
if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
@@ -197,6 +373,220 @@ static int bpf__prepare_probe(void)
return err;
}
+static int
+preproc_gen_prologue(struct bpf_program *prog, int n,
+ struct bpf_insn *orig_insns, int orig_insns_cnt,
+ struct bpf_prog_prep_result *res)
+{
+ struct probe_trace_event *tev;
+ struct perf_probe_event *pev;
+ struct bpf_prog_priv *priv;
+ struct bpf_insn *buf;
+ size_t prologue_cnt = 0;
+ int i, err;
+
+ err = bpf_program__get_private(prog, (void **)&priv);
+ if (err || !priv)
+ goto errout;
+
+ pev = &priv->pev;
+
+ if (n < 0 || n >= priv->nr_types)
+ goto errout;
+
+ /* Find a tev that belongs to that type */
+ for (i = 0; i < pev->ntevs; i++) {
+ if (priv->type_mapping[i] == n)
+ break;
+ }
+
+ if (i >= pev->ntevs) {
+ pr_debug("Internal error: prologue type %d not found\n", n);
+ return -BPF_LOADER_ERRNO__PROLOGUE;
+ }
+
+ tev = &pev->tevs[i];
+
+ buf = priv->insns_buf;
+ err = bpf__gen_prologue(tev->args, tev->nargs,
+ buf, &prologue_cnt,
+ BPF_MAXINSNS - orig_insns_cnt);
+ if (err) {
+ const char *title;
+
+ title = bpf_program__title(prog, false);
+ if (!title)
+ title = "[unknown]";
+
+ pr_debug("Failed to generate prologue for program %s\n",
+ title);
+ return err;
+ }
+
+ memcpy(&buf[prologue_cnt], orig_insns,
+ sizeof(struct bpf_insn) * orig_insns_cnt);
+
+ res->new_insn_ptr = buf;
+ res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
+ res->pfd = NULL;
+ return 0;
+
+errout:
+ pr_debug("Internal error in preproc_gen_prologue\n");
+ return -BPF_LOADER_ERRNO__PROLOGUE;
+}
+
+/*
+ * compare_tev_args is reflexive, transitive and antisymmetric.
+ * I can prove it, but this margin is too narrow to contain the proof.
+ */
+static int compare_tev_args(const void *ptev1, const void *ptev2)
+{
+ int i, ret;
+ const struct probe_trace_event *tev1 =
+ *(const struct probe_trace_event **)ptev1;
+ const struct probe_trace_event *tev2 =
+ *(const struct probe_trace_event **)ptev2;
+
+ ret = tev2->nargs - tev1->nargs;
+ if (ret)
+ return ret;
+
+ for (i = 0; i < tev1->nargs; i++) {
+ struct probe_trace_arg *arg1, *arg2;
+ struct probe_trace_arg_ref *ref1, *ref2;
+
+ arg1 = &tev1->args[i];
+ arg2 = &tev2->args[i];
+
+ ret = strcmp(arg1->value, arg2->value);
+ if (ret)
+ return ret;
+
+ ref1 = arg1->ref;
+ ref2 = arg2->ref;
+
+ while (ref1 && ref2) {
+ ret = ref2->offset - ref1->offset;
+ if (ret)
+ return ret;
+
+ ref1 = ref1->next;
+ ref2 = ref2->next;
+ }
+
+ if (ref1 || ref2)
+ return ref2 ? 1 : -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Assign a type number to each tev in a pev.
+ * mapping is an array with the same number of slots as there are tevs in that pev.
+ * nr_types will be set to the number of types.
+ */
+static int map_prologue(struct perf_probe_event *pev, int *mapping,
+ int *nr_types)
+{
+ int i, type = 0;
+ struct probe_trace_event **ptevs;
+
+ size_t array_sz = sizeof(*ptevs) * pev->ntevs;
+
+ ptevs = malloc(array_sz);
+ if (!ptevs) {
+ pr_debug("No ehough memory: alloc ptevs failed\n");
+ return -ENOMEM;
+ }
+
+ pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
+ for (i = 0; i < pev->ntevs; i++)
+ ptevs[i] = &pev->tevs[i];
+
+ qsort(ptevs, pev->ntevs, sizeof(*ptevs),
+ compare_tev_args);
+
+ for (i = 0; i < pev->ntevs; i++) {
+ int n;
+
+ n = ptevs[i] - pev->tevs;
+ if (i == 0) {
+ mapping[n] = type;
+ pr_debug("mapping[%d]=%d\n", n, type);
+ continue;
+ }
+
+ if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
+ mapping[n] = type;
+ else
+ mapping[n] = ++type;
+
+ pr_debug("mapping[%d]=%d\n", n, mapping[n]);
+ }
+ free(ptevs);
+ *nr_types = type + 1;
+
+ return 0;
+}
+
+static int hook_load_preprocessor(struct bpf_program *prog)
+{
+ struct perf_probe_event *pev;
+ struct bpf_prog_priv *priv;
+ bool need_prologue = false;
+ int err, i;
+
+ err = bpf_program__get_private(prog, (void **)&priv);
+ if (err || !priv) {
+ pr_debug("Internal error when hook preprocessor\n");
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+
+ pev = &priv->pev;
+ for (i = 0; i < pev->ntevs; i++) {
+ struct probe_trace_event *tev = &pev->tevs[i];
+
+ if (tev->nargs > 0) {
+ need_prologue = true;
+ break;
+ }
+ }
+
+ /*
+ * Since none of the tevs has any arguments, we don't need to
+ * generate a prologue.
+ */
+ if (!need_prologue) {
+ priv->need_prologue = false;
+ return 0;
+ }
+
+ priv->need_prologue = true;
+ priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
+ if (!priv->insns_buf) {
+ pr_debug("No enough memory: alloc insns_buf failed\n");
+ return -ENOMEM;
+ }
+
+ priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
+ if (!priv->type_mapping) {
+ pr_debug("No enough memory: alloc type_mapping failed\n");
+ return -ENOMEM;
+ }
+ memset(priv->type_mapping, -1,
+ sizeof(int) * pev->ntevs);
+
+ err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
+ if (err)
+ return err;
+
+ err = bpf_program__set_prep(prog, priv->nr_types,
+ preproc_gen_prologue);
+ return err;
+}
+
int bpf__probe(struct bpf_object *obj)
{
int err = 0;
@@ -231,6 +621,18 @@ int bpf__probe(struct bpf_object *obj)
pr_debug("bpf_probe: failed to apply perf probe events");
goto out;
}
+
+ /*
+ * After probing, let's deal with the prologue, which
+ * adds an argument fetcher to BPF programs.
+ *
+ * hook_load_preprocessor() hooks a pre-processor
+ * to the bpf_program, letting it generate the prologue
+ * dynamically during loading.
+ */
+ err = hook_load_preprocessor(prog);
+ if (err)
+ goto out;
}
out:
return err < 0 ? err : 0;
@@ -314,7 +716,14 @@ int bpf__foreach_tev(struct bpf_object *obj,
for (i = 0; i < pev->ntevs; i++) {
tev = &pev->tevs[i];
- fd = bpf_program__fd(prog);
+ if (priv->need_prologue) {
+ int type = priv->type_mapping[i];
+
+ fd = bpf_program__nth_fd(prog, type);
+ } else {
+ fd = bpf_program__fd(prog);
+ }
+
if (fd < 0) {
pr_debug("bpf: failed to get file descriptor\n");
return fd;
@@ -340,6 +749,10 @@ static const char *bpf_loader_strerror_table[NR_ERRNO] = {
[ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
[ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
[ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
+ [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
+ [ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
+ [ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
+ [ERRCODE_OFFSET(PROLOGUEOOB)] = "Offset out of bound for prologue",
};
static int
@@ -420,7 +833,11 @@ int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
int err, char *buf, size_t size)
{
bpf__strerror_head(err, buf, size);
- bpf__strerror_entry(EEXIST, "Probe point exist. Try use 'perf probe -d \"*\"'");
+ case BPF_LOADER_ERRNO__PROGCONF_TERM: {
+ scnprintf(buf, size, "%s (add -v to see detail)", emsg);
+ break;
+ }
+ bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
bpf__strerror_entry(EACCES, "You need to be root");
bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
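
With the parser added above, the section name of a BPF scriptlet program can now carry "key=value;" terms (exec, module, inlines, force) in front of the probe definition; parse_prog_config_kvpair() strips the terms, do_prog_config() applies them, and only the remainder goes through parse_perf_probe_command(). A hedged sketch of what such a section could look like; the SEC() macro, the binary path and the probe spec below are all made up for illustration:

	/* SEC() is the usual section-placement helper used by BPF scriptlets. */
	#define SEC(NAME) __attribute__((section(NAME), used))

	/* "exec=...;force=yes;inlines=no;" are config terms handled by
	 * do_prog_config(); "my_func count" is still an ordinary probe spec. */
	SEC("exec=/usr/local/bin/mydaemon;force=yes;inlines=no;my_func count")
	int bpf_func__my_func(void *ctx)
	{
		return 0;
	}

After probing, map_prologue() then groups tevs that share an identical argument list into one "type", so bpf_program__set_prep() only has to generate a prologue per distinct signature rather than per probe point.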
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index 9caf3ae4acf3..6fdc0457e2b6 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -20,6 +20,10 @@ enum bpf_loader_errno {
BPF_LOADER_ERRNO__EVENTNAME, /* Event name is missing */
BPF_LOADER_ERRNO__INTERNAL, /* BPF loader internal error */
BPF_LOADER_ERRNO__COMPILE, /* Error when compiling BPF scriptlet */
+ BPF_LOADER_ERRNO__PROGCONF_TERM,/* Invalid program config term in config string */
+ BPF_LOADER_ERRNO__PROLOGUE, /* Failed to generate prologue */
+ BPF_LOADER_ERRNO__PROLOGUE2BIG, /* Prologue too big for program */
+ BPF_LOADER_ERRNO__PROLOGUEOOB, /* Offset out of bound for prologue */
__BPF_LOADER_ERRNO__END,
};
diff --git a/tools/perf/util/bpf-prologue.c b/tools/perf/util/bpf-prologue.c
new file mode 100644
index 000000000000..6cdbee119ceb
--- /dev/null
+++ b/tools/perf/util/bpf-prologue.c
@@ -0,0 +1,455 @@
+/*
+ * bpf-prologue.c
+ *
+ * Copyright (C) 2015 He Kuang <hekuang@huawei.com>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ */
+
+#include <bpf/libbpf.h>
+#include "perf.h"
+#include "debug.h"
+#include "bpf-loader.h"
+#include "bpf-prologue.h"
+#include "probe-finder.h"
+#include <dwarf-regs.h>
+#include <linux/filter.h>
+
+#define BPF_REG_SIZE 8
+
+#define JMP_TO_ERROR_CODE -1
+#define JMP_TO_SUCCESS_CODE -2
+#define JMP_TO_USER_CODE -3
+
+struct bpf_insn_pos {
+ struct bpf_insn *begin;
+ struct bpf_insn *end;
+ struct bpf_insn *pos;
+};
+
+static inline int
+pos_get_cnt(struct bpf_insn_pos *pos)
+{
+ return pos->pos - pos->begin;
+}
+
+static int
+append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
+{
+ if (!pos->pos)
+ return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
+
+ if (pos->pos + 1 >= pos->end) {
+ pr_err("bpf prologue: prologue too long\n");
+ pos->pos = NULL;
+ return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
+ }
+
+ *(pos->pos)++ = new_insn;
+ return 0;
+}
+
+static int
+check_pos(struct bpf_insn_pos *pos)
+{
+ if (!pos->pos || pos->pos >= pos->end)
+ return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
+ return 0;
+}
+
+/* Give it a shorter name */
+#define ins(i, p) append_insn((i), (p))
+
+/*
+ * Given a register name (in 'reg'), generate the instruction to
+ * load that register into an eBPF register rd:
+ * 'ldd target_reg, offset(ctx_reg)', where:
+ * ctx_reg is pre-initialized to a pointer to 'struct pt_regs'.
+ */
+static int
+gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
+ const char *reg, int target_reg)
+{
+ int offset = regs_query_register_offset(reg);
+
+ if (offset < 0) {
+ pr_err("bpf: prologue: failed to get register %s\n",
+ reg);
+ return offset;
+ }
+ ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);
+
+ return check_pos(pos);
+}
+
+/*
+ * Generate a BPF_FUNC_probe_read function call.
+ *
+ * src_base_addr_reg is a register holding base address,
+ * dst_addr_reg is a register holding dest address (on stack),
+ * result is:
+ *
+ * *[dst_addr_reg] = *([src_base_addr_reg] + offset)
+ *
+ * Arguments of BPF_FUNC_probe_read:
+ * ARG1: ptr to stack (dest)
+ * ARG2: size (8)
+ * ARG3: unsafe ptr (src)
+ */
+static int
+gen_read_mem(struct bpf_insn_pos *pos,
+ int src_base_addr_reg,
+ int dst_addr_reg,
+ long offset)
+{
+ /* mov arg3, src_base_addr_reg */
+ if (src_base_addr_reg != BPF_REG_ARG3)
+ ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
+ /* add arg3, #offset */
+ if (offset)
+ ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);
+
+ /* mov arg2, #reg_size */
+ ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);
+
+ /* mov arg1, dst_addr_reg */
+ if (dst_addr_reg != BPF_REG_ARG1)
+ ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);
+
+ /* Call probe_read */
+ ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);
+ /*
+ * Error processing: if the read fails, jump to the error code;
+ * the jump will be relocated so that its target is the start of
+ * the error processing code.
+ */
+ ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
+ pos);
+
+ return check_pos(pos);
+}
+
+/*
+ * Each arg should be a bare register. Fetch and save them into argument
+ * registers (r3 - r5).
+ *
+ * BPF_REG_1 should have been initialized with pointer to
+ * 'struct pt_regs'.
+ */
+static int
+gen_prologue_fastpath(struct bpf_insn_pos *pos,
+ struct probe_trace_arg *args, int nargs)
+{
+ int i, err = 0;
+
+ for (i = 0; i < nargs; i++) {
+ err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
+ BPF_PROLOGUE_START_ARG_REG + i);
+ if (err)
+ goto errout;
+ }
+
+ return check_pos(pos);
+errout:
+ return err;
+}
+
+/*
+ * Slow path:
+ * At least one argument has the form of 'offset($rx)'.
+ *
+ * The following code first stores them on the stack, then loads all of
+ * them into r2 - r5.
+ * Before final loading, the final result should be:
+ *
+ * low address
+ * BPF_REG_FP - 24 ARG3
+ * BPF_REG_FP - 16 ARG2
+ * BPF_REG_FP - 8 ARG1
+ * BPF_REG_FP
+ * high address
+ *
+ * For each argument (described as: offn(...off2(off1(reg)))),
+ * generates following code:
+ *
+ * r7 <- fp
+ * r7 <- r7 - stack_offset // Ideally r7 would be initialized from
+ * // fp once, before generating args. However,
+ * // eBPF won't regard r7 as a stack pointer
+ * // if it is produced by subtracting 8 from
+ * // another stack pointer that is not fp.
+ * // This is why we have to reset r7
+ * // to fp for each variable.
+ * r3 <- value of 'reg'-> generated using gen_ldx_reg_from_ctx()
+ * (r7) <- r3 // skip following instructions for bare reg
+ * r3 <- r3 + off1 . // skip if off1 == 0
+ * r2 <- 8 \
+ * r1 <- r7 |-> generated by gen_read_mem()
+ * call probe_read /
+ * jnei r0, 0, err ./
+ * r3 <- (r7)
+ * r3 <- r3 + off2 . // skip if off2 == 0
+ * r2 <- 8 \ // r2 may be broken by probe_read, so set again
+ * r1 <- r7 |-> generated by gen_read_mem()
+ * call probe_read /
+ * jnei r0, 0, err ./
+ * ...
+ */
+static int
+gen_prologue_slowpath(struct bpf_insn_pos *pos,
+ struct probe_trace_arg *args, int nargs)
+{
+ int err, i;
+
+ for (i = 0; i < nargs; i++) {
+ struct probe_trace_arg *arg = &args[i];
+ const char *reg = arg->value;
+ struct probe_trace_arg_ref *ref = NULL;
+ int stack_offset = (i + 1) * -8;
+
+ pr_debug("prologue: fetch arg %d, base reg is %s\n",
+ i, reg);
+
+ /* value of base register is stored into ARG3 */
+ err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
+ BPF_REG_ARG3);
+ if (err) {
+ pr_err("prologue: failed to get offset of register %s\n",
+ reg);
+ goto errout;
+ }
+
+ /* Make r7 the stack pointer. */
+ ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
+ /* r7 += -8 */
+ ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
+ /*
+ * Store r3 (base register) onto stack
+ * Ensure fp[offset] is set.
+ * fp is the only valid base register when storing
+ * into stack. We are not allowed to use r7 as base
+ * register here.
+ */
+ ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
+ stack_offset), pos);
+
+ ref = arg->ref;
+ while (ref) {
+ pr_debug("prologue: arg %d: offset %ld\n",
+ i, ref->offset);
+ err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
+ ref->offset);
+ if (err) {
+ pr_err("prologue: failed to generate probe_read function call\n");
+ goto errout;
+ }
+
+ ref = ref->next;
+ /*
+ * Load previous result into ARG3. Use
+ * BPF_REG_FP instead of r7 because verifier
+ * allows FP based addressing only.
+ */
+ if (ref)
+ ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
+ BPF_REG_FP, stack_offset), pos);
+ }
+ }
+
+ /* Final pass: read to registers */
+ for (i = 0; i < nargs; i++)
+ ins(BPF_LDX_MEM(BPF_DW, BPF_PROLOGUE_START_ARG_REG + i,
+ BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
+
+ ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);
+
+ return check_pos(pos);
+errout:
+ return err;
+}
+
+static int
+prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
+ struct bpf_insn *success_code, struct bpf_insn *user_code)
+{
+ struct bpf_insn *insn;
+
+ if (check_pos(pos))
+ return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
+
+ for (insn = pos->begin; insn < pos->pos; insn++) {
+ struct bpf_insn *target;
+ u8 class = BPF_CLASS(insn->code);
+ u8 opcode;
+
+ if (class != BPF_JMP)
+ continue;
+ opcode = BPF_OP(insn->code);
+ if (opcode == BPF_CALL)
+ continue;
+
+ switch (insn->off) {
+ case JMP_TO_ERROR_CODE:
+ target = error_code;
+ break;
+ case JMP_TO_SUCCESS_CODE:
+ target = success_code;
+ break;
+ case JMP_TO_USER_CODE:
+ target = user_code;
+ break;
+ default:
+ pr_err("bpf prologue: internal error: relocation failed\n");
+ return -BPF_LOADER_ERRNO__PROLOGUE;
+ }
+
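+ /* BPF jump offsets are counted from the instruction following the jump */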
+ insn->off = target - (insn + 1);
+ }
+ return 0;
+}
+
+int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
+ struct bpf_insn *new_prog, size_t *new_cnt,
+ size_t cnt_space)
+{
+ struct bpf_insn *success_code = NULL;
+ struct bpf_insn *error_code = NULL;
+ struct bpf_insn *user_code = NULL;
+ struct bpf_insn_pos pos;
+ bool fastpath = true;
+ int err = 0, i;
+
+ if (!new_prog || !new_cnt)
+ return -EINVAL;
+
+ if (cnt_space > BPF_MAXINSNS)
+ cnt_space = BPF_MAXINSNS;
+
+ pos.begin = new_prog;
+ pos.end = new_prog + cnt_space;
+ pos.pos = new_prog;
+
+ if (!nargs) {
+ ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
+ &pos);
+
+ if (check_pos(&pos))
+ goto errout;
+
+ *new_cnt = pos_get_cnt(&pos);
+ return 0;
+ }
+
+ if (nargs > BPF_PROLOGUE_MAX_ARGS) {
+ pr_warning("bpf: prologue: %d arguments are dropped\n",
+ nargs - BPF_PROLOGUE_MAX_ARGS);
+ nargs = BPF_PROLOGUE_MAX_ARGS;
+ }
+
+ /* First pass: validation */
+ for (i = 0; i < nargs; i++) {
+ struct probe_trace_arg_ref *ref = args[i].ref;
+
+ if (args[i].value[0] == '@') {
+ /* TODO: fetch global variable */
+ pr_err("bpf: prologue: global %s%+ld not support\n",
+ args[i].value, ref ? ref->offset : 0);
+ return -ENOTSUP;
+ }
+
+ while (ref) {
+ /* fastpath is true only if all args have ref == NULL */
+ fastpath = false;
+
+ /*
+ * The instruction encodes its immediate value as
+ * an s32, while ref->offset is a long. On systems
+ * where a long doesn't fit into an s32, refuse to
+ * process if ref->offset is too large (or too small).
+ */
+#ifdef __LP64__
+#define OFFSET_MAX ((1LL << 31) - 1)
+#define OFFSET_MIN ((1LL << 31) * -1)
+ if (ref->offset > OFFSET_MAX ||
+ ref->offset < OFFSET_MIN) {
+ pr_err("bpf: prologue: offset out of bound: %ld\n",
+ ref->offset);
+ return -BPF_LOADER_ERRNO__PROLOGUEOOB;
+ }
+#endif
+ ref = ref->next;
+ }
+ }
+ pr_debug("prologue: pass validation\n");
+
+ if (fastpath) {
+ /* If all variables are registers... */
+ pr_debug("prologue: fast path\n");
+ err = gen_prologue_fastpath(&pos, args, nargs);
+ if (err)
+ goto errout;
+ } else {
+ pr_debug("prologue: slow path\n");
+
+ /* Initialization: move ctx to a callee saved register. */
+ ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);
+
+ err = gen_prologue_slowpath(&pos, args, nargs);
+ if (err)
+ goto errout;
+ /*
+ * start of ERROR_CODE (only the slow path needs an error code)
+ * mov r2 <- 1 // r2 is the error number
+ * mov r3 <- 0 // r3, r4... should be touched or the
+ * // verifier would complain
+ * mov r4 <- 0
+ * ...
+ * goto usercode
+ */
+ error_code = pos.pos;
+ ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
+ &pos);
+
+ for (i = 0; i < nargs; i++)
+ ins(BPF_ALU64_IMM(BPF_MOV,
+ BPF_PROLOGUE_START_ARG_REG + i,
+ 0),
+ &pos);
+ ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
+ &pos);
+ }
+
+ /*
+ * start of SUCCESS_CODE:
+ * mov r2 <- 0
+ * goto usercode // skip
+ */
+ success_code = pos.pos;
+ ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);
+
+ /*
+ * start of USER_CODE:
+ * Restore ctx to r1
+ */
+ user_code = pos.pos;
+ if (!fastpath) {
+ /*
+ * Only the slow path needs to restore ctx. In the fast
+ * path, registers are loaded directly from r1.
+ */
+ ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
+ err = prologue_relocate(&pos, error_code, success_code,
+ user_code);
+ if (err)
+ goto errout;
+ }
+
+ err = check_pos(&pos);
+ if (err)
+ goto errout;
+
+ *new_cnt = pos_get_cnt(&pos);
+ return 0;
+errout:
+ return err;
+}
diff --git a/tools/perf/util/bpf-prologue.h b/tools/perf/util/bpf-prologue.h
new file mode 100644
index 000000000000..d94cbea12899
--- /dev/null
+++ b/tools/perf/util/bpf-prologue.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015, He Kuang <hekuang@huawei.com>
+ * Copyright (C) 2015, Huawei Inc.
+ */
+#ifndef __BPF_PROLOGUE_H
+#define __BPF_PROLOGUE_H
+
+#include <linux/compiler.h>
+#include <linux/filter.h>
+#include "probe-event.h"
+
+#define BPF_PROLOGUE_MAX_ARGS 3
+#define BPF_PROLOGUE_START_ARG_REG BPF_REG_3
+#define BPF_PROLOGUE_FETCH_RESULT_REG BPF_REG_2
+
+#ifdef HAVE_BPF_PROLOGUE
+int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
+ struct bpf_insn *new_prog, size_t *new_cnt,
+ size_t cnt_space);
+#else
+static inline int
+bpf__gen_prologue(struct probe_trace_arg *args __maybe_unused,
+ int nargs __maybe_unused,
+ struct bpf_insn *new_prog __maybe_unused,
+ size_t *new_cnt,
+ size_t cnt_space __maybe_unused)
+{
+ if (!new_cnt)
+ return -EINVAL;
+ *new_cnt = 0;
+ return -ENOTSUP;
+}
+#endif
+#endif /* __BPF_PROLOGUE_H */
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 217b5a60e2ab..6a7e273a514a 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -91,7 +91,7 @@ int build_id__sprintf(const u8 *build_id, int len, char *bf)
bid += 2;
}
- return raw - build_id;
+ return (bid - bf) + 1;
}
int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id)
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index c861373aaed3..07b5d63947b1 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -4,9 +4,12 @@
#include <stdbool.h>
#include "util.h"
#include "strbuf.h"
+#include <subcmd/pager.h>
#include "../perf.h"
#include "../ui/ui.h"
+#include <linux/string.h>
+
#define CMD_EXEC_PATH "--exec-path"
#define CMD_PERF_DIR "--perf-dir="
#define CMD_WORK_TREE "--work-tree="
@@ -18,6 +21,7 @@
#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
+#define PERF_PAGER_ENVIRONMENT "PERF_PAGER"
typedef int (*config_fn_t)(const char *, const char *, void *);
extern int perf_default_config(const char *, const char *, void *);
@@ -28,11 +32,6 @@ extern int perf_config_bool(const char *, const char *);
extern int config_error_nonbool(const char *);
extern const char *perf_config_dirname(const char *, const char *);
-/* pager.c */
-extern void setup_pager(void);
-extern int pager_in_use(void);
-extern int pager_use_color;
-
char *alias_lookup(const char *alias);
int split_cmdline(char *cmdline, const char ***argv);
@@ -71,9 +70,4 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
extern char *perf_pathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
-#ifndef __UCLIBC__
-/* Matches the libc/libbsd function attribute so we declare this unconditionally: */
-extern size_t strlcpy(char *dest, const char *src, size_t size);
-#endif
-
#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 735ad48e1858..53c43eb9489e 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -44,6 +44,10 @@ static int parse_callchain_mode(const char *value)
callchain_param.mode = CHAIN_GRAPH_REL;
return 0;
}
+ if (!strncmp(value, "folded", strlen(value))) {
+ callchain_param.mode = CHAIN_FOLDED;
+ return 0;
+ }
return -1;
}
@@ -79,6 +83,23 @@ static int parse_callchain_sort_key(const char *value)
return -1;
}
+static int parse_callchain_value(const char *value)
+{
+ if (!strncmp(value, "percent", strlen(value))) {
+ callchain_param.value = CCVAL_PERCENT;
+ return 0;
+ }
+ if (!strncmp(value, "period", strlen(value))) {
+ callchain_param.value = CCVAL_PERIOD;
+ return 0;
+ }
+ if (!strncmp(value, "count", strlen(value))) {
+ callchain_param.value = CCVAL_COUNT;
+ return 0;
+ }
+ return -1;
+}
+
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
@@ -102,7 +123,8 @@ __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
if (!parse_callchain_mode(tok) ||
!parse_callchain_order(tok) ||
- !parse_callchain_sort_key(tok)) {
+ !parse_callchain_sort_key(tok) ||
+ !parse_callchain_value(tok)) {
/* parsing ok - move on to the next */
try_stack_size = false;
goto next;
@@ -218,6 +240,7 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
switch (mode) {
case CHAIN_FLAT:
+ case CHAIN_FOLDED:
if (rnode->hit < chain->hit)
p = &(*p)->rb_left;
else
@@ -267,6 +290,7 @@ static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
u64 min_hit, struct callchain_param *param __maybe_unused)
{
+ *rb_root = RB_ROOT;
__sort_chain_flat(rb_root, &root->node, min_hit);
}
@@ -338,6 +362,7 @@ int callchain_register_param(struct callchain_param *param)
param->sort = sort_chain_graph_rel;
break;
case CHAIN_FLAT:
+ case CHAIN_FOLDED:
param->sort = sort_chain_flat;
break;
case CHAIN_NONE:
@@ -363,6 +388,7 @@ create_child(struct callchain_node *parent, bool inherit_children)
}
new->parent = parent;
INIT_LIST_HEAD(&new->val);
+ INIT_LIST_HEAD(&new->parent_val);
if (inherit_children) {
struct rb_node *n;
@@ -431,6 +457,8 @@ add_child(struct callchain_node *parent,
new->children_hit = 0;
new->hit = period;
+ new->children_count = 0;
+ new->count = 1;
return new;
}
@@ -478,6 +506,9 @@ split_add_child(struct callchain_node *parent,
parent->children_hit = callchain_cumul_hits(new);
new->val_nr = parent->val_nr - idx_local;
parent->val_nr = idx_local;
+ new->count = parent->count;
+ new->children_count = parent->children_count;
+ parent->children_count = callchain_cumul_counts(new);
/* create a new child for the new branch if any */
if (idx_total < cursor->nr) {
@@ -488,6 +519,8 @@ split_add_child(struct callchain_node *parent,
parent->hit = 0;
parent->children_hit += period;
+ parent->count = 0;
+ parent->children_count += 1;
node = callchain_cursor_current(cursor);
new = add_child(parent, cursor, period);
@@ -510,6 +543,7 @@ split_add_child(struct callchain_node *parent,
rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
} else {
parent->hit = period;
+ parent->count = 1;
}
}
@@ -556,6 +590,7 @@ append_chain_children(struct callchain_node *root,
inc_children_hit:
root->children_hit += period;
+ root->children_count++;
}
static int
@@ -608,6 +643,7 @@ append_chain(struct callchain_node *root,
/* we match 100% of the path, increment the hit */
if (matches == root->val_nr && cursor->pos == cursor->nr) {
root->hit += period;
+ root->count++;
return 0;
}
@@ -799,12 +835,72 @@ char *callchain_list__sym_name(struct callchain_list *cl,
return bf;
}
+char *callchain_node__scnprintf_value(struct callchain_node *node,
+ char *bf, size_t bfsize, u64 total)
+{
+ double percent = 0.0;
+ u64 period = callchain_cumul_hits(node);
+ unsigned count = callchain_cumul_counts(node);
+
+ if (callchain_param.mode == CHAIN_FOLDED) {
+ period = node->hit;
+ count = node->count;
+ }
+
+ switch (callchain_param.value) {
+ case CCVAL_PERIOD:
+ scnprintf(bf, bfsize, "%"PRIu64, period);
+ break;
+ case CCVAL_COUNT:
+ scnprintf(bf, bfsize, "%u", count);
+ break;
+ case CCVAL_PERCENT:
+ default:
+ if (total)
+ percent = period * 100.0 / total;
+ scnprintf(bf, bfsize, "%.2f%%", percent);
+ break;
+ }
+ return bf;
+}
+
+int callchain_node__fprintf_value(struct callchain_node *node,
+ FILE *fp, u64 total)
+{
+ double percent = 0.0;
+ u64 period = callchain_cumul_hits(node);
+ unsigned count = callchain_cumul_counts(node);
+
+ if (callchain_param.mode == CHAIN_FOLDED) {
+ period = node->hit;
+ count = node->count;
+ }
+
+ switch (callchain_param.value) {
+ case CCVAL_PERIOD:
+ return fprintf(fp, "%"PRIu64, period);
+ case CCVAL_COUNT:
+ return fprintf(fp, "%u", count);
+ case CCVAL_PERCENT:
+ default:
+ if (total)
+ percent = period * 100.0 / total;
+ return percent_color_fprintf(fp, "%.2f%%", percent);
+ }
+ return 0;
+}
+
static void free_callchain_node(struct callchain_node *node)
{
struct callchain_list *list, *tmp;
struct callchain_node *child;
struct rb_node *n;
+ list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
+ list_del(&list->list);
+ free(list);
+ }
+
list_for_each_entry_safe(list, tmp, &node->val, list) {
list_del(&list->list);
free(list);
@@ -828,3 +924,69 @@ void free_callchain(struct callchain_root *root)
free_callchain_node(&root->node);
}
+
+static u64 decay_callchain_node(struct callchain_node *node)
+{
+ struct callchain_node *child;
+ struct rb_node *n;
+ u64 child_hits = 0;
+
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = container_of(n, struct callchain_node, rb_node_in);
+
+ child_hits += decay_callchain_node(child);
+ n = rb_next(n);
+ }
+
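+ /*
+ * Decay this node's hits by 1/8, in line with how hist
+ * entries are decayed elsewhere in perf.
+ */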
+ node->hit = (node->hit * 7) / 8;
+ node->children_hit = child_hits;
+
+ return node->hit;
+}
+
+void decay_callchain(struct callchain_root *root)
+{
+ if (!symbol_conf.use_callchain)
+ return;
+
+ decay_callchain_node(&root->node);
+}
+
+int callchain_node__make_parent_list(struct callchain_node *node)
+{
+ struct callchain_node *parent = node->parent;
+ struct callchain_list *chain, *new;
+ LIST_HEAD(head);
+
+ while (parent) {
+ list_for_each_entry_reverse(chain, &parent->val, list) {
+ new = malloc(sizeof(*new));
+ if (new == NULL)
+ goto out;
+ *new = *chain;
+ new->has_children = false;
+ list_add_tail(&new->list, &head);
+ }
+ parent = parent->parent;
+ }
+
+ list_for_each_entry_safe_reverse(chain, new, &head, list)
+ list_move_tail(&chain->list, &node->parent_val);
+
+ if (!list_empty(&node->parent_val)) {
+ chain = list_first_entry(&node->parent_val, struct callchain_list, list);
+ chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
+
+ chain = list_first_entry(&node->val, struct callchain_list, list);
+ chain->has_children = false;
+ }
+ return 0;
+
+out:
+ list_for_each_entry_safe(chain, new, &head, list) {
+ list_del(&chain->list);
+ free(chain);
+ }
+ return -ENOMEM;
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index fce8161e54db..18dd22269764 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -24,12 +24,13 @@
#define CALLCHAIN_RECORD_HELP CALLCHAIN_HELP RECORD_MODE_HELP RECORD_SIZE_HELP
#define CALLCHAIN_REPORT_HELP \
- HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|none)\n" \
+ HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|folded|none)\n" \
HELP_PAD "threshold:\tminimum call graph inclusion threshold (<percent>)\n" \
HELP_PAD "print_limit:\tmaximum number of call graph entry (<number>)\n" \
HELP_PAD "order:\t\tcall graph order (caller|callee)\n" \
HELP_PAD "sort_key:\tcall graph sort key (function|address)\n" \
- HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n"
+ HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n" \
+ HELP_PAD "value:\t\tcall graph value (percent|period|count)\n"
enum perf_call_graph_mode {
CALLCHAIN_NONE,
@@ -43,7 +44,8 @@ enum chain_mode {
CHAIN_NONE,
CHAIN_FLAT,
CHAIN_GRAPH_ABS,
- CHAIN_GRAPH_REL
+ CHAIN_GRAPH_REL,
+ CHAIN_FOLDED,
};
enum chain_order {
@@ -54,11 +56,14 @@ enum chain_order {
struct callchain_node {
struct callchain_node *parent;
struct list_head val;
+ struct list_head parent_val;
struct rb_node rb_node_in; /* to insert nodes in an rbtree */
struct rb_node rb_node; /* to sort nodes in an output tree */
struct rb_root rb_root_in; /* input tree of children */
struct rb_root rb_root; /* sorted output tree of children */
unsigned int val_nr;
+ unsigned int count;
+ unsigned int children_count;
u64 hit;
u64 children_hit;
};
@@ -78,6 +83,12 @@ enum chain_key {
CCKEY_ADDRESS
};
+enum chain_value {
+ CCVAL_PERCENT,
+ CCVAL_PERIOD,
+ CCVAL_COUNT,
+};
+
struct callchain_param {
bool enabled;
enum perf_call_graph_mode record_mode;
@@ -90,6 +101,7 @@ struct callchain_param {
bool order_set;
enum chain_key key;
bool branch_callstack;
+ enum chain_value value;
};
extern struct callchain_param callchain_param;
@@ -131,6 +143,7 @@ extern __thread struct callchain_cursor callchain_cursor;
static inline void callchain_init(struct callchain_root *root)
{
INIT_LIST_HEAD(&root->node.val);
+ INIT_LIST_HEAD(&root->node.parent_val);
root->node.parent = NULL;
root->node.hit = 0;
@@ -144,6 +157,11 @@ static inline u64 callchain_cumul_hits(struct callchain_node *node)
return node->hit + node->children_hit;
}
+static inline unsigned callchain_cumul_counts(struct callchain_node *node)
+{
+ return node->count + node->children_count;
+}
+
int callchain_register_param(struct callchain_param *param);
int callchain_append(struct callchain_root *root,
struct callchain_cursor *cursor,
@@ -229,7 +247,13 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
char *callchain_list__sym_name(struct callchain_list *cl,
char *bf, size_t bfsize, bool show_dso);
+char *callchain_node__scnprintf_value(struct callchain_node *node,
+ char *bf, size_t bfsize, u64 total);
+int callchain_node__fprintf_value(struct callchain_node *node,
+ FILE *fp, u64 total);
void free_callchain(struct callchain_root *root);
+void decay_callchain(struct callchain_root *root);
+int callchain_node__make_parent_list(struct callchain_node *node);
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 32e12ecfe9c5..90aa1b46b2e5 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -1,6 +1,6 @@
#include "util.h"
#include "../perf.h"
-#include "parse-options.h"
+#include <subcmd/parse-options.h>
#include "evsel.h"
#include "cgroup.h"
#include "evlist.h"
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 9b9565416f90..e5fb88bab9e1 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -24,7 +24,7 @@ int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
auto_color:
if (stdout_is_tty < 0)
stdout_is_tty = isatty(1);
- if (stdout_is_tty || (pager_in_use() && pager_use_color)) {
+ if (stdout_is_tty || pager_in_use()) {
char *term = getenv("TERM");
if (term && strcmp(term, "dumb"))
return 1;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 2e452ac1353d..d3e12e30e1d5 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -10,7 +10,7 @@
*/
#include "util.h"
#include "cache.h"
-#include "exec_cmd.h"
+#include <subcmd/exec-cmd.h>
#include "util/hist.h" /* perf_hist_config */
#include "util/llvm-utils.h" /* perf_llvm_config */
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 10af1e7524fb..fa935093a599 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -5,6 +5,7 @@
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
+#include <linux/bitmap.h>
#include "asm/bug.h"
static struct cpu_map *cpu_map__default_new(void)
@@ -179,6 +180,56 @@ out:
return cpus;
}
+static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
+{
+ struct cpu_map *map;
+
+ map = cpu_map__empty_new(cpus->nr);
+ if (map) {
+ unsigned i;
+
+ for (i = 0; i < cpus->nr; i++) {
+ /*
+ * Special treatment for -1, which is not a real cpu
+ * number: we need to use (int) -1 to initialize map[i],
+ * otherwise it would become 65535.
+ */
+ if (cpus->cpu[i] == (u16) -1)
+ map->map[i] = -1;
+ else
+ map->map[i] = (int) cpus->cpu[i];
+ }
+ }
+
+ return map;
+}
+
+static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
+{
+ struct cpu_map *map;
+ int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;
+
+ nr = bitmap_weight(mask->mask, nbits);
+
+ map = cpu_map__empty_new(nr);
+ if (map) {
+ int cpu, i = 0;
+
+ for_each_set_bit(cpu, mask->mask, nbits)
+ map->map[i++] = cpu;
+ }
+ return map;
+
+}
+
+struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
+{
+ if (data->type == PERF_CPU_MAP__CPUS)
+ return cpu_map__from_entries((struct cpu_map_entries *)data->data);
+ else
+ return cpu_map__from_mask((struct cpu_map_mask *)data->data);
+}
+
size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
int i;
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 85f7772457fa..71c41b9efabb 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -17,6 +17,7 @@ struct cpu_map {
struct cpu_map *cpu_map__new(const char *cpu_list);
struct cpu_map *cpu_map__empty_new(int nr);
struct cpu_map *cpu_map__dummy_new(void);
+struct cpu_map *cpu_map__new_data(struct cpu_map_data *data);
struct cpu_map *cpu_map__read(FILE *file);
size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
int cpu_map__get_socket_id(int cpu);
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 5bfc1198ab46..34cd1e4039d3 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -63,6 +63,7 @@ struct ctf_writer {
struct bt_ctf_field_type *s32;
struct bt_ctf_field_type *u32;
struct bt_ctf_field_type *string;
+ struct bt_ctf_field_type *u32_hex;
struct bt_ctf_field_type *u64_hex;
};
struct bt_ctf_field_type *array[6];
@@ -982,6 +983,7 @@ do { \
CREATE_INT_TYPE(cw->data.u64, 64, false, false);
CREATE_INT_TYPE(cw->data.s32, 32, true, false);
CREATE_INT_TYPE(cw->data.u32, 32, false, false);
+ CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
cw->data.string = bt_ctf_field_type_string_create();
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 425df5c86c9c..e8e9a9dbf5e3 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1243,6 +1243,8 @@ struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
if (dso != NULL) {
__dsos__add(dsos, dso);
dso__set_basename(dso);
+ /* Put the dso here because __dsos__add() already took its own reference */
+ dso__put(dso);
}
return dso;
}
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 6af4f7c36820..7dd5939dea2e 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -25,15 +25,6 @@ int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
int i;
- /*
- * If env->cmdline_argv has already been set, do not override it. This allows
- * a command to set the cmdline, parse args and then call another
- * builtin function that implements a command -- e.g, cmd_kvm calling
- * cmd_record.
- */
- if (env->cmdline_argv != NULL)
- return 0;
-
/* do not include NULL termination */
env->cmdline_argv = calloc(argc, sizeof(char *));
if (env->cmdline_argv == NULL)
diff --git a/tools/perf/util/environment.c b/tools/perf/util/environment.c
deleted file mode 100644
index 7405123692f1..000000000000
--- a/tools/perf/util/environment.c
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
- * We put all the perf config variables in this same object
- * file, so that programs can link against the config parser
- * without having to link against all the rest of perf.
- */
-#include "cache.h"
-
-int pager_use_color = 1;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 8b10621b415c..85155e91b61b 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -10,6 +10,8 @@
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"
+#include "asm/bug.h"
+#include "stat.h"
static const char *perf_event__names[] = {
[0] = "TOTAL",
@@ -37,6 +39,12 @@ static const char *perf_event__names[] = {
[PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
[PERF_RECORD_AUXTRACE] = "AUXTRACE",
[PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
+ [PERF_RECORD_THREAD_MAP] = "THREAD_MAP",
+ [PERF_RECORD_CPU_MAP] = "CPU_MAP",
+ [PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG",
+ [PERF_RECORD_STAT] = "STAT",
+ [PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
+ [PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
};
const char *perf_event__name(unsigned int id)
@@ -495,7 +503,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
if (comm_event == NULL)
goto out;
- mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
+ mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
@@ -569,7 +577,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (comm_event == NULL)
goto out;
- mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
+ mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
@@ -699,6 +707,274 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
return err;
}
+int perf_event__synthesize_thread_map2(struct perf_tool *tool,
+ struct thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event *event;
+ int i, err, size;
+
+ size = sizeof(event->thread_map);
+ size += threads->nr * sizeof(event->thread_map.entries[0]);
+
+ event = zalloc(size);
+ if (!event)
+ return -ENOMEM;
+
+ event->header.type = PERF_RECORD_THREAD_MAP;
+ event->header.size = size;
+ event->thread_map.nr = threads->nr;
+
+ for (i = 0; i < threads->nr; i++) {
+ struct thread_map_event_entry *entry = &event->thread_map.entries[i];
+ char *comm = thread_map__comm(threads, i);
+
+ if (!comm)
+ comm = (char *) "";
+
+ entry->pid = thread_map__pid(threads, i);
+ strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
+ }
+
+ err = process(tool, event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+static void synthesize_cpus(struct cpu_map_entries *cpus,
+ struct cpu_map *map)
+{
+ int i;
+
+ cpus->nr = map->nr;
+
+ for (i = 0; i < map->nr; i++)
+ cpus->cpu[i] = map->map[i];
+}
+
+static void synthesize_mask(struct cpu_map_mask *mask,
+ struct cpu_map *map, int max)
+{
+ int i;
+
+ mask->nr = BITS_TO_LONGS(max);
+ mask->long_size = sizeof(long);
+
+ for (i = 0; i < map->nr; i++)
+ set_bit(map->map[i], mask->mask);
+}
+
+static size_t cpus_size(struct cpu_map *map)
+{
+ return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+}
+
+static size_t mask_size(struct cpu_map *map, int *max)
+{
+ int i;
+
+ *max = 0;
+
+ for (i = 0; i < map->nr; i++) {
+ /* bit position of the cpu is cpu number + 1 */
+ int bit = map->map[i] + 1;
+
+ if (bit > *max)
+ *max = bit;
+ }
+
+ return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
+}
+
+void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
+{
+ size_t size_cpus, size_mask;
+ bool is_dummy = cpu_map__empty(map);
+
+ /*
+ * Both array and mask data have variable size based
+ * on the number of cpus and their actual values.
+ * The size of the 'struct cpu_map_data' is:
+ *
+ * array = size of 'struct cpu_map_entries' +
+ * number of cpus * sizeof(u16)
+ *
+ * mask = size of 'struct cpu_map_mask' +
+ * maximum cpu bit converted to size of longs
+ *
+ * and finally + the size of 'struct cpu_map_data'.
+ */
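+ /*
+ * For example, assuming 64-bit longs, for cpus {0, 1} the array
+ * form needs roughly 6 bytes while the mask form needs roughly
+ * 16 bytes, so the array encoding is chosen; for a box with many
+ * cpus the comparison flips in favour of the mask.
+ */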
+ size_cpus = cpus_size(map);
+ size_mask = mask_size(map, max);
+
+ if (is_dummy || (size_cpus < size_mask)) {
+ *size += size_cpus;
+ *type = PERF_CPU_MAP__CPUS;
+ } else {
+ *size += size_mask;
+ *type = PERF_CPU_MAP__MASK;
+ }
+
+ *size += sizeof(struct cpu_map_data);
+ return zalloc(*size);
+}
+
+void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
+ u16 type, int max)
+{
+ data->type = type;
+
+ switch (type) {
+ case PERF_CPU_MAP__CPUS:
+ synthesize_cpus((struct cpu_map_entries *) data->data, map);
+ break;
+ case PERF_CPU_MAP__MASK:
+ synthesize_mask((struct cpu_map_mask *) data->data, map, max);
+ default:
+ break;
+ };
+}
+
+static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
+{
+ size_t size = sizeof(struct cpu_map_event);
+ struct cpu_map_event *event;
+ int max;
+ u16 type;
+
+ event = cpu_map_data__alloc(map, &size, &type, &max);
+ if (!event)
+ return NULL;
+
+ event->header.type = PERF_RECORD_CPU_MAP;
+ event->header.size = size;
+ event->data.type = type;
+
+ cpu_map_data__synthesize(&event->data, map, type, max);
+ return event;
+}
+
+int perf_event__synthesize_cpu_map(struct perf_tool *tool,
+ struct cpu_map *map,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct cpu_map_event *event;
+ int err;
+
+ event = cpu_map_event__new(map);
+ if (!event)
+ return -ENOMEM;
+
+ err = process(tool, (union perf_event *) event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+int perf_event__synthesize_stat_config(struct perf_tool *tool,
+ struct perf_stat_config *config,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct stat_config_event *event;
+ int size, i = 0, err;
+
+ size = sizeof(*event);
+ size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
+
+ event = zalloc(size);
+ if (!event)
+ return -ENOMEM;
+
+ event->header.type = PERF_RECORD_STAT_CONFIG;
+ event->header.size = size;
+ event->nr = PERF_STAT_CONFIG_TERM__MAX;
+
+#define ADD(__term, __val) \
+ event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
+ event->data[i].val = __val; \
+ i++;
+
+ ADD(AGGR_MODE, config->aggr_mode)
+ ADD(INTERVAL, config->interval)
+ ADD(SCALE, config->scale)
+
+ WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
+ "stat config terms unbalanced\n");
+#undef ADD
+
+ err = process(tool, (union perf_event *) event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+int perf_event__synthesize_stat(struct perf_tool *tool,
+ u32 cpu, u32 thread, u64 id,
+ struct perf_counts_values *count,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct stat_event event;
+
+ event.header.type = PERF_RECORD_STAT;
+ event.header.size = sizeof(event);
+ event.header.misc = 0;
+
+ event.id = id;
+ event.cpu = cpu;
+ event.thread = thread;
+ event.val = count->val;
+ event.ena = count->ena;
+ event.run = count->run;
+
+ return process(tool, (union perf_event *) &event, NULL, machine);
+}
+
+int perf_event__synthesize_stat_round(struct perf_tool *tool,
+ u64 evtime, u64 type,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct stat_round_event event;
+
+ event.header.type = PERF_RECORD_STAT_ROUND;
+ event.header.size = sizeof(event);
+ event.header.misc = 0;
+
+ event.time = evtime;
+ event.type = type;
+
+ return process(tool, (union perf_event *) &event, NULL, machine);
+}
+
+void perf_event__read_stat_config(struct perf_stat_config *config,
+ struct stat_config_event *event)
+{
+ unsigned i;
+
+ for (i = 0; i < event->nr; i++) {
+
+ switch (event->data[i].tag) {
+#define CASE(__term, __val) \
+ case PERF_STAT_CONFIG_TERM__##__term: \
+ config->__val = event->data[i].val; \
+ break;
+
+ CASE(AGGR_MODE, aggr_mode)
+ CASE(SCALE, scale)
+ CASE(INTERVAL, interval)
+#undef CASE
+ default:
+ pr_warning("unknown stat config term %" PRIu64 "\n",
+ event->data[i].tag);
+ }
+ }
+}
+
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
const char *s;
@@ -783,6 +1059,38 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
event->mmap2.filename);
}
+size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
+{
+ struct thread_map *threads = thread_map__new_event(&event->thread_map);
+ size_t ret;
+
+ ret = fprintf(fp, " nr: ");
+
+ if (threads)
+ ret += thread_map__fprintf(threads, fp);
+ else
+ ret += fprintf(fp, "failed to get threads from event\n");
+
+ thread_map__put(threads);
+ return ret;
+}
+
+size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
+{
+ struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
+ size_t ret;
+
+ ret = fprintf(fp, " nr: ");
+
+ if (cpus)
+ ret += cpu_map__fprintf(cpus, fp);
+ else
+ ret += fprintf(fp, "failed to get cpumap from event\n");
+
+ cpu_map__put(cpus);
+ return ret;
+}
+
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index a0dbcbd4f6d8..b7ffb7ee9971 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -226,6 +226,12 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_AUXTRACE_INFO = 70,
PERF_RECORD_AUXTRACE = 71,
PERF_RECORD_AUXTRACE_ERROR = 72,
+ PERF_RECORD_THREAD_MAP = 73,
+ PERF_RECORD_CPU_MAP = 74,
+ PERF_RECORD_STAT_CONFIG = 75,
+ PERF_RECORD_STAT = 76,
+ PERF_RECORD_STAT_ROUND = 77,
+ PERF_RECORD_EVENT_UPDATE = 78,
PERF_RECORD_HEADER_MAX
};
@@ -270,12 +276,61 @@ struct events_stats {
u32 nr_proc_map_timeout;
};
+enum {
+ PERF_CPU_MAP__CPUS = 0,
+ PERF_CPU_MAP__MASK = 1,
+};
+
+struct cpu_map_entries {
+ u16 nr;
+ u16 cpu[];
+};
+
+struct cpu_map_mask {
+ u16 nr;
+ u16 long_size;
+ unsigned long mask[];
+};
+
+struct cpu_map_data {
+ u16 type;
+ char data[];
+};
+
+struct cpu_map_event {
+ struct perf_event_header header;
+ struct cpu_map_data data;
+};
+
struct attr_event {
struct perf_event_header header;
struct perf_event_attr attr;
u64 id[];
};
+enum {
+ PERF_EVENT_UPDATE__UNIT = 0,
+ PERF_EVENT_UPDATE__SCALE = 1,
+ PERF_EVENT_UPDATE__NAME = 2,
+ PERF_EVENT_UPDATE__CPUS = 3,
+};
+
+struct event_update_event_cpus {
+ struct cpu_map_data cpus;
+};
+
+struct event_update_event_scale {
+ double scale;
+};
+
+struct event_update_event {
+ struct perf_event_header header;
+ u64 type;
+ u64 id;
+
+ char data[];
+};
+
#define MAX_EVENT_NAME 64
struct perf_trace_event_type {
@@ -356,6 +411,63 @@ struct context_switch_event {
u32 next_prev_tid;
};
+struct thread_map_event_entry {
+ u64 pid;
+ char comm[16];
+};
+
+struct thread_map_event {
+ struct perf_event_header header;
+ u64 nr;
+ struct thread_map_event_entry entries[];
+};
+
+enum {
+ PERF_STAT_CONFIG_TERM__AGGR_MODE = 0,
+ PERF_STAT_CONFIG_TERM__INTERVAL = 1,
+ PERF_STAT_CONFIG_TERM__SCALE = 2,
+ PERF_STAT_CONFIG_TERM__MAX = 3,
+};
+
+struct stat_config_event_entry {
+ u64 tag;
+ u64 val;
+};
+
+struct stat_config_event {
+ struct perf_event_header header;
+ u64 nr;
+ struct stat_config_event_entry data[];
+};
+
+struct stat_event {
+ struct perf_event_header header;
+
+ u64 id;
+ u32 cpu;
+ u32 thread;
+
+ union {
+ struct {
+ u64 val;
+ u64 ena;
+ u64 run;
+ };
+ u64 values[3];
+ };
+};
+
+enum {
+ PERF_STAT_ROUND_TYPE__INTERVAL = 0,
+ PERF_STAT_ROUND_TYPE__FINAL = 1,
+};
+
+struct stat_round_event {
+ struct perf_event_header header;
+ u64 type;
+ u64 time;
+};
+
union perf_event {
struct perf_event_header header;
struct mmap_event mmap;
@@ -368,6 +480,7 @@ union perf_event {
struct throttle_event throttle;
struct sample_event sample;
struct attr_event attr;
+ struct event_update_event event_update;
struct event_type_event event_type;
struct tracing_data_event tracing_data;
struct build_id_event build_id;
@@ -378,12 +491,20 @@ union perf_event {
struct aux_event aux;
struct itrace_start_event itrace_start;
struct context_switch_event context_switch;
+ struct thread_map_event thread_map;
+ struct cpu_map_event cpu_map;
+ struct stat_config_event stat_config;
+ struct stat_event stat;
+ struct stat_round_event stat_round;
};
void perf_event__print_totals(void);
struct perf_tool;
struct thread_map;
+struct cpu_map;
+struct perf_stat_config;
+struct perf_counts_values;
typedef int (*perf_event__handler_t)(struct perf_tool *tool,
union perf_event *event,
@@ -395,6 +516,14 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine, bool mmap_data,
unsigned int proc_map_timeout);
+int perf_event__synthesize_thread_map2(struct perf_tool *tool,
+ struct thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__synthesize_cpu_map(struct perf_tool *tool,
+ struct cpu_map *cpus,
+ perf_event__handler_t process,
+ struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine, bool mmap_data,
@@ -402,7 +531,21 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine);
-
+int perf_event__synthesize_stat_config(struct perf_tool *tool,
+ struct perf_stat_config *config,
+ perf_event__handler_t process,
+ struct machine *machine);
+void perf_event__read_stat_config(struct perf_stat_config *config,
+ struct stat_config_event *event);
+int perf_event__synthesize_stat(struct perf_tool *tool,
+ u32 cpu, u32 thread, u64 id,
+ struct perf_counts_values *count,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__synthesize_stat_round(struct perf_tool *tool,
+ u64 time, u64 type,
+ perf_event__handler_t process,
+ struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine);
@@ -499,9 +642,14 @@ size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
u64 kallsyms__get_function_start(const char *kallsyms_filename,
const char *symbol_name);
+void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
+void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
+ u16 type, int max);
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d1392194a9a9..d81f13de2476 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -18,7 +18,7 @@
#include <unistd.h>
#include "parse-events.h"
-#include "parse-options.h"
+#include <subcmd/parse-options.h>
#include <sys/mman.h>
@@ -68,6 +68,18 @@ struct perf_evlist *perf_evlist__new_default(void)
return evlist;
}
+struct perf_evlist *perf_evlist__new_dummy(void)
+{
+ struct perf_evlist *evlist = perf_evlist__new();
+
+ if (evlist && perf_evlist__add_dummy(evlist)) {
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ }
+
+ return evlist;
+}
+
/**
* perf_evlist__set_id_pos - set the positions of event ids.
* @evlist: selected event list
@@ -248,6 +260,22 @@ error:
return -ENOMEM;
}
+int perf_evlist__add_dummy(struct perf_evlist *evlist)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .size = sizeof(attr), /* to capture ABI version */
+ };
+ struct perf_evsel *evsel = perf_evsel__new(&attr);
+
+ if (evsel == NULL)
+ return -ENOMEM;
+
+ perf_evlist__add(evlist, evsel);
+ return 0;
+}
+
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
struct perf_event_attr *attrs, size_t nr_attrs)
{
@@ -336,20 +364,12 @@ static int perf_evlist__nr_threads(struct perf_evlist *evlist,
void perf_evlist__disable(struct perf_evlist *evlist)
{
- int cpu, thread;
struct perf_evsel *pos;
- int nr_cpus = cpu_map__nr(evlist->cpus);
- int nr_threads;
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- evlist__for_each(evlist, pos) {
- if (!perf_evsel__is_group_leader(pos) || !pos->fd)
- continue;
- nr_threads = perf_evlist__nr_threads(evlist, pos);
- for (thread = 0; thread < nr_threads; thread++)
- ioctl(FD(pos, cpu, thread),
- PERF_EVENT_IOC_DISABLE, 0);
- }
+ evlist__for_each(evlist, pos) {
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+ continue;
+ perf_evsel__disable(pos);
}
evlist->enabled = false;
@@ -357,20 +377,12 @@ void perf_evlist__disable(struct perf_evlist *evlist)
void perf_evlist__enable(struct perf_evlist *evlist)
{
- int cpu, thread;
struct perf_evsel *pos;
- int nr_cpus = cpu_map__nr(evlist->cpus);
- int nr_threads;
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- evlist__for_each(evlist, pos) {
- if (!perf_evsel__is_group_leader(pos) || !pos->fd)
- continue;
- nr_threads = perf_evlist__nr_threads(evlist, pos);
- for (thread = 0; thread < nr_threads; thread++)
- ioctl(FD(pos, cpu, thread),
- PERF_EVENT_IOC_ENABLE, 0);
- }
+ evlist__for_each(evlist, pos) {
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+ continue;
+ perf_evsel__enable(pos);
}
evlist->enabled = true;
@@ -381,48 +393,6 @@ void perf_evlist__toggle_enable(struct perf_evlist *evlist)
(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}
-int perf_evlist__disable_event(struct perf_evlist *evlist,
- struct perf_evsel *evsel)
-{
- int cpu, thread, err;
- int nr_cpus = cpu_map__nr(evlist->cpus);
- int nr_threads = perf_evlist__nr_threads(evlist, evsel);
-
- if (!evsel->fd)
- return 0;
-
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- for (thread = 0; thread < nr_threads; thread++) {
- err = ioctl(FD(evsel, cpu, thread),
- PERF_EVENT_IOC_DISABLE, 0);
- if (err)
- return err;
- }
- }
- return 0;
-}
-
-int perf_evlist__enable_event(struct perf_evlist *evlist,
- struct perf_evsel *evsel)
-{
- int cpu, thread, err;
- int nr_cpus = cpu_map__nr(evlist->cpus);
- int nr_threads = perf_evlist__nr_threads(evlist, evsel);
-
- if (!evsel->fd)
- return -EINVAL;
-
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- for (thread = 0; thread < nr_threads; thread++) {
- err = ioctl(FD(evsel, cpu, thread),
- PERF_EVENT_IOC_ENABLE, 0);
- if (err)
- return err;
- }
- }
- return 0;
-}
-
static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
struct perf_evsel *evsel, int cpu)
{
@@ -550,9 +520,9 @@ void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
evsel->id[evsel->ids++] = id;
}
-static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
- struct perf_evsel *evsel,
- int cpu, int thread, int fd)
+int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd)
{
u64 read_data[4] = { 0, };
int id_idx = 1; /* The first entry is the counter value */
@@ -1486,7 +1456,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
perf_evlist__update_id_pos(evlist);
evlist__for_each(evlist, evsel) {
- err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
+ err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
if (err < 0)
goto out_err;
}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index a459fe71b452..7c4d9a206776 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -67,6 +67,7 @@ struct perf_evsel_str_handler {
struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
+struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
@@ -81,6 +82,8 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
#define perf_evlist__add_default_attrs(evlist, array) \
__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
+int perf_evlist__add_dummy(struct perf_evlist *evlist);
+
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);
@@ -97,6 +100,9 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
int cpu, int thread, u64 id);
+int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd);
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
@@ -149,10 +155,6 @@ void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);
-int perf_evlist__disable_event(struct perf_evlist *evlist,
- struct perf_evsel *evsel);
-int perf_evlist__enable_event(struct perf_evlist *evlist,
- struct perf_evsel *evsel);
int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
struct perf_evsel *evsel, int idx);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 397fb4ed3c97..cdbaf9b51e42 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -36,6 +36,7 @@ static struct {
bool cloexec;
bool clockid;
bool clockid_wrong;
+ bool lbr_flags;
} perf_missing_features;
static clockid_t clockid;
@@ -574,7 +575,9 @@ perf_evsel__config_callgraph(struct perf_evsel *evsel,
} else {
perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
- PERF_SAMPLE_BRANCH_CALL_STACK;
+ PERF_SAMPLE_BRANCH_CALL_STACK |
+ PERF_SAMPLE_BRANCH_NO_CYCLES |
+ PERF_SAMPLE_BRANCH_NO_FLAGS;
}
} else
pr_warning("Cannot use LBR callstack with branch stack. "
@@ -981,13 +984,26 @@ int perf_evsel__append_filter(struct perf_evsel *evsel,
return -1;
}
-int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
+int perf_evsel__enable(struct perf_evsel *evsel)
{
+ int nthreads = thread_map__nr(evsel->threads);
+ int ncpus = cpu_map__nr(evsel->cpus);
+
return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
PERF_EVENT_IOC_ENABLE,
0);
}
+int perf_evsel__disable(struct perf_evsel *evsel)
+{
+ int nthreads = thread_map__nr(evsel->threads);
+ int ncpus = cpu_map__nr(evsel->cpus);
+
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_DISABLE,
+ 0);
+}
+
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
if (ncpus == 0 || nthreads == 0)
@@ -1192,6 +1208,7 @@ static void __p_sample_type(char *buf, size_t size, u64 value)
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
+ bit_name(WEIGHT),
{ .name = NULL, }
};
#undef bit_name
@@ -1323,6 +1340,9 @@ fallback_missing_features:
evsel->attr.mmap2 = 0;
if (perf_missing_features.exclude_guest)
evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+ if (perf_missing_features.lbr_flags)
+ evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
+ PERF_SAMPLE_BRANCH_NO_CYCLES);
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
@@ -1441,6 +1461,12 @@ try_fallback:
} else if (!perf_missing_features.sample_id_all) {
perf_missing_features.sample_id_all = true;
goto retry_sample_id;
+ } else if (!perf_missing_features.lbr_flags &&
+ (evsel->attr.branch_sample_type &
+ (PERF_SAMPLE_BRANCH_NO_CYCLES |
+ PERF_SAMPLE_BRANCH_NO_FLAGS))) {
+ perf_missing_features.lbr_flags = true;
+ goto fallback_missing_features;
}
out_close:
@@ -2272,6 +2298,29 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
term, (u64)evsel->attr.sample_freq);
}
+
+ if (details->trace_fields) {
+ struct format_field *field;
+
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
+ printed += comma_fprintf(fp, &first, " (not a tracepoint)");
+ goto out;
+ }
+
+ field = evsel->tp_format->format.fields;
+ if (field == NULL) {
+ printed += comma_fprintf(fp, &first, " (no trace field)");
+ goto out;
+ }
+
+ printed += comma_fprintf(fp, &first, " trace_fields: %s", field->name);
+
+ field = field->next;
+ while (field) {
+ printed += comma_fprintf(fp, &first, "%s", field->name);
+ field = field->next;
+ }
+ }
out:
fputc('\n', fp);
return ++printed;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 0e49bd742c63..8e75434bd01c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -227,7 +227,8 @@ int perf_evsel__append_filter(struct perf_evsel *evsel,
const char *op, const char *filter);
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
const char *filter);
-int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__enable(struct perf_evsel *evsel);
+int perf_evsel__disable(struct perf_evsel *evsel);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
struct cpu_map *cpus);
@@ -368,6 +369,7 @@ struct perf_attr_details {
bool verbose;
bool event_group;
bool force;
+ bool trace_fields;
};
int perf_evsel__fprintf(struct perf_evsel *evsel,
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
deleted file mode 100644
index 7adf4ad15d8f..000000000000
--- a/tools/perf/util/exec_cmd.c
+++ /dev/null
@@ -1,148 +0,0 @@
-#include "cache.h"
-#include "exec_cmd.h"
-#include "quote.h"
-
-#include <string.h>
-
-#define MAX_ARGS 32
-
-static const char *argv_exec_path;
-static const char *argv0_path;
-
-const char *system_path(const char *path)
-{
- static const char *prefix = PREFIX;
- struct strbuf d = STRBUF_INIT;
-
- if (is_absolute_path(path))
- return path;
-
- strbuf_addf(&d, "%s/%s", prefix, path);
- path = strbuf_detach(&d, NULL);
- return path;
-}
-
-const char *perf_extract_argv0_path(const char *argv0)
-{
- const char *slash;
-
- if (!argv0 || !*argv0)
- return NULL;
- slash = argv0 + strlen(argv0);
-
- while (argv0 <= slash && !is_dir_sep(*slash))
- slash--;
-
- if (slash >= argv0) {
- argv0_path = strndup(argv0, slash - argv0);
- return argv0_path ? slash + 1 : NULL;
- }
-
- return argv0;
-}
-
-void perf_set_argv_exec_path(const char *exec_path)
-{
- argv_exec_path = exec_path;
- /*
- * Propagate this setting to external programs.
- */
- setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
-}
-
-
-/* Returns the highest-priority, location to look for perf programs. */
-const char *perf_exec_path(void)
-{
- const char *env;
-
- if (argv_exec_path)
- return argv_exec_path;
-
- env = getenv(EXEC_PATH_ENVIRONMENT);
- if (env && *env) {
- return env;
- }
-
- return system_path(PERF_EXEC_PATH);
-}
-
-static void add_path(struct strbuf *out, const char *path)
-{
- if (path && *path) {
- if (is_absolute_path(path))
- strbuf_addstr(out, path);
- else
- strbuf_addstr(out, make_nonrelative_path(path));
-
- strbuf_addch(out, PATH_SEP);
- }
-}
-
-void setup_path(void)
-{
- const char *old_path = getenv("PATH");
- struct strbuf new_path = STRBUF_INIT;
-
- add_path(&new_path, perf_exec_path());
- add_path(&new_path, argv0_path);
-
- if (old_path)
- strbuf_addstr(&new_path, old_path);
- else
- strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin");
-
- setenv("PATH", new_path.buf, 1);
-
- strbuf_release(&new_path);
-}
-
-static const char **prepare_perf_cmd(const char **argv)
-{
- int argc;
- const char **nargv;
-
- for (argc = 0; argv[argc]; argc++)
- ; /* just counting */
- nargv = malloc(sizeof(*nargv) * (argc + 2));
-
- nargv[0] = "perf";
- for (argc = 0; argv[argc]; argc++)
- nargv[argc + 1] = argv[argc];
- nargv[argc + 1] = NULL;
- return nargv;
-}
-
-int execv_perf_cmd(const char **argv) {
- const char **nargv = prepare_perf_cmd(argv);
-
- /* execvp() can only ever return if it fails */
- execvp("perf", (char **)nargv);
-
- free(nargv);
- return -1;
-}
-
-
-int execl_perf_cmd(const char *cmd,...)
-{
- int argc;
- const char *argv[MAX_ARGS + 1];
- const char *arg;
- va_list param;
-
- va_start(param, cmd);
- argv[0] = cmd;
- argc = 1;
- while (argc < MAX_ARGS) {
- arg = argv[argc++] = va_arg(param, char *);
- if (!arg)
- break;
- }
- va_end(param);
- if (MAX_ARGS <= argc)
- return error("too many args to run %s", cmd);
-
- argv[argc] = NULL;
- return execv_perf_cmd(argv);
-}
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h
deleted file mode 100644
index bc4b915963f5..000000000000
--- a/tools/perf/util/exec_cmd.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __PERF_EXEC_CMD_H
-#define __PERF_EXEC_CMD_H
-
-extern void perf_set_argv_exec_path(const char *exec_path);
-extern const char *perf_extract_argv0_path(const char *path);
-extern const char *perf_exec_path(void);
-extern void setup_path(void);
-extern int execv_perf_cmd(const char **argv); /* NULL terminated */
-extern int execl_perf_cmd(const char *cmd, ...);
-extern const char *system_path(const char *path);
-
-#endif /* __PERF_EXEC_CMD_H */
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index 36a885d2cd22..0ac2037c970c 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -36,4 +36,19 @@ do
}' "Documentation/perf-$cmd.txt"
done
echo "#endif /* HAVE_LIBELF_SUPPORT */"
+
+echo "#ifdef HAVE_LIBAUDIT_SUPPORT"
+sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+echo "#endif /* HAVE_LIBELF_SUPPORT */"
echo "};"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 43838003c1a1..f50b7235ecb6 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -724,7 +724,7 @@ static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
done:
free(buf);
fclose(fp);
- free(node_map);
+ cpu_map__put(node_map);
return ret;
}
@@ -868,6 +868,13 @@ static int write_auxtrace(int fd, struct perf_header *h,
return err;
}
+static int write_stat(int fd __maybe_unused,
+ struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return 0;
+}
+
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
FILE *fp)
{
@@ -1159,6 +1166,12 @@ static void print_auxtrace(struct perf_header *ph __maybe_unused,
fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}
+static void print_stat(struct perf_header *ph __maybe_unused,
+ int fd __maybe_unused, FILE *fp)
+{
+ fprintf(fp, "# contains stat data\n");
+}
+
static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
FILE *fp)
{
@@ -1948,6 +1961,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
FEAT_OPP(HEADER_GROUP_DESC, group_desc),
FEAT_OPP(HEADER_AUXTRACE, auxtrace),
+ FEAT_OPA(HEADER_STAT, stat),
};
struct header_print_data {
@@ -2686,6 +2700,152 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
return err;
}
+static struct event_update_event *
+event_update_event__new(size_t size, u64 type, u64 id)
+{
+ struct event_update_event *ev;
+
+ size += sizeof(*ev);
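+ /* perf event records must be 8-byte aligned, so round the size up */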
+ size = PERF_ALIGN(size, sizeof(u64));
+
+ ev = zalloc(size);
+ if (ev) {
+ ev->header.type = PERF_RECORD_EVENT_UPDATE;
+ ev->header.size = (u16)size;
+ ev->type = type;
+ ev->id = id;
+ }
+ return ev;
+}
+
+int
+perf_event__synthesize_event_update_unit(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ perf_event__handler_t process)
+{
+ struct event_update_event *ev;
+ size_t size = strlen(evsel->unit);
+ int err;
+
+ ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ strncpy(ev->data, evsel->unit, size);
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int
+perf_event__synthesize_event_update_scale(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ perf_event__handler_t process)
+{
+ struct event_update_event *ev;
+ struct event_update_event_scale *ev_data;
+ int err;
+
+ ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ ev_data = (struct event_update_event_scale *) ev->data;
+ ev_data->scale = evsel->scale;
+ err = process(tool, (union perf_event*) ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int
+perf_event__synthesize_event_update_name(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ perf_event__handler_t process)
+{
+ struct event_update_event *ev;
+ size_t len = strlen(evsel->name);
+ int err;
+
+ ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ strncpy(ev->data, evsel->name, len);
+ err = process(tool, (union perf_event*) ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int
+perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ perf_event__handler_t process)
+{
+ size_t size = sizeof(struct event_update_event);
+ struct event_update_event *ev;
+ int max, err;
+ u16 type;
+
+ if (!evsel->own_cpus)
+ return 0;
+
+ ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->header.type = PERF_RECORD_EVENT_UPDATE;
+ ev->header.size = (u16)size;
+ ev->type = PERF_EVENT_UPDATE__CPUS;
+ ev->id = evsel->id[0];
+
+ cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
+ evsel->own_cpus,
+ type, max);
+
+ err = process(tool, (union perf_event*) ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
+{
+ struct event_update_event *ev = &event->event_update;
+ struct event_update_event_scale *ev_scale;
+ struct event_update_event_cpus *ev_cpus;
+ struct cpu_map *map;
+ size_t ret;
+
+ ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
+
+ switch (ev->type) {
+ case PERF_EVENT_UPDATE__SCALE:
+ ev_scale = (struct event_update_event_scale *) ev->data;
+ ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
+ break;
+ case PERF_EVENT_UPDATE__UNIT:
+ ret += fprintf(fp, "... unit: %s\n", ev->data);
+ break;
+ case PERF_EVENT_UPDATE__NAME:
+ ret += fprintf(fp, "... name: %s\n", ev->data);
+ break;
+ case PERF_EVENT_UPDATE__CPUS:
+ ev_cpus = (struct event_update_event_cpus *) ev->data;
+ ret += fprintf(fp, "... ");
+
+ map = cpu_map__new_data(&ev_cpus->cpus);
+ if (map)
+ ret += cpu_map__fprintf(map, fp);
+ else
+ ret += fprintf(fp, "failed to get cpus\n");
+ break;
+ default:
+ ret += fprintf(fp, "... unknown type\n");
+ break;
+ }
+
+ return ret;
+}
+
int perf_event__synthesize_attrs(struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process)
@@ -2745,6 +2905,51 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
return 0;
}
+int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_evlist **pevlist)
+{
+ struct event_update_event *ev = &event->event_update;
+ struct event_update_event_scale *ev_scale;
+ struct event_update_event_cpus *ev_cpus;
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct cpu_map *map;
+
+ if (!pevlist || *pevlist == NULL)
+ return -EINVAL;
+
+ evlist = *pevlist;
+
+ evsel = perf_evlist__id2evsel(evlist, ev->id);
+ if (evsel == NULL)
+ return -EINVAL;
+
+ switch (ev->type) {
+ case PERF_EVENT_UPDATE__UNIT:
+ evsel->unit = strdup(ev->data);
+ break;
+ case PERF_EVENT_UPDATE__NAME:
+ evsel->name = strdup(ev->data);
+ break;
+ case PERF_EVENT_UPDATE__SCALE:
+ ev_scale = (struct event_update_event_scale *) ev->data;
+ evsel->scale = ev_scale->scale;
+ break;
+ case PERF_EVENT_UPDATE__CPUS:
+ ev_cpus = (struct event_update_event_cpus *) ev->data;
+
+ map = cpu_map__new_data(&ev_cpus->cpus);
+ if (map)
+ evsel->own_cpus = map;
+ else
+ pr_err("failed to get event_update cpus\n");
+ default:
+ break;
+ }
+
+ return 0;
+}
+
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
struct perf_evlist *evlist,
perf_event__handler_t process)
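
The four perf_event__synthesize_event_update_*() helpers above each emit one
PERF_RECORD_EVENT_UPDATE record keyed by evsel->id[0], and
perf_event__process_event_update() restores the unit/scale/name/cpus on the
matching evsel via perf_evlist__id2evsel(). A minimal sketch of how a caller
might drive the synthesis side, illustrative only and not part of this patch;
it assumes the evlist__for_each() iterator and reuses whatever
perf_event__handler_t the caller already passes for attr records:

static int synthesize_event_updates(struct perf_tool *tool,
                                    struct perf_evlist *evlist,
                                    perf_event__handler_t process)
{
        struct perf_evsel *evsel;
        int err;

        evlist__for_each(evlist, evsel) {
                /* only evsels with a unit string have something to report */
                if (evsel->unit && *evsel->unit) {
                        err = perf_event__synthesize_event_update_unit(tool, evsel, process);
                        if (err)
                                return err;
                }
                err = perf_event__synthesize_event_update_scale(tool, evsel, process);
                if (err)
                        return err;
        }
        return 0;
}
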
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 05f27cb6b7e3..cff9892452ee 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -31,6 +31,7 @@ enum {
HEADER_PMU_MAPPINGS,
HEADER_GROUP_DESC,
HEADER_AUXTRACE,
+ HEADER_STAT,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -105,8 +106,24 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
int perf_event__synthesize_attrs(struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process);
+int perf_event__synthesize_event_update_unit(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ perf_event__handler_t process);
+int perf_event__synthesize_event_update_scale(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ perf_event__handler_t process);
+int perf_event__synthesize_event_update_name(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ perf_event__handler_t process);
+int perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ perf_event__handler_t process);
int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
struct perf_evlist **pevlist);
+int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_evlist **pevlist);
+size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp);
int perf_event__synthesize_tracing_data(struct perf_tool *tool,
int fd, struct perf_evlist *evlist,
diff --git a/tools/perf/util/help-unknown-cmd.c b/tools/perf/util/help-unknown-cmd.c
new file mode 100644
index 000000000000..dc1e41c9b054
--- /dev/null
+++ b/tools/perf/util/help-unknown-cmd.c
@@ -0,0 +1,103 @@
+#include "cache.h"
+#include <subcmd/help.h>
+#include "../builtin.h"
+#include "levenshtein.h"
+
+static int autocorrect;
+static struct cmdnames aliases;
+
+static int perf_unknown_cmd_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "help.autocorrect"))
+ autocorrect = perf_config_int(var, value);
+ /* Also use aliases for command lookup */
+ if (!prefixcmp(var, "alias."))
+ add_cmdname(&aliases, var + 6, strlen(var + 6));
+
+ return perf_default_config(var, value, cb);
+}
+
+static int levenshtein_compare(const void *p1, const void *p2)
+{
+ const struct cmdname *const *c1 = p1, *const *c2 = p2;
+ const char *s1 = (*c1)->name, *s2 = (*c2)->name;
+ int l1 = (*c1)->len;
+ int l2 = (*c2)->len;
+ return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
+}
+
+static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
+{
+ unsigned int i;
+
+ ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
+
+ for (i = 0; i < old->cnt; i++)
+ cmds->names[cmds->cnt++] = old->names[i];
+ zfree(&old->names);
+ old->cnt = 0;
+}
+
+const char *help_unknown_cmd(const char *cmd)
+{
+ unsigned int i, n = 0, best_similarity = 0;
+ struct cmdnames main_cmds, other_cmds;
+
+ memset(&main_cmds, 0, sizeof(main_cmds));
+ memset(&other_cmds, 0, sizeof(main_cmds));
+ memset(&aliases, 0, sizeof(aliases));
+
+ perf_config(perf_unknown_cmd_config, NULL);
+
+ load_command_list("perf-", &main_cmds, &other_cmds);
+
+ add_cmd_list(&main_cmds, &aliases);
+ add_cmd_list(&main_cmds, &other_cmds);
+ qsort(main_cmds.names, main_cmds.cnt,
+ sizeof(*main_cmds.names), cmdname_compare);
+ uniq(&main_cmds);
+
+ if (main_cmds.cnt) {
+ /* This reuses cmdname->len for similarity index */
+ for (i = 0; i < main_cmds.cnt; ++i)
+ main_cmds.names[i]->len =
+ levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
+
+ qsort(main_cmds.names, main_cmds.cnt,
+ sizeof(*main_cmds.names), levenshtein_compare);
+
+ best_similarity = main_cmds.names[0]->len;
+ n = 1;
+ while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
+ ++n;
+ }
+
+ if (autocorrect && n == 1) {
+ const char *assumed = main_cmds.names[0]->name;
+
+ main_cmds.names[0] = NULL;
+ clean_cmdnames(&main_cmds);
+ fprintf(stderr, "WARNING: You called a perf program named '%s', "
+ "which does not exist.\n"
+ "Continuing under the assumption that you meant '%s'\n",
+ cmd, assumed);
+ if (autocorrect > 0) {
+ fprintf(stderr, "in %0.1f seconds automatically...\n",
+ (float)autocorrect/10.0);
+ poll(NULL, 0, autocorrect * 100);
+ }
+ return assumed;
+ }
+
+ fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
+
+ if (main_cmds.cnt && best_similarity < 6) {
+ fprintf(stderr, "\nDid you mean %s?\n",
+ n < 2 ? "this": "one of these");
+
+ for (i = 0; i < n; i++)
+ fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
+ }
+
+ exit(1);
+}
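
help_unknown_cmd() above either exits after printing its suggestions or, when
the help.autocorrect config value is set and exactly one closest match exists,
returns the corrected command name after a delay of autocorrect/10 seconds.
A sketch of the intended call site in the perf front end, illustrative only
and not part of this patch:

        const char *cmd = argv[0];

        /* returns only when an autocorrected name was accepted */
        cmd = help_unknown_cmd(cmd);
        argv[0] = cmd;
        /* retry command dispatch with the corrected name */
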
diff --git a/tools/perf/util/help-unknown-cmd.h b/tools/perf/util/help-unknown-cmd.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/perf/util/help-unknown-cmd.h
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 4fd37d6708cb..68a7612019dc 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -131,6 +131,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
symlen);
+ hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
+ symlen);
}
if (h->mem_info->iaddr.sym) {
@@ -254,6 +256,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
he_stat__decay(&he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__decay(he->stat_acc);
+ decay_callchain(he->callchain);
diff = prev_period - he->stat.period;
@@ -270,6 +273,8 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
if (sort__need_collapse)
rb_erase(&he->rb_node_in, &hists->entries_collapsed);
+ else
+ rb_erase(&he->rb_node_in, hists->entries_in);
--hists->nr_entries;
if (!he->filtered)
@@ -367,6 +372,25 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
if (symbol_conf.use_callchain)
callchain_init(he->callchain);
+ if (he->raw_data) {
+ he->raw_data = memdup(he->raw_data, he->raw_size);
+
+ if (he->raw_data == NULL) {
+ map__put(he->ms.map);
+ if (he->branch_info) {
+ map__put(he->branch_info->from.map);
+ map__put(he->branch_info->to.map);
+ free(he->branch_info);
+ }
+ if (he->mem_info) {
+ map__put(he->mem_info->iaddr.map);
+ map__put(he->mem_info->daddr.map);
+ }
+ free(he->stat_acc);
+ free(he);
+ return NULL;
+ }
+ }
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
}
@@ -459,7 +483,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
struct symbol *sym_parent,
struct branch_info *bi,
struct mem_info *mi,
- u64 period, u64 weight, u64 transaction,
+ struct perf_sample *sample,
bool sample_self)
{
struct hist_entry entry = {
@@ -476,15 +500,17 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
.level = al->level,
.stat = {
.nr_events = 1,
- .period = period,
- .weight = weight,
+ .period = sample->period,
+ .weight = sample->weight,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
.hists = hists,
.branch_info = bi,
.mem_info = mi,
- .transaction = transaction,
+ .transaction = sample->transaction,
+ .raw_data = sample->raw_data,
+ .raw_size = sample->raw_size,
};
return hists__findnew_entry(hists, &entry, al, sample_self);
@@ -524,12 +550,13 @@ iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al
u64 cost;
struct mem_info *mi = iter->priv;
struct hists *hists = evsel__hists(iter->evsel);
+ struct perf_sample *sample = iter->sample;
struct hist_entry *he;
if (mi == NULL)
return -EINVAL;
- cost = iter->sample->weight;
+ cost = sample->weight;
if (!cost)
cost = 1;
@@ -540,8 +567,10 @@ iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al
* and this is indirectly achieved by passing period=weight here
* and the he_stat__add_period() function.
*/
+ sample->period = cost;
+
he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
- cost, cost, 0, true);
+ sample, true);
if (!he)
return -ENOMEM;
@@ -628,6 +657,7 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
struct branch_info *bi;
struct perf_evsel *evsel = iter->evsel;
struct hists *hists = evsel__hists(evsel);
+ struct perf_sample *sample = iter->sample;
struct hist_entry *he = NULL;
int i = iter->curr;
int err = 0;
@@ -641,9 +671,11 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
* The report shows the percentage of total branches captured
* and not events sampled. Thus we use a pseudo period of 1.
*/
+ sample->period = 1;
+ sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
+
he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
- 1, bi->flags.cycles ? bi->flags.cycles : 1,
- 0, true);
+ sample, true);
if (he == NULL)
return -ENOMEM;
@@ -680,8 +712,7 @@ iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location
struct hist_entry *he;
he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
- sample->period, sample->weight,
- sample->transaction, true);
+ sample, true);
if (he == NULL)
return -ENOMEM;
@@ -742,8 +773,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
int err = 0;
he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
- sample->period, sample->weight,
- sample->transaction, true);
+ sample, true);
if (he == NULL)
return -ENOMEM;
@@ -795,6 +825,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.sym = al->sym,
},
.parent = iter->parent,
+ .raw_data = sample->raw_data,
+ .raw_size = sample->raw_size,
};
int i;
struct callchain_cursor cursor;
@@ -816,8 +848,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
}
he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
- sample->period, sample->weight,
- sample->transaction, false);
+ sample, false);
if (he == NULL)
return -ENOMEM;
@@ -924,9 +955,6 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
int64_t cmp = 0;
perf_hpp__for_each_sort_list(fmt) {
- if (perf_hpp__should_skip(fmt))
- continue;
-
cmp = fmt->cmp(fmt, left, right);
if (cmp)
break;
@@ -942,9 +970,6 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
int64_t cmp = 0;
perf_hpp__for_each_sort_list(fmt) {
- if (perf_hpp__should_skip(fmt))
- continue;
-
cmp = fmt->collapse(fmt, left, right);
if (cmp)
break;
@@ -975,6 +1000,8 @@ void hist_entry__delete(struct hist_entry *he)
if (he->srcfile && he->srcfile[0])
free(he->srcfile);
free_callchain(he->callchain);
+ free(he->trace_output);
+ free(he->raw_data);
free(he);
}
@@ -982,9 +1009,8 @@ void hist_entry__delete(struct hist_entry *he)
* collapse the histogram
*/
-static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
- struct rb_root *root,
- struct hist_entry *he)
+bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
+ struct rb_root *root, struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
@@ -1024,7 +1050,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
return true;
}
-static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
+struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
struct rb_root *root;
@@ -1088,7 +1114,7 @@ static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
int64_t cmp = 0;
perf_hpp__for_each_sort_list(fmt) {
- if (perf_hpp__should_skip(fmt))
+ if (perf_hpp__should_skip(fmt, a->hists))
continue;
cmp = fmt->sort(fmt, a, b);
@@ -1559,10 +1585,8 @@ int perf_hist_config(const char *var, const char *value)
return 0;
}
-static int hists_evsel__init(struct perf_evsel *evsel)
+int __hists__init(struct hists *hists)
{
- struct hists *hists = evsel__hists(evsel);
-
memset(hists, 0, sizeof(*hists));
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
hists->entries_in = &hists->entries_in_array[0];
@@ -1573,6 +1597,43 @@ static int hists_evsel__init(struct perf_evsel *evsel)
return 0;
}
+static void hists__delete_remaining_entries(struct rb_root *root)
+{
+ struct rb_node *node;
+ struct hist_entry *he;
+
+ while (!RB_EMPTY_ROOT(root)) {
+ node = rb_first(root);
+ rb_erase(node, root);
+
+ he = rb_entry(node, struct hist_entry, rb_node_in);
+ hist_entry__delete(he);
+ }
+}
+
+static void hists__delete_all_entries(struct hists *hists)
+{
+ hists__delete_entries(hists);
+ hists__delete_remaining_entries(&hists->entries_in_array[0]);
+ hists__delete_remaining_entries(&hists->entries_in_array[1]);
+ hists__delete_remaining_entries(&hists->entries_collapsed);
+}
+
+static void hists_evsel__exit(struct perf_evsel *evsel)
+{
+ struct hists *hists = evsel__hists(evsel);
+
+ hists__delete_all_entries(hists);
+}
+
+static int hists_evsel__init(struct perf_evsel *evsel)
+{
+ struct hists *hists = evsel__hists(evsel);
+
+ __hists__init(hists);
+ return 0;
+}
+
/*
* XXX We probably need a hists_evsel__exit() to free the hist_entries
* stored in the rbtree...
@@ -1581,7 +1642,8 @@ static int hists_evsel__init(struct perf_evsel *evsel)
int hists__init(void)
{
int err = perf_evsel__object_config(sizeof(struct hists_evsel),
- hists_evsel__init, NULL);
+ hists_evsel__init,
+ hists_evsel__exit);
if (err)
fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index a48a2078d288..d4ec4822a103 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -52,6 +52,7 @@ enum hist_column {
HISTC_MEM_IADDR_SYMBOL,
HISTC_TRANSACTION,
HISTC_CYCLES,
+ HISTC_TRACE,
HISTC_NR_COLS, /* Last entry */
};
@@ -114,8 +115,8 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
struct addr_location *al,
struct symbol *parent,
struct branch_info *bi,
- struct mem_info *mi, u64 period,
- u64 weight, u64 transaction,
+ struct mem_info *mi,
+ struct perf_sample *sample,
bool sample_self);
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
int max_stack_depth, void *arg);
@@ -184,6 +185,11 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
}
int hists__init(void);
+int __hists__init(struct hists *hists);
+
+struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
+bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
+ struct rb_root *root, struct hist_entry *he);
struct perf_hpp {
char *buf;
@@ -261,10 +267,20 @@ void perf_hpp__append_sort_keys(void);
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
+bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *format);
+bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists);
-static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format)
+static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format,
+ struct hists *hists)
{
- return format->elide;
+ if (format->elide)
+ return true;
+
+ if (perf_hpp__is_dynamic_entry(format) &&
+ !perf_hpp__defined_dynamic_entry(format, hists))
+ return true;
+
+ return false;
}
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists);
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h
deleted file mode 100644
index 6f19c548ecc0..000000000000
--- a/tools/perf/util/include/linux/string.h
+++ /dev/null
@@ -1,3 +0,0 @@
-#include <string.h>
-
-void *memdup(const void *src, size_t len);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 97f963a3dcb9..81a2eb77ba7f 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1744,7 +1744,7 @@ static void intel_pt_free(struct perf_session *session)
auxtrace_heap__free(&pt->heap);
intel_pt_free_events(session);
session->auxtrace = NULL;
- thread__delete(pt->unknown_thread);
+ thread__put(pt->unknown_thread);
free(pt);
}
@@ -2153,7 +2153,7 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
return 0;
err_delete_thread:
- thread__delete(pt->unknown_thread);
+ thread__zput(pt->unknown_thread);
err_free_queues:
intel_pt_log_disable();
auxtrace_queues__free(&pt->queues);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8b303ff20289..ad79297c76c8 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -25,6 +25,7 @@ static void dsos__init(struct dsos *dsos)
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
+ memset(machine, 0, sizeof(*machine));
map_groups__init(&machine->kmaps, machine);
RB_CLEAR_NODE(&machine->rb_node);
dsos__init(&machine->dsos);
@@ -44,6 +45,8 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
machine->comm_exec = false;
machine->kernel_start = 0;
+ memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));
+
machine->root_dir = strdup(root_dir);
if (machine->root_dir == NULL)
return -ENOMEM;
@@ -122,6 +125,7 @@ void machine__delete_threads(struct machine *machine)
void machine__exit(struct machine *machine)
{
+ machine__destroy_kernel_maps(machine);
map_groups__exit(&machine->kmaps);
dsos__exit(&machine->dsos);
machine__exit_vdso(machine);
@@ -348,13 +352,18 @@ static void machine__update_thread_pid(struct machine *machine,
}
th->mg = map_groups__get(leader->mg);
-
+out_put:
+ thread__put(leader);
return;
-
out_err:
pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
+ goto out_put;
}
+/*
+ * Caller must eventually drop thread->refcnt returned with a successful
+ * lookup/new thread inserted.
+ */
static struct thread *____machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
@@ -372,7 +381,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
if (th != NULL) {
if (th->tid == tid) {
machine__update_thread_pid(machine, th, pid);
- return th;
+ return thread__get(th);
}
machine->last_match = NULL;
@@ -385,7 +394,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
if (th->tid == tid) {
machine->last_match = th;
machine__update_thread_pid(machine, th, pid);
- return th;
+ return thread__get(th);
}
if (tid < th->tid)
@@ -413,7 +422,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
if (thread__init_map_groups(th, machine)) {
rb_erase_init(&th->rb_node, &machine->threads);
RB_CLEAR_NODE(&th->rb_node);
- thread__delete(th);
+ thread__put(th);
return NULL;
}
/*
@@ -437,7 +446,7 @@ struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
struct thread *th;
pthread_rwlock_wrlock(&machine->threads_lock);
- th = thread__get(__machine__findnew_thread(machine, pid, tid));
+ th = __machine__findnew_thread(machine, pid, tid);
pthread_rwlock_unlock(&machine->threads_lock);
return th;
}
@@ -447,7 +456,7 @@ struct thread *machine__find_thread(struct machine *machine, pid_t pid,
{
struct thread *th;
pthread_rwlock_rdlock(&machine->threads_lock);
- th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
+ th = ____machine__findnew_thread(machine, pid, tid, false);
pthread_rwlock_unlock(&machine->threads_lock);
return th;
}
@@ -560,11 +569,29 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
return 0;
}
+static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
+{
+ const char *dup_filename;
+
+ if (!filename || !dso || !dso->long_name)
+ return;
+ if (dso->long_name[0] != '[')
+ return;
+ if (!strchr(filename, '/'))
+ return;
+
+ dup_filename = strdup(filename);
+ if (!dup_filename)
+ return;
+
+ dso__set_long_name(dso, dup_filename, true);
+}
+
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
const char *filename)
{
struct map *map = NULL;
- struct dso *dso;
+ struct dso *dso = NULL;
struct kmod_path m;
if (kmod_path__parse_name(&m, filename))
@@ -572,8 +599,15 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
m.name);
- if (map)
+ if (map) {
+ /*
+ * If the map's dso is an offline module, give dso__load()
+ * a chance to find the file path of that module by fixing
+ * long_name.
+ */
+ dso__adjust_kmod_long_name(map->dso, filename);
goto out;
+ }
dso = machine__findnew_module_dso(machine, &m, filename);
if (dso == NULL)
@@ -585,7 +619,11 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
map_groups__insert(&machine->kmaps, map);
+ /* Put the map here because map_groups__insert already got it */
+ map__put(map);
out:
+ /* put the dso here, corresponding to machine__findnew_module_dso */
+ dso__put(dso);
free(m.name);
return map;
}
@@ -740,6 +778,9 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
enum map_type type;
u64 start = machine__get_running_kernel_start(machine, NULL);
+ /* In case the kernel map is renewed, destroy the previous one */
+ machine__destroy_kernel_maps(machine);
+
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
struct map *map;
@@ -788,6 +829,7 @@ void machine__destroy_kernel_maps(struct machine *machine)
kmap->ref_reloc_sym = NULL;
}
+ map__put(machine->vmlinux_maps[type]);
machine->vmlinux_maps[type] = NULL;
}
}
@@ -1084,11 +1126,14 @@ int machine__create_kernel_maps(struct machine *machine)
struct dso *kernel = machine__get_kernel(machine);
const char *name;
u64 addr = machine__get_running_kernel_start(machine, &name);
- if (!addr)
+ int ret;
+
+ if (!addr || kernel == NULL)
return -1;
- if (kernel == NULL ||
- __machine__create_kernel_maps(machine, kernel) < 0)
+ ret = __machine__create_kernel_maps(machine, kernel);
+ dso__put(kernel);
+ if (ret < 0)
return -1;
if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
@@ -1609,6 +1654,8 @@ static int add_callchain_ip(struct thread *thread,
}
}
+ if (symbol_conf.hide_unresolved && al.sym == NULL)
+ return 0;
return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}
@@ -1863,6 +1910,9 @@ check_calls:
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
+
+ if (symbol_conf.hide_unresolved && entry->sym == NULL)
+ return 0;
return callchain_cursor_append(cursor, entry->ip,
entry->map, entry->sym);
}
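
The machine.c hunks above change the thread-lookup contract: a successful
____machine__findnew_thread() lookup, and therefore machine__find_thread()
and machine__findnew_thread(), now returns a reference the caller owns. A
minimal sketch of the new calling convention, illustrative only and not part
of this patch:

        struct thread *thread = machine__findnew_thread(machine, pid, tid);

        if (thread == NULL)
                return -1;
        /* ... use thread ... */
        thread__put(thread);    /* drop the reference taken by the lookup */
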
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index afc6b56cf749..171b6d10a04b 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -26,8 +26,8 @@ const char *map_type__name[MAP__NR_TYPES] = {
static inline int is_anon_memory(const char *filename)
{
return !strcmp(filename, "//anon") ||
- !strcmp(filename, "/dev/zero (deleted)") ||
- !strcmp(filename, "/anon_hugepage (deleted)");
+ !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
+ !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}
static inline int is_no_dso_memory(const char *filename)
@@ -691,6 +691,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
__map_groups__insert(pos->groups, before);
if (verbose >= 2)
map__fprintf(before, fp);
+ map__put(before);
}
if (map->end < pos->end) {
@@ -705,6 +706,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
__map_groups__insert(pos->groups, after);
if (verbose >= 2)
map__fprintf(after, fp);
+ map__put(after);
}
put_map:
map__put(pos);
@@ -742,6 +744,7 @@ int map_groups__clone(struct map_groups *mg,
if (new == NULL)
goto out_unlock;
map_groups__insert(mg, new);
+ map__put(new);
}
err = 0;
diff --git a/tools/perf/util/parse-branch-options.c b/tools/perf/util/parse-branch-options.c
index 355eecf6bf59..afc088dd7d20 100644
--- a/tools/perf/util/parse-branch-options.c
+++ b/tools/perf/util/parse-branch-options.c
@@ -1,7 +1,7 @@
#include "perf.h"
#include "util/util.h"
#include "util/debug.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/parse-branch-options.h"
#define BRANCH_OPT(n, m) \
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index e48d9da75707..4f7b0efdde2f 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -4,9 +4,9 @@
#include "../perf.h"
#include "evlist.h"
#include "evsel.h"
-#include "parse-options.h"
+#include <subcmd/parse-options.h>
#include "parse-events.h"
-#include "exec_cmd.h"
+#include <subcmd/exec-cmd.h>
#include "string.h"
#include "symbol.h"
#include "cache.h"
@@ -124,6 +124,10 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
.symbol = "dummy",
.alias = "",
},
+ [PERF_COUNT_SW_BPF_OUTPUT] = {
+ .symbol = "bpf-output",
+ .alias = "",
+ },
};
#define __PERF_EVENT_FIELD(config, name) \
@@ -1879,7 +1883,7 @@ restart:
for (i = 0; i < max; i++, syms++) {
- if (event_glob != NULL &&
+ if (event_glob != NULL && syms->symbol != NULL &&
!(strglobmatch(syms->symbol, event_glob) ||
(syms->alias && strglobmatch(syms->alias, event_glob))))
continue;
diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
index 4f2c1c255d81..646ecf736aad 100644
--- a/tools/perf/util/parse-regs-options.c
+++ b/tools/perf/util/parse-regs-options.c
@@ -1,7 +1,7 @@
#include "perf.h"
#include "util/util.h"
#include "util/debug.h"
-#include "util/parse-options.h"
+#include <subcmd/parse-options.h>
#include "util/parse-regs-options.h"
int
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index 5d13cb45b317..3654d964e49d 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -22,24 +22,6 @@ static const char *get_perf_dir(void)
return ".";
}
-/*
- * If libc has strlcpy() then that version will override this
- * implementation:
- */
-size_t __weak strlcpy(char *dest, const char *src, size_t size)
-{
- size_t ret = strlen(src);
-
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
-
- memcpy(dest, src, len);
- dest[len] = '\0';
- }
-
- return ret;
-}
-
static char *get_pathname(void)
{
static char pathname_array[4][PATH_MAX];
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index e4b173dec4b9..b597bcc8fc78 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -220,6 +220,7 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
alias->scale = 1.0;
alias->unit[0] = '\0';
alias->per_pkg = false;
+ alias->snapshot = false;
ret = parse_events_terms(&alias->terms, val);
if (ret) {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 03875f9154e7..93996ec4bbe3 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2326,8 +2326,11 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
goto out;
if (!allow_suffix) {
- pr_warning("Error: event \"%s\" already exists. "
- "(Use -f to force duplicates.)\n", buf);
+ pr_warning("Error: event \"%s\" already exists.\n"
+ " Hint: Remove existing event by 'perf probe -d'\n"
+ " or force duplicates by 'perf probe -f'\n"
+ " or set 'force=yes' in BPF source.\n",
+ buf);
ret = -EEXIST;
goto out;
}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 05012bb178d7..2be10fb27172 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -654,6 +654,7 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
{
Dwarf_Attribute fb_attr;
+ Dwarf_Frame *frame = NULL;
size_t nops;
int ret;
@@ -686,11 +687,11 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
#if _ELFUTILS_PREREQ(0, 142)
} else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
pf->cfi != NULL) {
- Dwarf_Frame *frame;
if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
pr_warning("Failed to get call frame on 0x%jx\n",
(uintmax_t)pf->addr);
+ free(frame);
return -ENOENT;
}
#endif
@@ -699,7 +700,8 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
/* Call finder's callback handler */
ret = pf->callback(sc_die, pf);
- /* *pf->fb_ops will be cached in libdw. Don't free it. */
+ /* Since *pf->fb_ops can be a part of frame, we should free it here. */
+ free(frame);
pf->fb_ops = NULL;
return ret;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 51be28b1bca2..8162ba0e2e57 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,8 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
+../lib/bitmap.c
+../lib/find_bit.c
../lib/hweight.c
util/thread_map.c
util/util.c
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index a8e825fca42a..d72fafc1c800 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -41,6 +41,9 @@
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../machine.h"
+#include "thread_map.h"
+#include "cpumap.h"
+#include "stat.h"
PyMODINIT_FUNC initperf_trace_context(void);
@@ -859,6 +862,104 @@ static void python_process_event(union perf_event *event,
}
}
+static void get_handler_name(char *str, size_t size,
+ struct perf_evsel *evsel)
+{
+ char *p = str;
+
+ scnprintf(str, size, "stat__%s", perf_evsel__name(evsel));
+
+ while ((p = strchr(p, ':'))) {
+ *p = '_';
+ p++;
+ }
+}
+
+static void
+process_stat(struct perf_evsel *counter, int cpu, int thread, u64 tstamp,
+ struct perf_counts_values *count)
+{
+ PyObject *handler, *t;
+ static char handler_name[256];
+ int n = 0;
+
+ t = PyTuple_New(MAX_FIELDS);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ get_handler_name(handler_name, sizeof(handler_name),
+ counter);
+
+ handler = get_handler(handler_name);
+ if (!handler) {
+ pr_debug("can't find python handler %s\n", handler_name);
+ Py_DECREF(t);
+ return;
+ }
+
+ PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(thread));
+
+ tuple_set_u64(t, n++, tstamp);
+ tuple_set_u64(t, n++, count->val);
+ tuple_set_u64(t, n++, count->ena);
+ tuple_set_u64(t, n++, count->run);
+
+ if (_PyTuple_Resize(&t, n) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+ call_object(handler, t, handler_name);
+
+ Py_DECREF(t);
+}
+
+static void python_process_stat(struct perf_stat_config *config,
+ struct perf_evsel *counter, u64 tstamp)
+{
+ struct thread_map *threads = counter->threads;
+ struct cpu_map *cpus = counter->cpus;
+ int cpu, thread;
+
+ if (config->aggr_mode == AGGR_GLOBAL) {
+ process_stat(counter, -1, -1, tstamp,
+ &counter->counts->aggr);
+ return;
+ }
+
+ for (thread = 0; thread < threads->nr; thread++) {
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ process_stat(counter, cpus->map[cpu],
+ thread_map__pid(threads, thread), tstamp,
+ perf_counts(counter->counts, cpu, thread));
+ }
+ }
+}
+
+static void python_process_stat_interval(u64 tstamp)
+{
+ PyObject *handler, *t;
+ static const char handler_name[] = "stat__interval";
+ int n = 0;
+
+ t = PyTuple_New(MAX_FIELDS);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ handler = get_handler(handler_name);
+ if (!handler) {
+ pr_debug("can't find python handler %s\n", handler_name);
+ Py_DECREF(t);
+ return;
+ }
+
+ tuple_set_u64(t, n++, tstamp);
+
+ if (_PyTuple_Resize(&t, n) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+ call_object(handler, t, handler_name);
+
+ Py_DECREF(t);
+}
+
static int run_start_sub(void)
{
main_module = PyImport_AddModule("__main__");
@@ -1201,10 +1302,12 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
}
struct scripting_ops python_scripting_ops = {
- .name = "Python",
- .start_script = python_start_script,
- .flush_script = python_flush_script,
- .stop_script = python_stop_script,
- .process_event = python_process_event,
- .generate_script = python_generate_script,
+ .name = "Python",
+ .start_script = python_start_script,
+ .flush_script = python_flush_script,
+ .stop_script = python_stop_script,
+ .process_event = python_process_event,
+ .process_stat = python_process_stat,
+ .process_stat_interval = python_process_stat_interval,
+ .generate_script = python_generate_script,
};
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index c35ffdd360fe..40b7a0d0905b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -17,6 +17,7 @@
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread-stack.h"
+#include "stat.h"
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
@@ -36,6 +37,9 @@ static int perf_session__open(struct perf_session *session)
if (perf_data_file__is_pipe(file))
return 0;
+ if (perf_header__has_feat(&session->header, HEADER_STAT))
+ return 0;
+
if (!perf_evlist__valid_sample_type(session->evlist)) {
pr_err("non matching sample_type\n");
return -1;
@@ -205,6 +209,18 @@ static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
+static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_evlist **pevlist
+ __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_event_update(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
@@ -296,6 +312,67 @@ int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
+
+static
+int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_thread_map(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static
+int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_cpu_map(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static
+int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_stat_config(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_stat_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *perf_session
+ __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_stat(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *perf_session
+ __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_stat_round(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
@@ -328,6 +405,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->unthrottle = process_event_stub;
if (tool->attr == NULL)
tool->attr = process_event_synth_attr_stub;
+ if (tool->event_update == NULL)
+ tool->event_update = process_event_synth_event_update_stub;
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
@@ -346,6 +425,16 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->auxtrace = process_event_auxtrace_stub;
if (tool->auxtrace_error == NULL)
tool->auxtrace_error = process_event_auxtrace_error_stub;
+ if (tool->thread_map == NULL)
+ tool->thread_map = process_event_thread_map_stub;
+ if (tool->cpu_map == NULL)
+ tool->cpu_map = process_event_cpu_map_stub;
+ if (tool->stat_config == NULL)
+ tool->stat_config = process_event_stat_config_stub;
+ if (tool->stat == NULL)
+ tool->stat = process_stat_stub;
+ if (tool->stat_round == NULL)
+ tool->stat_round = process_stat_round_stub;
}
static void swap_sample_id_all(union perf_event *event, void *data)
@@ -569,6 +658,13 @@ static void perf_event__hdr_attr_swap(union perf_event *event,
mem_bswap_64(event->attr.id, size);
}
+static void perf_event__event_update_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ event->event_update.type = bswap_64(event->event_update.type);
+ event->event_update.id = bswap_64(event->event_update.id);
+}
+
static void perf_event__event_type_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
@@ -616,6 +712,81 @@ static void perf_event__auxtrace_error_swap(union perf_event *event,
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
}
+static void perf_event__thread_map_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ unsigned i;
+
+ event->thread_map.nr = bswap_64(event->thread_map.nr);
+
+ for (i = 0; i < event->thread_map.nr; i++)
+ event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
+}
+
+static void perf_event__cpu_map_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ struct cpu_map_data *data = &event->cpu_map.data;
+ struct cpu_map_entries *cpus;
+ struct cpu_map_mask *mask;
+ unsigned i;
+
+ data->type = bswap_64(data->type);
+
+ switch (data->type) {
+ case PERF_CPU_MAP__CPUS:
+ cpus = (struct cpu_map_entries *)data->data;
+
+ cpus->nr = bswap_16(cpus->nr);
+
+ for (i = 0; i < cpus->nr; i++)
+ cpus->cpu[i] = bswap_16(cpus->cpu[i]);
+ break;
+ case PERF_CPU_MAP__MASK:
+ mask = (struct cpu_map_mask *) data->data;
+
+ mask->nr = bswap_16(mask->nr);
+ mask->long_size = bswap_16(mask->long_size);
+
+ switch (mask->long_size) {
+ case 4: mem_bswap_32(&mask->mask, mask->nr); break;
+ case 8: mem_bswap_64(&mask->mask, mask->nr); break;
+ default:
+ pr_err("cpu_map swap: unsupported long size\n");
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void perf_event__stat_config_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ u64 size;
+
+ size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
+ size += 1; /* nr item itself */
+ mem_bswap_64(&event->stat_config.nr, size);
+}
+
+static void perf_event__stat_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ event->stat.id = bswap_64(event->stat.id);
+ event->stat.thread = bswap_32(event->stat.thread);
+ event->stat.cpu = bswap_32(event->stat.cpu);
+ event->stat.val = bswap_64(event->stat.val);
+ event->stat.ena = bswap_64(event->stat.ena);
+ event->stat.run = bswap_64(event->stat.run);
+}
+
+static void perf_event__stat_round_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ event->stat_round.type = bswap_64(event->stat_round.type);
+ event->stat_round.time = bswap_64(event->stat_round.time);
+}
+
typedef void (*perf_event__swap_op)(union perf_event *event,
bool sample_id_all);
@@ -643,6 +814,12 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
+ [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
+ [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
+ [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
+ [PERF_RECORD_STAT] = perf_event__stat_swap,
+ [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
+ [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
@@ -972,7 +1149,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
machine = machines__find(machines, pid);
if (!machine)
- machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
+ machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
return machine;
}
@@ -1154,6 +1331,8 @@ static s64 perf_session__process_user_event(struct perf_session *session,
perf_session__set_comm_exec(session);
}
return err;
+ case PERF_RECORD_EVENT_UPDATE:
+ return tool->event_update(tool, event, &session->evlist);
case PERF_RECORD_HEADER_EVENT_TYPE:
/*
* Depreceated, but we need to handle it for sake
@@ -1179,6 +1358,16 @@ static s64 perf_session__process_user_event(struct perf_session *session,
case PERF_RECORD_AUXTRACE_ERROR:
perf_session__auxtrace_error_inc(session, event);
return tool->auxtrace_error(tool, event, session);
+ case PERF_RECORD_THREAD_MAP:
+ return tool->thread_map(tool, event, session);
+ case PERF_RECORD_CPU_MAP:
+ return tool->cpu_map(tool, event, session);
+ case PERF_RECORD_STAT_CONFIG:
+ return tool->stat_config(tool, event, session);
+ case PERF_RECORD_STAT:
+ return tool->stat(tool, event, session);
+ case PERF_RECORD_STAT_ROUND:
+ return tool->stat_round(tool, event, session);
default:
return -EINVAL;
}
@@ -1311,17 +1500,20 @@ struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
return machine__findnew_thread(&session->machines.host, -1, pid);
}
-struct thread *perf_session__register_idle_thread(struct perf_session *session)
+int perf_session__register_idle_thread(struct perf_session *session)
{
struct thread *thread;
+ int err = 0;
thread = machine__findnew_thread(&session->machines.host, 0, 0);
if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
pr_err("problem inserting idle task.\n");
- thread = NULL;
+ err = -1;
}
- return thread;
+ /* machine__findnew_thread() got the thread, so put it */
+ thread__put(thread);
+ return err;
}
static void perf_session__warn_about_errors(const struct perf_session *session)
@@ -1676,7 +1868,7 @@ int perf_session__process_events(struct perf_session *session)
u64 size = perf_data_file__size(session->file);
int err;
- if (perf_session__register_idle_thread(session) == NULL)
+ if (perf_session__register_idle_thread(session) < 0)
return -ENOMEM;
if (!perf_data_file__is_pipe(session->file))
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3e900c0efc73..5f792e35d4c1 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -89,7 +89,7 @@ struct machine *perf_session__findnew_machine(struct perf_session *session, pid_
}
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid);
-struct thread *perf_session__register_idle_thread(struct perf_session *session);
+int perf_session__register_idle_thread(struct perf_session *session);
size_t perf_session__fprintf(struct perf_session *session, FILE *fp);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 2d8ccd4d9e1b..ec722346e6ff 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -4,6 +4,8 @@
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
+#include "evlist.h"
+#include <traceevent/event-parse.h>
regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
@@ -13,6 +15,7 @@ const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cy
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
+const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
@@ -443,6 +446,70 @@ struct sort_entry sort_socket = {
.se_width_idx = HISTC_SOCKET,
};
+/* --sort trace */
+
+static char *get_trace_output(struct hist_entry *he)
+{
+ struct trace_seq seq;
+ struct perf_evsel *evsel;
+ struct pevent_record rec = {
+ .data = he->raw_data,
+ .size = he->raw_size,
+ };
+
+ evsel = hists_to_evsel(he->hists);
+
+ trace_seq_init(&seq);
+ if (symbol_conf.raw_trace) {
+ pevent_print_fields(&seq, he->raw_data, he->raw_size,
+ evsel->tp_format);
+ } else {
+ pevent_event_info(&seq, evsel->tp_format, &rec);
+ }
+ return seq.buffer;
+}
+
+static int64_t
+sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct perf_evsel *evsel;
+
+ evsel = hists_to_evsel(left->hists);
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ return 0;
+
+ if (left->trace_output == NULL)
+ left->trace_output = get_trace_output(left);
+ if (right->trace_output == NULL)
+ right->trace_output = get_trace_output(right);
+
+ hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
+ hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));
+
+ return strcmp(right->trace_output, left->trace_output);
+}
+
+static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ struct perf_evsel *evsel;
+
+ evsel = hists_to_evsel(he->hists);
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ return scnprintf(bf, size, "%-*.*s", width, width, "N/A");
+
+ if (he->trace_output == NULL)
+ he->trace_output = get_trace_output(he);
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
+}
+
+struct sort_entry sort_trace = {
+ .se_header = "Trace output",
+ .se_cmp = sort__trace_cmp,
+ .se_snprintf = hist_entry__trace_snprintf,
+ .se_width_idx = HISTC_TRACE,
+};
+
/* sort keys for branch stacks */
static int64_t
@@ -1312,6 +1379,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
DIM(SORT_TRANSACTION, "transaction", sort_transaction),
+ DIM(SORT_TRACE, "trace", sort_trace),
};
#undef DIM
@@ -1529,6 +1597,455 @@ static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
return 0;
}
+struct hpp_dynamic_entry {
+ struct perf_hpp_fmt hpp;
+ struct perf_evsel *evsel;
+ struct format_field *field;
+ unsigned dynamic_len;
+ bool raw_trace;
+};
+
+static int hde_width(struct hpp_dynamic_entry *hde)
+{
+ if (!hde->hpp.len) {
+ int len = hde->dynamic_len;
+ int namelen = strlen(hde->field->name);
+ int fieldlen = hde->field->size;
+
+ if (namelen > len)
+ len = namelen;
+
+ if (!(hde->field->flags & FIELD_IS_STRING)) {
+ /* length for print hex numbers */
+ fieldlen = hde->field->size * 2 + 2;
+ }
+ if (fieldlen > len)
+ len = fieldlen;
+
+ hde->hpp.len = len;
+ }
+ return hde->hpp.len;
+}
+
+static void update_dynamic_len(struct hpp_dynamic_entry *hde,
+ struct hist_entry *he)
+{
+ char *str, *pos;
+ struct format_field *field = hde->field;
+ size_t namelen;
+ bool last = false;
+
+ if (hde->raw_trace)
+ return;
+
+ /* parse pretty print result and update max length */
+ if (!he->trace_output)
+ he->trace_output = get_trace_output(he);
+
+ namelen = strlen(field->name);
+ str = he->trace_output;
+
+ while (str) {
+ pos = strchr(str, ' ');
+ if (pos == NULL) {
+ last = true;
+ pos = str + strlen(str);
+ }
+
+ if (!strncmp(str, field->name, namelen)) {
+ size_t len;
+
+ str += namelen + 1;
+ len = pos - str;
+
+ if (len > hde->dynamic_len)
+ hde->dynamic_len = len;
+ break;
+ }
+
+ if (last)
+ str = NULL;
+ else
+ str = pos + 1;
+ }
+}
+
+static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel __maybe_unused)
+{
+ struct hpp_dynamic_entry *hde;
+ size_t len = fmt->user_len;
+
+ hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+
+ if (!len)
+ len = hde_width(hde);
+
+ return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
+}
+
+static int __sort__hde_width(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused)
+{
+ struct hpp_dynamic_entry *hde;
+ size_t len = fmt->user_len;
+
+ hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+
+ if (!len)
+ len = hde_width(hde);
+
+ return len;
+}
+
+bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
+{
+ struct hpp_dynamic_entry *hde;
+
+ hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+
+ return hists_to_evsel(hists) == hde->evsel;
+}
+
+static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct hpp_dynamic_entry *hde;
+ size_t len = fmt->user_len;
+ char *str, *pos;
+ struct format_field *field;
+ size_t namelen;
+ bool last = false;
+ int ret;
+
+ hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+
+ if (!len)
+ len = hde_width(hde);
+
+ if (hde->raw_trace)
+ goto raw_field;
+
+ field = hde->field;
+ namelen = strlen(field->name);
+ str = he->trace_output;
+
+ while (str) {
+ pos = strchr(str, ' ');
+ if (pos == NULL) {
+ last = true;
+ pos = str + strlen(str);
+ }
+
+ if (!strncmp(str, field->name, namelen)) {
+ str += namelen + 1;
+ str = strndup(str, pos - str);
+
+ if (str == NULL)
+ return scnprintf(hpp->buf, hpp->size,
+ "%*.*s", len, len, "ERROR");
+ break;
+ }
+
+ if (last)
+ str = NULL;
+ else
+ str = pos + 1;
+ }
+
+ if (str == NULL) {
+ struct trace_seq seq;
+raw_field:
+ trace_seq_init(&seq);
+ pevent_print_field(&seq, he->raw_data, hde->field);
+ str = seq.buffer;
+ }
+
+ ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
+ free(str);
+ return ret;
+}
+
+static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b)
+{
+ struct hpp_dynamic_entry *hde;
+ struct format_field *field;
+ unsigned offset, size;
+
+ hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+
+ field = hde->field;
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ unsigned long long dyn;
+
+ pevent_read_number_field(field, a->raw_data, &dyn);
+ offset = dyn & 0xffff;
+ size = (dyn >> 16) & 0xffff;
+
+ /* record max width for output */
+ if (size > hde->dynamic_len)
+ hde->dynamic_len = size;
+ } else {
+ offset = field->offset;
+ size = field->size;
+
+ update_dynamic_len(hde, a);
+ update_dynamic_len(hde, b);
+ }
+
+ return memcmp(a->raw_data + offset, b->raw_data + offset, size);
+}
+
+bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
+{
+ return fmt->cmp == __sort__hde_cmp;
+}
+
+static struct hpp_dynamic_entry *
+__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
+{
+ struct hpp_dynamic_entry *hde;
+
+ hde = malloc(sizeof(*hde));
+ if (hde == NULL) {
+ pr_debug("Memory allocation failed\n");
+ return NULL;
+ }
+
+ hde->evsel = evsel;
+ hde->field = field;
+ hde->dynamic_len = 0;
+
+ hde->hpp.name = field->name;
+ hde->hpp.header = __sort__hde_header;
+ hde->hpp.width = __sort__hde_width;
+ hde->hpp.entry = __sort__hde_entry;
+ hde->hpp.color = NULL;
+
+ hde->hpp.cmp = __sort__hde_cmp;
+ hde->hpp.collapse = __sort__hde_cmp;
+ hde->hpp.sort = __sort__hde_cmp;
+
+ INIT_LIST_HEAD(&hde->hpp.list);
+ INIT_LIST_HEAD(&hde->hpp.sort_list);
+ hde->hpp.elide = false;
+ hde->hpp.len = 0;
+ hde->hpp.user_len = 0;
+
+ return hde;
+}
+
+static int parse_field_name(char *str, char **event, char **field, char **opt)
+{
+ char *event_name, *field_name, *opt_name;
+
+ event_name = str;
+ field_name = strchr(str, '.');
+
+ if (field_name) {
+ *field_name++ = '\0';
+ } else {
+ event_name = NULL;
+ field_name = str;
+ }
+
+ opt_name = strchr(field_name, '/');
+ if (opt_name)
+ *opt_name++ = '\0';
+
+ *event = event_name;
+ *field = field_name;
+ *opt = opt_name;
+
+ return 0;
+}
+
+/* find a matching evsel using a given event name. The event name can be:
+ * 1. '%' + event index (e.g. '%1' for first event)
+ * 2. full event name (e.g. sched:sched_switch)
+ * 3. partial event name (should not contain ':')
+ */
+static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
+{
+ struct perf_evsel *evsel = NULL;
+ struct perf_evsel *pos;
+ bool full_name;
+
+ /* case 1 */
+ if (event_name[0] == '%') {
+ int nr = strtol(event_name+1, NULL, 0);
+
+ if (nr > evlist->nr_entries)
+ return NULL;
+
+ evsel = perf_evlist__first(evlist);
+ while (--nr > 0)
+ evsel = perf_evsel__next(evsel);
+
+ return evsel;
+ }
+
+ full_name = !!strchr(event_name, ':');
+ evlist__for_each(evlist, pos) {
+ /* case 2 */
+ if (full_name && !strcmp(pos->name, event_name))
+ return pos;
+ /* case 3 */
+ if (!full_name && strstr(pos->name, event_name)) {
+ if (evsel) {
+ pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
+ event_name, evsel->name, pos->name);
+ return NULL;
+ }
+ evsel = pos;
+ }
+ }
+
+ return evsel;
+}
+
+static int __dynamic_dimension__add(struct perf_evsel *evsel,
+ struct format_field *field,
+ bool raw_trace)
+{
+ struct hpp_dynamic_entry *hde;
+
+ hde = __alloc_dynamic_entry(evsel, field);
+ if (hde == NULL)
+ return -ENOMEM;
+
+ hde->raw_trace = raw_trace;
+
+ perf_hpp__register_sort_field(&hde->hpp);
+ return 0;
+}
+
+static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
+{
+ int ret;
+ struct format_field *field;
+
+ field = evsel->tp_format->format.fields;
+ while (field) {
+ ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ if (ret < 0)
+ return ret;
+
+ field = field->next;
+ }
+ return 0;
+}
+
+static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
+{
+ int ret;
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ continue;
+
+ ret = add_evsel_fields(evsel, raw_trace);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int add_all_matching_fields(struct perf_evlist *evlist,
+ char *field_name, bool raw_trace)
+{
+ int ret = -ESRCH;
+ struct perf_evsel *evsel;
+ struct format_field *field;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ continue;
+
+ field = pevent_find_any_field(evsel->tp_format, field_name);
+ if (field == NULL)
+ continue;
+
+ ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
+{
+ char *str, *event_name, *field_name, *opt_name;
+ struct perf_evsel *evsel;
+ struct format_field *field;
+ bool raw_trace = symbol_conf.raw_trace;
+ int ret = 0;
+
+ if (evlist == NULL)
+ return -ENOENT;
+
+ str = strdup(tok);
+ if (str == NULL)
+ return -ENOMEM;
+
+ if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (opt_name) {
+ if (strcmp(opt_name, "raw")) {
+ pr_debug("unsupported field option %s\n", opt_name);
+ ret = -EINVAL;
+ goto out;
+ }
+ raw_trace = true;
+ }
+
+ if (!strcmp(field_name, "trace_fields")) {
+ ret = add_all_dynamic_fields(evlist, raw_trace);
+ goto out;
+ }
+
+ if (event_name == NULL) {
+ ret = add_all_matching_fields(evlist, field_name, raw_trace);
+ goto out;
+ }
+
+ evsel = find_evsel(evlist, event_name);
+ if (evsel == NULL) {
+ pr_debug("Cannot find event: %s\n", event_name);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
+ pr_debug("%s is not a tracepoint event\n", event_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!strcmp(field_name, "*")) {
+ ret = add_evsel_fields(evsel, raw_trace);
+ } else {
+ field = pevent_find_any_field(evsel->tp_format, field_name);
+ if (field == NULL) {
+ pr_debug("Cannot find event field for %s.%s\n",
+ event_name, field_name);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ }
+
+out:
+ free(str);
+ return ret;
+}
+
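
Together with the 'trace' sort key added earlier in this file,
add_dynamic_entry() is what lets --sort/-s take tracepoint fields directly.
Based on parse_field_name() and find_evsel() above, the accepted dynamic forms
include, for example (the sched event and its field names below are only
illustrative, not taken from this patch):

        -s trace_fields                     every field of every tracepoint event
        -s sched:sched_switch.prev_comm     one field, event given by full name
        -s prev_comm                        a field name matched across all events
        -s '%1.*/raw'                       first event, all fields, forced to raw
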
static int __sort_dimension__add(struct sort_dimension *sd)
{
if (sd->taken)
@@ -1583,7 +2100,8 @@ int hpp_dimension__add_output(unsigned col)
return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
}
-int sort_dimension__add(const char *tok)
+static int sort_dimension__add(const char *tok,
+ struct perf_evlist *evlist __maybe_unused)
{
unsigned int i;
@@ -1664,10 +2182,13 @@ int sort_dimension__add(const char *tok)
return 0;
}
+ if (!add_dynamic_entry(evlist, tok))
+ return 0;
+
return -ESRCH;
}
-static const char *get_default_sort_order(void)
+static const char *get_default_sort_order(struct perf_evlist *evlist)
{
const char *default_sort_orders[] = {
default_sort_order,
@@ -1675,14 +2196,33 @@ static const char *get_default_sort_order(void)
default_mem_sort_order,
default_top_sort_order,
default_diff_sort_order,
+ default_tracepoint_sort_order,
};
+ bool use_trace = true;
+ struct perf_evsel *evsel;
BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
+ if (evlist == NULL)
+ goto out_no_evlist;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
+ use_trace = false;
+ break;
+ }
+ }
+
+ if (use_trace) {
+ sort__mode = SORT_MODE__TRACEPOINT;
+ if (symbol_conf.raw_trace)
+ return "trace_fields";
+ }
+out_no_evlist:
return default_sort_orders[sort__mode];
}
-static int setup_sort_order(void)
+static int setup_sort_order(struct perf_evlist *evlist)
{
char *new_sort_order;
@@ -1703,7 +2243,7 @@ static int setup_sort_order(void)
* because it's checked over the rest of the code.
*/
if (asprintf(&new_sort_order, "%s,%s",
- get_default_sort_order(), sort_order + 1) < 0) {
+ get_default_sort_order(evlist), sort_order + 1) < 0) {
error("Not enough memory to set up --sort");
return -ENOMEM;
}
@@ -1712,13 +2252,41 @@ static int setup_sort_order(void)
return 0;
}
-static int __setup_sorting(void)
+/*
+ * Adds a 'pre,' prefix to 'str' if 'pre' is
+ * not already part of 'str'.
+ */
+static char *prefix_if_not_in(const char *pre, char *str)
+{
+ char *n;
+
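+	/*
+	 * A substring hit is enough: an existing "overhead_children" key
+	 * already satisfies a check for "overhead".
+	 */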
+ if (!str || strstr(str, pre))
+ return str;
+
+ if (asprintf(&n, "%s,%s", pre, str) < 0)
+ return NULL;
+
+ free(str);
+ return n;
+}
+
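+/*
+ * Keep the overhead column(s) at the front of the sort keys so the
+ * traditional output layout is preserved.
+ */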
+static char *setup_overhead(char *keys)
+{
+ keys = prefix_if_not_in("overhead", keys);
+
+ if (symbol_conf.cumulate_callchain)
+ keys = prefix_if_not_in("overhead_children", keys);
+
+ return keys;
+}
+
+static int __setup_sorting(struct perf_evlist *evlist)
{
char *tmp, *tok, *str;
const char *sort_keys;
int ret = 0;
- ret = setup_sort_order();
+ ret = setup_sort_order(evlist);
if (ret)
return ret;
@@ -1732,7 +2300,7 @@ static int __setup_sorting(void)
return 0;
}
- sort_keys = get_default_sort_order();
+ sort_keys = get_default_sort_order(evlist);
}
str = strdup(sort_keys);
@@ -1741,9 +2309,20 @@ static int __setup_sorting(void)
return -ENOMEM;
}
+ /*
+ * Prepend overhead fields for backward compatibility.
+ */
+ if (!is_strict_order(field_order)) {
+ str = setup_overhead(str);
+ if (str == NULL) {
+ error("Not enough memory to setup overhead keys");
+ return -ENOMEM;
+ }
+ }
+
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
- ret = sort_dimension__add(tok);
+ ret = sort_dimension__add(tok, evlist);
if (ret == -EINVAL) {
error("Invalid --sort key: `%s'", tok);
break;
@@ -1954,16 +2533,16 @@ out:
return ret;
}
-int setup_sorting(void)
+int setup_sorting(struct perf_evlist *evlist)
{
int err;
- err = __setup_sorting();
+ err = __setup_sorting(evlist);
if (err < 0)
return err;
if (parent_pattern != default_parent_pattern) {
- err = sort_dimension__add("parent");
+ err = sort_dimension__add("parent", evlist);
if (err < 0)
return err;
}
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 31228851e397..687bbb124428 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -18,7 +18,7 @@
#include "debug.h"
#include "header.h"
-#include "parse-options.h"
+#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "hist.h"
#include "thread.h"
@@ -122,6 +122,9 @@ struct hist_entry {
struct branch_info *branch_info;
struct hists *hists;
struct mem_info *mem_info;
+ void *raw_data;
+ u32 raw_size;
+ void *trace_output;
struct callchain_root callchain[0]; /* must be last member */
};
@@ -164,6 +167,7 @@ enum sort_mode {
SORT_MODE__MEMORY,
SORT_MODE__TOP,
SORT_MODE__DIFF,
+ SORT_MODE__TRACEPOINT,
};
enum sort_type {
@@ -180,6 +184,7 @@ enum sort_type {
SORT_LOCAL_WEIGHT,
SORT_GLOBAL_WEIGHT,
SORT_TRANSACTION,
+ SORT_TRACE,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -209,8 +214,6 @@ enum sort_type {
*/
struct sort_entry {
- struct list_head list;
-
const char *se_header;
int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
@@ -224,10 +227,11 @@ struct sort_entry {
extern struct sort_entry sort_thread;
extern struct list_head hist_entry__sort_list;
-int setup_sorting(void);
+struct perf_evlist;
+struct pevent;
+int setup_sorting(struct perf_evlist *evlist);
int setup_output_field(void);
void reset_output_field(void);
-extern int sort_dimension__add(const char *);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 2d9d8306dbd3..2b58edccd56f 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
int i, ret;
aggr->val = aggr->ena = aggr->run = 0;
- init_stats(ps->res_stats);
if (counter->per_pkg)
zero_per_pkg(counter);
@@ -341,3 +340,65 @@ int perf_stat_process_counter(struct perf_stat_config *config,
return 0;
}
+
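+/*
+ * A stat event carries the value/enabled/running counts for one
+ * (id, cpu, thread) tuple: resolve the evsel by id and store the
+ * counts in its per-cpu/per-thread table.
+ */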
+int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ struct perf_counts_values count;
+ struct stat_event *st = &event->stat;
+ struct perf_evsel *counter;
+
+ count.val = st->val;
+ count.ena = st->ena;
+ count.run = st->run;
+
+ counter = perf_evlist__id2evsel(session->evlist, st->id);
+ if (!counter) {
+ pr_err("Failed to resolve counter for stat event.\n");
+ return -EINVAL;
+ }
+
+ *perf_counts(counter->counts, st->cpu, st->thread) = count;
+ counter->supported = true;
+ return 0;
+}
+
+size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
+{
+ struct stat_event *st = (struct stat_event *) event;
+ size_t ret;
+
+ ret = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
+ st->id, st->cpu, st->thread);
+ ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
+ st->val, st->ena, st->run);
+
+ return ret;
+}
+
+size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
+{
+ struct stat_round_event *rd = (struct stat_round_event *)event;
+ size_t ret;
+
+ ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
+ rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");
+
+ return ret;
+}
+
+size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
+{
+ struct perf_stat_config sc;
+ size_t ret;
+
+ perf_event__read_stat_config(&sc, &event->stat_config);
+
+ ret = fprintf(fp, "\n");
+ ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
+ ret += fprintf(fp, "... scale %d\n", sc.scale);
+ ret += fprintf(fp, "... interval %u\n", sc.interval);
+
+ return ret;
+}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index da1d11c4f8c1..086f4e128d63 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -90,4 +90,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist);
int perf_stat_process_counter(struct perf_stat_config *config,
struct perf_evsel *counter);
+struct perf_tool;
+union perf_event;
+struct perf_session;
+int perf_event__process_stat_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
+
+size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
#endif
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index fc8781de62db..7f7e072be746 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -342,22 +342,6 @@ char *rtrim(char *s)
return s;
}
-/**
- * memdup - duplicate region of memory
- * @src: memory region to duplicate
- * @len: memory region length
- */
-void *memdup(const void *src, size_t len)
-{
- void *p;
-
- p = malloc(len);
- if (p)
- memcpy(p, src, len);
-
- return p;
-}
-
char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints)
{
/*
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index bdf98f6f27bb..0d3dfcb919b4 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -126,6 +126,11 @@ static int strlist__parse_list_entry(struct strlist *slist, const char *s,
err = strlist__load(slist, subst);
goto out;
}
+
+ if (slist->file_only) {
+ err = -ENOENT;
+ goto out;
+ }
}
err = strlist__add(slist, s);
@@ -157,11 +162,13 @@ struct strlist *strlist__new(const char *list, const struct strlist_config *conf
if (slist != NULL) {
bool dupstr = true;
+ bool file_only = false;
const char *dirname = NULL;
if (config) {
dupstr = !config->dont_dupstr;
dirname = config->dirname;
+ file_only = config->file_only;
}
rblist__init(&slist->rblist);
@@ -170,6 +177,7 @@ struct strlist *strlist__new(const char *list, const struct strlist_config *conf
slist->rblist.node_delete = strlist__node_delete;
slist->dupstr = dupstr;
+ slist->file_only = file_only;
if (list && strlist__parse_list(slist, list, dirname) != 0)
goto out_error;
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 297565aa7535..ca990029e243 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -13,11 +13,18 @@ struct str_node {
struct strlist {
struct rblist rblist;
- bool dupstr;
+ bool dupstr;
+ bool file_only;
};
+/*
+ * @file_only: When dirname is present, treat entries purely as filenames;
+ *             an entry is not added to the list if dirname/entry cannot
+ *             be found.
+ */
struct strlist_config {
bool dont_dupstr;
+ bool file_only;
const char *dirname;
};
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 475d88d0a1c9..562b8ebeae5b 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1026,8 +1026,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
curr_dso->long_name_len = dso->long_name_len;
curr_map = map__new2(start, curr_dso,
map->type);
+ dso__put(curr_dso);
if (curr_map == NULL) {
- dso__put(curr_dso);
goto out_elf_end;
}
if (adjust_kernel_syms) {
@@ -1042,7 +1042,14 @@ int dso__load_sym(struct dso *dso, struct map *map,
}
curr_dso->symtab_type = dso->symtab_type;
map_groups__insert(kmaps, curr_map);
+ /*
+			 * Add it before we drop the reference to curr_map,
+ * i.e. while we still are sure to have a reference
+ * to this DSO via curr_map->dso.
+ */
dsos__add(&map->groups->machine->dsos, curr_dso);
+ /* kmaps already got it */
+ map__put(curr_map);
dso__set_loaded(curr_dso, map->type);
} else
curr_dso = curr_map->dso;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index cd08027a6d2c..ab02209a7cf3 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -39,6 +39,7 @@ struct symbol_conf symbol_conf = {
.cumulate_callchain = true,
.show_hist_headers = true,
.symfs = "",
+ .event_group = true,
};
static enum dso_binary_type binary_type_symtab[] = {
@@ -1465,7 +1466,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
* Read the build id if possible. This is required for
* DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
*/
- if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)
+ if (filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
dso__set_build_id(dso, build_id);
/*
@@ -1860,24 +1861,44 @@ static void vmlinux_path__exit(void)
zfree(&vmlinux_path);
}
+static const char * const vmlinux_paths[] = {
+ "vmlinux",
+ "/boot/vmlinux"
+};
+
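+/* Each entry below is a template; '%s' is filled with the kernel version. */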
+static const char * const vmlinux_paths_upd[] = {
+ "/boot/vmlinux-%s",
+ "/usr/lib/debug/boot/vmlinux-%s",
+ "/lib/modules/%s/build/vmlinux",
+ "/usr/lib/debug/lib/modules/%s/vmlinux",
+ "/usr/lib/debug/boot/vmlinux-%s.debug"
+};
+
+static int vmlinux_path__add(const char *new_entry)
+{
+ vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ return -1;
+ ++vmlinux_path__nr_entries;
+
+ return 0;
+}
+
static int vmlinux_path__init(struct perf_env *env)
{
struct utsname uts;
char bf[PATH_MAX];
char *kernel_version;
+ unsigned int i;
- vmlinux_path = malloc(sizeof(char *) * 6);
+ vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
+ ARRAY_SIZE(vmlinux_paths_upd)));
if (vmlinux_path == NULL)
return -1;
- vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
- vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
+ for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
+ if (vmlinux_path__add(vmlinux_paths[i]) < 0)
+ goto out_fail;
/* only try kernel version if no symfs was given */
if (symbol_conf.symfs[0] != 0)
@@ -1892,28 +1913,11 @@ static int vmlinux_path__init(struct perf_env *env)
kernel_version = uts.release;
}
- snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version);
- vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
- snprintf(bf, sizeof(bf), "/usr/lib/debug/boot/vmlinux-%s",
- kernel_version);
- vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
- snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version);
- vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
- snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
- kernel_version);
- vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
+ for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
+ snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
+ if (vmlinux_path__add(bf) < 0)
+ goto out_fail;
+ }
return 0;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index dcd786e364f2..ccd1caa40e11 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -108,7 +108,9 @@ struct symbol_conf {
show_hist_headers,
branch_callstack,
has_filter,
- show_ref_callgraph;
+ show_ref_callgraph,
+ hide_unresolved,
+ raw_trace;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
diff --git a/tools/perf/util/term.c b/tools/perf/util/term.c
new file mode 100644
index 000000000000..90b47d8aa19c
--- /dev/null
+++ b/tools/perf/util/term.c
@@ -0,0 +1,35 @@
+#include "util.h"
+
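+/*
+ * Prefer LINES/COLUMNS from the environment, then the TIOCGWINSZ ioctl,
+ * and finally fall back to an 80x25 terminal.
+ */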
+void get_term_dimensions(struct winsize *ws)
+{
+ char *s = getenv("LINES");
+
+ if (s != NULL) {
+ ws->ws_row = atoi(s);
+ s = getenv("COLUMNS");
+ if (s != NULL) {
+ ws->ws_col = atoi(s);
+ if (ws->ws_row && ws->ws_col)
+ return;
+ }
+ }
+#ifdef TIOCGWINSZ
+ if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
+ ws->ws_row && ws->ws_col)
+ return;
+#endif
+ ws->ws_row = 25;
+ ws->ws_col = 80;
+}
+
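+/*
+ * Switch the terminal to non-canonical mode with echo disabled; zero
+ * VMIN/VTIME makes reads return immediately even with no input.
+ */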
+void set_term_quiet_input(struct termios *old)
+{
+ struct termios tc;
+
+ tcgetattr(0, old);
+ tc = *old;
+ tc.c_lflag &= ~(ICANON | ECHO);
+ tc.c_cc[VMIN] = 0;
+ tc.c_cc[VTIME] = 0;
+ tcsetattr(0, TCSANOW, &tc);
+}
diff --git a/tools/perf/util/term.h b/tools/perf/util/term.h
new file mode 100644
index 000000000000..2c06a61846a1
--- /dev/null
+++ b/tools/perf/util/term.h
@@ -0,0 +1,10 @@
+#ifndef __PERF_TERM_H
+#define __PERF_TERM_H
+
+struct termios;
+struct winsize;
+
+void get_term_dimensions(struct winsize *ws);
+void set_term_quiet_input(struct termios *old);
+
+#endif /* __PERF_TERM_H */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 0a9ae8014729..dfd00c6dad6e 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -19,8 +19,10 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
thread->mg = map_groups__new(machine);
} else {
leader = __machine__findnew_thread(machine, pid, pid);
- if (leader)
+ if (leader) {
thread->mg = map_groups__get(leader->mg);
+ thread__put(leader);
+ }
}
return thread->mg ? 0 : -1;
@@ -53,7 +55,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
goto err_thread;
list_add(&comm->list, &thread->comm_list);
- atomic_set(&thread->refcnt, 0);
+ atomic_set(&thread->refcnt, 1);
RB_CLEAR_NODE(&thread->rb_node);
}
@@ -95,6 +97,10 @@ struct thread *thread__get(struct thread *thread)
void thread__put(struct thread *thread)
{
if (thread && atomic_dec_and_test(&thread->refcnt)) {
+ /*
+		 * Remove it from the dead_threads list, as the last
+		 * reference to it is gone.
+ */
list_del_init(&thread->node);
thread__delete(thread);
}
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 6ec3c5ca438f..08afc6909953 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -13,6 +13,7 @@
#include "thread_map.h"
#include "util.h"
#include "debug.h"
+#include "event.h"
/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
@@ -304,6 +305,7 @@ out:
out_free_threads:
zfree(&threads);
+ strlist__delete(slist);
goto out;
}
@@ -408,3 +410,29 @@ void thread_map__read_comms(struct thread_map *threads)
for (i = 0; i < threads->nr; ++i)
comm_init(threads, i);
}
+
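+/*
+ * Rebuild a thread_map from a thread_map event, copying each entry's
+ * pid and its fixed-size comm string.
+ */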
+static void thread_map__copy_event(struct thread_map *threads,
+ struct thread_map_event *event)
+{
+ unsigned i;
+
+ threads->nr = (int) event->nr;
+
+ for (i = 0; i < event->nr; i++) {
+ thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
+ threads->map[i].comm = strndup(event->entries[i].comm, 16);
+ }
+
+ atomic_set(&threads->refcnt, 1);
+}
+
+struct thread_map *thread_map__new_event(struct thread_map_event *event)
+{
+ struct thread_map *threads;
+
+ threads = thread_map__alloc(event->nr);
+ if (threads)
+ thread_map__copy_event(threads, event);
+
+ return threads;
+}
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index af679d8a50f8..85e4c7c4fbde 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -16,11 +16,14 @@ struct thread_map {
struct thread_map_data map[];
};
+struct thread_map_event;
+
struct thread_map *thread_map__new_dummy(void);
struct thread_map *thread_map__new_by_pid(pid_t pid);
struct thread_map *thread_map__new_by_tid(pid_t tid);
struct thread_map *thread_map__new_by_uid(uid_t uid);
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
+struct thread_map *thread_map__new_event(struct thread_map_event *event);
struct thread_map *thread_map__get(struct thread_map *map);
void thread_map__put(struct thread_map *map);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index cab8cc24831b..55de4cffcd4e 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -50,12 +50,18 @@ struct perf_tool {
throttle,
unthrottle;
event_attr_op attr;
+ event_attr_op event_update;
event_op2 tracing_data;
event_oe finished_round;
event_op2 build_id,
id_index,
auxtrace_info,
- auxtrace_error;
+ auxtrace_error,
+ thread_map,
+ cpu_map,
+ stat_config,
+ stat,
+ stat_round;
event_op3 auxtrace;
bool ordered_events;
bool ordering_requires_timestamps;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 8ff7d620d942..33b52eaa39db 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -209,7 +209,7 @@ static const struct flag flags[] = {
{ "NET_TX_SOFTIRQ", 2 },
{ "NET_RX_SOFTIRQ", 3 },
{ "BLOCK_SOFTIRQ", 4 },
- { "BLOCK_IOPOLL_SOFTIRQ", 5 },
+ { "IRQ_POLL_SOFTIRQ", 5 },
{ "TASKLET_SOFTIRQ", 6 },
{ "SCHED_SOFTIRQ", 7 },
{ "HRTIMER_SOFTIRQ", 8 },
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index b85ee55cca0c..bce5b1dac268 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -65,6 +65,7 @@ int tracing_data_put(struct tracing_data *tdata);
struct addr_location;
struct perf_session;
+struct perf_stat_config;
struct scripting_ops {
const char *name;
@@ -75,6 +76,9 @@ struct scripting_ops {
struct perf_sample *sample,
struct perf_evsel *evsel,
struct addr_location *al);
+ void (*process_stat)(struct perf_stat_config *config,
+ struct perf_evsel *evsel, u64 tstamp);
+ void (*process_stat_interval)(u64 tstamp);
int (*generate_script) (struct pevent *pevent, const char *outfile);
};
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 2dcfe9a7c8d0..cf5e250bc78e 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include "event.h"
#include "perf_regs.h"
+#include "callchain.h"
static char *debuginfo_path;
@@ -52,25 +53,28 @@ static int report_module(u64 ip, struct unwind_info *ui)
return __report_module(&al, ip, ui);
}
+/*
+ * Store all entries in the entries array; we will
+ * process them after the unwind finishes.
+ */
static int entry(u64 ip, struct unwind_info *ui)
{
- struct unwind_entry e;
+ struct unwind_entry *e = &ui->entries[ui->idx++];
struct addr_location al;
if (__report_module(&al, ip, ui))
return -1;
- e.ip = ip;
- e.map = al.map;
- e.sym = al.sym;
+ e->ip = ip;
+ e->map = al.map;
+ e->sym = al.sym;
pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
al.sym ? al.sym->name : "''",
ip,
al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
-
- return ui->cb(&e, ui->arg);
+ return 0;
}
static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp)
@@ -92,6 +96,16 @@ static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
MAP__FUNCTION, addr, &al);
if (!al.map) {
+ /*
+		 * We've seen cases (softice) where the DWARF unwinder went
+		 * through non-executable mmaps, which we need to look up
+		 * in the MAP__VARIABLE tree.
+ */
+ thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
+ MAP__VARIABLE, addr, &al);
+ }
+
+ if (!al.map) {
pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
return -1;
}
@@ -168,7 +182,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct perf_sample *data,
int max_stack)
{
- struct unwind_info ui = {
+ struct unwind_info *ui, ui_buf = {
.sample = data,
.thread = thread,
.machine = thread->mg->machine,
@@ -177,35 +191,54 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
.max_stack = max_stack,
};
Dwarf_Word ip;
- int err = -EINVAL;
+ int err = -EINVAL, i;
if (!data->user_regs.regs)
return -EINVAL;
- ui.dwfl = dwfl_begin(&offline_callbacks);
- if (!ui.dwfl)
+ ui = zalloc(sizeof(ui_buf) + sizeof(ui_buf.entries[0]) * max_stack);
+ if (!ui)
+ return -ENOMEM;
+
+ *ui = ui_buf;
+
+ ui->dwfl = dwfl_begin(&offline_callbacks);
+ if (!ui->dwfl)
goto out;
err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
if (err)
goto out;
- err = report_module(ip, &ui);
+ err = report_module(ip, ui);
if (err)
goto out;
- if (!dwfl_attach_state(ui.dwfl, EM_NONE, thread->tid, &callbacks, &ui))
+ if (!dwfl_attach_state(ui->dwfl, EM_NONE, thread->tid, &callbacks, ui))
goto out;
- err = dwfl_getthread_frames(ui.dwfl, thread->tid, frame_callback, &ui);
+ err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
- if (err && !ui.max_stack)
+ if (err && !ui->max_stack)
err = 0;
+ /*
+ * Display what we got based on the order setup.
+ */
+ for (i = 0; i < ui->idx && !err; i++) {
+ int j = i;
+
+ if (callchain_param.order == ORDER_CALLER)
+ j = ui->idx - i - 1;
+
+ err = ui->entries[j].ip ? ui->cb(&ui->entries[j], ui->arg) : 0;
+ }
+
out:
if (err)
pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1));
- dwfl_end(ui.dwfl);
+ dwfl_end(ui->dwfl);
+ free(ui);
return 0;
}
diff --git a/tools/perf/util/unwind-libdw.h b/tools/perf/util/unwind-libdw.h
index 417a1426f3ad..58328669ed16 100644
--- a/tools/perf/util/unwind-libdw.h
+++ b/tools/perf/util/unwind-libdw.h
@@ -16,6 +16,8 @@ struct unwind_info {
unwind_entry_cb_t cb;
void *arg;
int max_stack;
+ int idx;
+ struct unwind_entry entries[];
};
#endif /* __PERF_UNWIND_LIBDW_H */
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index c83832b555e5..ee7e372297e5 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -319,6 +319,15 @@ static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
MAP__FUNCTION, ip, &al);
+ if (!al.map) {
+ /*
+		 * We've seen cases (softice) where the DWARF unwinder went
+		 * through non-executable mmaps, which we need to look up
+		 * in the MAP__VARIABLE tree.
+ */
+ thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
+ MAP__VARIABLE, ip, &al);
+ }
return al.map;
}
@@ -416,20 +425,19 @@ get_proc_name(unw_addr_space_t __maybe_unused as,
static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
unw_word_t *data)
{
- struct addr_location al;
+ struct map *map;
ssize_t size;
- thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
- MAP__FUNCTION, addr, &al);
- if (!al.map) {
+ map = find_map(addr, ui);
+ if (!map) {
pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
return -1;
}
- if (!al.map->dso)
+ if (!map->dso)
return -1;
- size = dso__data_read_addr(al.map->dso, al.map, ui->machine,
+ size = dso__data_read_addr(map->dso, map, ui->machine,
addr, (u8 *) data, sizeof(*data));
return !(size == sizeof(*data));
@@ -614,23 +622,48 @@ void unwind__finish_access(struct thread *thread)
static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
void *arg, int max_stack)
{
+ u64 val;
+ unw_word_t ips[max_stack];
unw_addr_space_t addr_space;
unw_cursor_t c;
- int ret;
-
- addr_space = thread__priv(ui->thread);
- if (addr_space == NULL)
- return -1;
+ int ret, i = 0;
- ret = unw_init_remote(&c, addr_space, ui);
+ ret = perf_reg_value(&val, &ui->sample->user_regs, PERF_REG_IP);
if (ret)
- display_error(ret);
+ return ret;
- while (!ret && (unw_step(&c) > 0) && max_stack--) {
- unw_word_t ip;
+ ips[i++] = (unw_word_t) val;
- unw_get_reg(&c, UNW_REG_IP, &ip);
- ret = ip ? entry(ip, ui->thread, cb, arg) : 0;
+ /*
+ * If we need more than one entry, do the DWARF
+ * unwind itself.
+ */
+ if (max_stack - 1 > 0) {
+ addr_space = thread__priv(ui->thread);
+ if (addr_space == NULL)
+ return -1;
+
+ ret = unw_init_remote(&c, addr_space, ui);
+ if (ret)
+ display_error(ret);
+
+ while (!ret && (unw_step(&c) > 0) && i < max_stack) {
+ unw_get_reg(&c, UNW_REG_IP, &ips[i]);
+ ++i;
+ }
+
+ max_stack = i;
+ }
+
+ /*
+ * Display what we got based on the order setup.
+ */
+ for (i = 0; i < max_stack && !ret; i++) {
+ int j = i;
+
+ if (callchain_param.order == ORDER_CALLER)
+ j = max_stack - i - 1;
+ ret = ips[j] ? entry(ips[j], ui->thread, cb, arg) : 0;
}
return ret;
@@ -640,24 +673,17 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct thread *thread,
struct perf_sample *data, int max_stack)
{
- u64 ip;
struct unwind_info ui = {
.sample = data,
.thread = thread,
.machine = thread->mg->machine,
};
- int ret;
if (!data->user_regs.regs)
return -EINVAL;
- ret = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
- if (ret)
- return ret;
-
- ret = entry(ip, thread, cb, arg);
- if (ret)
- return -ENOMEM;
+ if (max_stack <= 0)
+ return -EINVAL;
- return --max_stack > 0 ? get_entries(&ui, cb, arg, max_stack) : 0;
+ return get_entries(&ui, cb, arg, max_stack);
}
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 47b1e36c7ea0..ead9509835d2 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -16,12 +16,14 @@
#include <linux/kernel.h>
#include <unistd.h>
#include "callchain.h"
+#include "strlist.h"
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_ABS,
.min_percent = 0.5,
.order = ORDER_CALLEE,
- .key = CCKEY_FUNCTION
+ .key = CCKEY_FUNCTION,
+ .value = CCVAL_PERCENT,
};
/*
@@ -351,41 +353,8 @@ void sighandler_dump_stack(int sig)
{
psignal(sig, "perf");
dump_stack();
- exit(sig);
-}
-
-void get_term_dimensions(struct winsize *ws)
-{
- char *s = getenv("LINES");
-
- if (s != NULL) {
- ws->ws_row = atoi(s);
- s = getenv("COLUMNS");
- if (s != NULL) {
- ws->ws_col = atoi(s);
- if (ws->ws_row && ws->ws_col)
- return;
- }
- }
-#ifdef TIOCGWINSZ
- if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
- ws->ws_row && ws->ws_col)
- return;
-#endif
- ws->ws_row = 25;
- ws->ws_col = 80;
-}
-
-void set_term_quiet_input(struct termios *old)
-{
- struct termios tc;
-
- tcgetattr(0, old);
- tc = *old;
- tc.c_lflag &= ~(ICANON | ECHO);
- tc.c_cc[VMIN] = 0;
- tc.c_cc[VTIME] = 0;
- tcsetattr(0, TCSANOW, &tc);
+ signal(sig, SIG_DFL);
+ raise(sig);
}
int parse_nsec_time(const char *str, u64 *ptime)
@@ -695,3 +664,30 @@ fetch_kernel_version(unsigned int *puint, char *str,
*puint = (version << 16) + (patchlevel << 8) + sublevel;
return 0;
}
+
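+/*
+ * Pick a random entry from 'dirpath'/tips.txt; NULL is returned when
+ * the file is missing or empty.
+ */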
+const char *perf_tip(const char *dirpath)
+{
+ struct strlist *tips;
+ struct str_node *node;
+ char *tip = NULL;
+ struct strlist_config conf = {
+ .dirname = dirpath,
+ .file_only = true,
+ };
+
+ tips = strlist__new("tips.txt", &conf);
+ if (tips == NULL)
+ return errno == ENOENT ? NULL : "Tip: get more memory! ;-p";
+
+ if (strlist__nr_entries(tips) == 0)
+ goto out;
+
+ node = strlist__entry(tips, random() % strlist__nr_entries(tips));
+ if (asprintf(&tip, "Tip: %s", node->s) < 0)
+ tip = (char *)"Tip: get more memory! ;-)";
+
+out:
+ strlist__delete(tips);
+
+ return tip;
+}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index dcc659017976..fe915e616f9b 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -53,6 +53,7 @@
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
+#include <term.h>
#include <errno.h>
#include <limits.h>
#include <sys/param.h>
@@ -150,12 +151,6 @@ extern void set_warning_routine(void (*routine)(const char *err, va_list params)
extern int prefixcmp(const char *str, const char *prefix);
extern void set_buildid_dir(const char *dir);
-static inline const char *skip_prefix(const char *str, const char *prefix)
-{
- size_t len = strlen(prefix);
- return strncmp(str, prefix, len) ? NULL : str + len;
-}
-
#ifdef __GLIBC_PREREQ
#if __GLIBC_PREREQ(2, 1)
#define HAVE_STRCHRNUL
@@ -186,14 +181,6 @@ static inline void *zalloc(size_t size)
#define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
-static inline int has_extension(const char *filename, const char *ext)
-{
- size_t len = strlen(filename);
- size_t extlen = strlen(ext);
-
- return len > extlen && !memcmp(filename + len - extlen, ext, extlen);
-}
-
/* Sane ctype - no locale, and works with signed chars */
#undef isascii
#undef isspace
@@ -282,9 +269,6 @@ void sighandler_dump_stack(int sig);
extern unsigned int page_size;
extern int cacheline_size;
-void get_term_dimensions(struct winsize *ws);
-void set_term_quiet_input(struct termios *old);
-
struct parse_tag {
char tag;
int mult;
@@ -358,4 +342,6 @@ int fetch_kernel_version(unsigned int *puint,
#define KVER_FMT "%d.%d.%d"
#define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
+const char *perf_tip(const char *dirpath);
+
#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/power/acpi/Makefile b/tools/power/acpi/Makefile
index e882c8320135..a8bf9081512b 100644
--- a/tools/power/acpi/Makefile
+++ b/tools/power/acpi/Makefile
@@ -10,18 +10,18 @@
include ../../scripts/Makefile.include
-all: acpidump ec
-clean: acpidump_clean ec_clean
-install: acpidump_install ec_install
-uninstall: acpidump_uninstall ec_uninstall
+all: acpidbg acpidump ec
+clean: acpidbg_clean acpidump_clean ec_clean
+install: acpidbg_install acpidump_install ec_install
+uninstall: acpidbg_uninstall acpidump_uninstall ec_uninstall
-acpidump ec: FORCE
+acpidbg acpidump ec: FORCE
$(call descend,tools/$@,all)
-acpidump_clean ec_clean:
+acpidbg_clean acpidump_clean ec_clean:
$(call descend,tools/$(@:_clean=),clean)
-acpidump_install ec_install:
+acpidbg_install acpidump_install ec_install:
$(call descend,tools/$(@:_install=),install)
-acpidump_uninstall ec_uninstall:
+acpidbg_uninstall acpidump_uninstall ec_uninstall:
$(call descend,tools/$(@:_uninstall=),uninstall)
.PHONY: FORCE
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index eec688041500..e73a79fce015 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 326e826a5d20..0bd343f136a4 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
* Option strings:
* "f" - Option has no arguments
* "f:" - Option requires an argument
+ * "f+" - Option has an optional argument
* "f^" - Option has optional single-char sub-options
* "f|" - Option has required single-char sub-options
*/
@@ -85,6 +86,7 @@ static int current_char_ptr = 1;
int acpi_getopt_argument(int argc, char **argv)
{
+
acpi_gbl_optind--;
current_char_ptr++;
diff --git a/tools/power/acpi/os_specific/service_layers/oslibcfs.c b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
index b51e40a9a120..11f4aba55aab 100644
--- a/tools/power/acpi/os_specific/service_layers/oslibcfs.c
+++ b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,6 +73,7 @@ ACPI_FILE acpi_os_open_file(const char *path, u8 modes)
if (modes & ACPI_FILE_WRITING) {
modes_str[i++] = 'w';
}
+
if (modes & ACPI_FILE_BINARY) {
modes_str[i++] = 'b';
}
@@ -101,6 +102,7 @@ ACPI_FILE acpi_os_open_file(const char *path, u8 modes)
void acpi_os_close_file(ACPI_FILE file)
{
+
fclose(file);
}
@@ -202,6 +204,7 @@ acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from)
if (from == ACPI_FILE_BEGIN) {
ret = fseek(file, offset, SEEK_SET);
}
+
if (from == ACPI_FILE_END) {
ret = fseek(file, offset, SEEK_END);
}
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index dd5008b0617a..d0e6b857d8d1 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index e153fcb12b1a..66c4badf03e5 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 44ad4889d468..3818fd07e50f 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 6858c0893c91..08cb8b2035f2 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidbg/Makefile b/tools/power/acpi/tools/acpidbg/Makefile
new file mode 100644
index 000000000000..352df4b41ae9
--- /dev/null
+++ b/tools/power/acpi/tools/acpidbg/Makefile
@@ -0,0 +1,27 @@
+# tools/power/acpi/tools/acpidbg/Makefile - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+# Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+include ../../Makefile.config
+
+TOOL = acpidbg
+vpath %.c \
+ ../../../../../drivers/acpi/acpica\
+ ../../common\
+ ../../os_specific/service_layers\
+ .
+CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\
+ -I.\
+ -I../../../../../drivers/acpi/acpica\
+ -I../../../../../include
+LDFLAGS += -lpthread
+TOOL_OBJS = \
+ acpidbg.o
+
+include ../../Makefile.rules
diff --git a/tools/power/acpi/tools/acpidbg/acpidbg.c b/tools/power/acpi/tools/acpidbg/acpidbg.c
new file mode 100644
index 000000000000..d070fccdba6d
--- /dev/null
+++ b/tools/power/acpi/tools/acpidbg/acpidbg.c
@@ -0,0 +1,438 @@
+/*
+ * ACPI AML interfacing userspace utility
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Authors: Lv Zheng <lv.zheng@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <acpi/acpi.h>
+
+/* Headers not included by include/acpi/platform/aclinux.h */
+#include <stdbool.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <linux/circ_buf.h>
+
+#define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg"
+#define ACPI_AML_SEC_TICK 1
+#define ACPI_AML_USEC_PEEK 200
+#define ACPI_AML_BUF_SIZE 4096
+
+#define ACPI_AML_BATCH_WRITE_CMD 0x00 /* Write command to kernel */
+#define ACPI_AML_BATCH_READ_LOG 0x01 /* Read log from kernel */
+#define ACPI_AML_BATCH_WRITE_LOG 0x02 /* Write log to console */
+
+#define ACPI_AML_LOG_START 0x00
+#define ACPI_AML_PROMPT_START 0x01
+#define ACPI_AML_PROMPT_STOP 0x02
+#define ACPI_AML_LOG_STOP 0x03
+#define ACPI_AML_PROMPT_ROLL 0x04
+
+#define ACPI_AML_INTERACTIVE 0x00
+#define ACPI_AML_BATCH 0x01
+
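+/*
+ * Circular buffer helpers: ACPI_AML_BUF_SIZE must stay a power of two
+ * for the CIRC_*() macros and the '& (ACPI_AML_BUF_SIZE - 1)' index
+ * wrapping below to work.
+ */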
+#define circ_count(circ) \
+ (CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
+#define circ_count_to_end(circ) \
+ (CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
+#define circ_space(circ) \
+ (CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
+#define circ_space_to_end(circ) \
+ (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
+
+#define acpi_aml_cmd_count() circ_count(&acpi_aml_cmd_crc)
+#define acpi_aml_log_count() circ_count(&acpi_aml_log_crc)
+#define acpi_aml_cmd_space() circ_space(&acpi_aml_cmd_crc)
+#define acpi_aml_log_space() circ_space(&acpi_aml_log_crc)
+
+#define ACPI_AML_DO(_fd, _op, _buf, _ret) \
+ do { \
+ _ret = acpi_aml_##_op(_fd, &acpi_aml_##_buf##_crc); \
+ if (_ret == 0) { \
+ fprintf(stderr, \
+ "%s %s pipe closed.\n", #_buf, #_op); \
+ return; \
+ } \
+ } while (0)
+#define ACPI_AML_BATCH_DO(_fd, _op, _buf, _ret) \
+ do { \
+ _ret = acpi_aml_##_op##_batch_##_buf(_fd, \
+ &acpi_aml_##_buf##_crc); \
+ if (_ret == 0) \
+ return; \
+ } while (0)
+
+
+static char acpi_aml_cmd_buf[ACPI_AML_BUF_SIZE];
+static char acpi_aml_log_buf[ACPI_AML_BUF_SIZE];
+static struct circ_buf acpi_aml_cmd_crc = {
+ .buf = acpi_aml_cmd_buf,
+ .head = 0,
+ .tail = 0,
+};
+static struct circ_buf acpi_aml_log_crc = {
+ .buf = acpi_aml_log_buf,
+ .head = 0,
+ .tail = 0,
+};
+static const char *acpi_aml_file_path = ACPI_AML_FILE;
+static unsigned long acpi_aml_mode = ACPI_AML_INTERACTIVE;
+static bool acpi_aml_exit;
+
+static bool acpi_aml_batch_drain;
+static unsigned long acpi_aml_batch_state;
+static char acpi_aml_batch_prompt;
+static char acpi_aml_batch_roll;
+static unsigned long acpi_aml_log_state;
+static char *acpi_aml_batch_cmd = NULL;
+static char *acpi_aml_batch_pos = NULL;
+
+static int acpi_aml_set_fl(int fd, int flags)
+{
+ int ret;
+
+ ret = fcntl(fd, F_GETFL, 0);
+ if (ret < 0) {
+ perror("fcntl(F_GETFL)");
+ return ret;
+ }
+ flags |= ret;
+ ret = fcntl(fd, F_SETFL, flags);
+ if (ret < 0) {
+ perror("fcntl(F_SETFL)");
+ return ret;
+ }
+ return ret;
+}
+
+static int acpi_aml_set_fd(int fd, int maxfd, fd_set *set)
+{
+ if (fd > maxfd)
+ maxfd = fd;
+ FD_SET(fd, set);
+ return maxfd;
+}
+
+static int acpi_aml_read(int fd, struct circ_buf *crc)
+{
+ char *p;
+ int len;
+
+ p = &crc->buf[crc->head];
+ len = circ_space_to_end(crc);
+ len = read(fd, p, len);
+ if (len < 0)
+ perror("read");
+ else if (len > 0)
+ crc->head = (crc->head + len) & (ACPI_AML_BUF_SIZE - 1);
+ return len;
+}
+
+static int acpi_aml_read_batch_cmd(int unused, struct circ_buf *crc)
+{
+ char *p;
+ int len;
+ int remained = strlen(acpi_aml_batch_pos);
+
+ p = &crc->buf[crc->head];
+ len = circ_space_to_end(crc);
+ if (len > remained) {
+ memcpy(p, acpi_aml_batch_pos, remained);
+ acpi_aml_batch_pos += remained;
+ len = remained;
+ } else {
+ memcpy(p, acpi_aml_batch_pos, len);
+ acpi_aml_batch_pos += len;
+ }
+ if (len > 0)
+ crc->head = (crc->head + len) & (ACPI_AML_BUF_SIZE - 1);
+ return len;
+}
+
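+/*
+ * Read the kernel debugger output a byte at a time and run a small state
+ * machine that strips the command prompt; once a complete prompt is seen
+ * the batch command's output is finished and the tool can exit.
+ */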
+static int acpi_aml_read_batch_log(int fd, struct circ_buf *crc)
+{
+ char *p;
+ int len;
+ int ret = 0;
+
+ p = &crc->buf[crc->head];
+ len = circ_space_to_end(crc);
+ while (ret < len && acpi_aml_log_state != ACPI_AML_LOG_STOP) {
+ if (acpi_aml_log_state == ACPI_AML_PROMPT_ROLL) {
+ *p = acpi_aml_batch_roll;
+ len = 1;
+ crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
+ ret += 1;
+ acpi_aml_log_state = ACPI_AML_LOG_START;
+ } else {
+ len = read(fd, p, 1);
+ if (len <= 0) {
+ if (len < 0)
+ perror("read");
+ ret = len;
+ break;
+ }
+ }
+ switch (acpi_aml_log_state) {
+ case ACPI_AML_LOG_START:
+ if (*p == '\n')
+ acpi_aml_log_state = ACPI_AML_PROMPT_START;
+ crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
+ ret += 1;
+ break;
+ case ACPI_AML_PROMPT_START:
+ if (*p == ACPI_DEBUGGER_COMMAND_PROMPT ||
+ *p == ACPI_DEBUGGER_EXECUTE_PROMPT) {
+ acpi_aml_batch_prompt = *p;
+ acpi_aml_log_state = ACPI_AML_PROMPT_STOP;
+ } else {
+ if (*p != '\n')
+ acpi_aml_log_state = ACPI_AML_LOG_START;
+ crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
+ ret += 1;
+ }
+ break;
+ case ACPI_AML_PROMPT_STOP:
+ if (*p == ' ') {
+ acpi_aml_log_state = ACPI_AML_LOG_STOP;
+ acpi_aml_exit = true;
+ } else {
+ /* Roll back */
+ acpi_aml_log_state = ACPI_AML_PROMPT_ROLL;
+ acpi_aml_batch_roll = *p;
+ *p = acpi_aml_batch_prompt;
+ crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
+ ret += 1;
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int acpi_aml_write(int fd, struct circ_buf *crc)
+{
+ char *p;
+ int len;
+
+ p = &crc->buf[crc->tail];
+ len = circ_count_to_end(crc);
+ len = write(fd, p, len);
+ if (len < 0)
+ perror("write");
+ else if (len > 0)
+ crc->tail = (crc->tail + len) & (ACPI_AML_BUF_SIZE - 1);
+ return len;
+}
+
+static int acpi_aml_write_batch_log(int fd, struct circ_buf *crc)
+{
+ char *p;
+ int len;
+
+ p = &crc->buf[crc->tail];
+ len = circ_count_to_end(crc);
+ if (!acpi_aml_batch_drain) {
+ len = write(fd, p, len);
+ if (len < 0)
+ perror("write");
+ }
+ if (len > 0)
+ crc->tail = (crc->tail + len) & (ACPI_AML_BUF_SIZE - 1);
+ return len;
+}
+
+static int acpi_aml_write_batch_cmd(int fd, struct circ_buf *crc)
+{
+ int len;
+
+ len = acpi_aml_write(fd, crc);
+ if (circ_count_to_end(crc) == 0)
+ acpi_aml_batch_state = ACPI_AML_BATCH_READ_LOG;
+ return len;
+}
+
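+/*
+ * Main select() loop: stdin (or the batch command) feeds the command
+ * buffer, the acpidbg file consumes commands and produces log data, and
+ * stdout drains the log buffer.  Each fd is only polled while the
+ * corresponding circular buffer has space or data.
+ */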
+static void acpi_aml_loop(int fd)
+{
+ fd_set rfds;
+ fd_set wfds;
+ struct timeval tv;
+ int ret;
+ int maxfd = 0;
+
+ if (acpi_aml_mode == ACPI_AML_BATCH) {
+ acpi_aml_log_state = ACPI_AML_LOG_START;
+ acpi_aml_batch_pos = acpi_aml_batch_cmd;
+ if (acpi_aml_batch_drain)
+ acpi_aml_batch_state = ACPI_AML_BATCH_READ_LOG;
+ else
+ acpi_aml_batch_state = ACPI_AML_BATCH_WRITE_CMD;
+ }
+ acpi_aml_exit = false;
+ while (!acpi_aml_exit) {
+ tv.tv_sec = ACPI_AML_SEC_TICK;
+ tv.tv_usec = 0;
+ FD_ZERO(&rfds);
+ FD_ZERO(&wfds);
+
+ if (acpi_aml_cmd_space()) {
+ if (acpi_aml_mode == ACPI_AML_INTERACTIVE)
+ maxfd = acpi_aml_set_fd(STDIN_FILENO, maxfd, &rfds);
+ else if (strlen(acpi_aml_batch_pos) &&
+ acpi_aml_batch_state == ACPI_AML_BATCH_WRITE_CMD)
+ ACPI_AML_BATCH_DO(STDIN_FILENO, read, cmd, ret);
+ }
+ if (acpi_aml_cmd_count() &&
+ (acpi_aml_mode == ACPI_AML_INTERACTIVE ||
+ acpi_aml_batch_state == ACPI_AML_BATCH_WRITE_CMD))
+ maxfd = acpi_aml_set_fd(fd, maxfd, &wfds);
+ if (acpi_aml_log_space() &&
+ (acpi_aml_mode == ACPI_AML_INTERACTIVE ||
+ acpi_aml_batch_state == ACPI_AML_BATCH_READ_LOG))
+ maxfd = acpi_aml_set_fd(fd, maxfd, &rfds);
+ if (acpi_aml_log_count())
+ maxfd = acpi_aml_set_fd(STDOUT_FILENO, maxfd, &wfds);
+
+ ret = select(maxfd+1, &rfds, &wfds, NULL, &tv);
+ if (ret < 0) {
+ perror("select");
+ break;
+ }
+ if (ret > 0) {
+ if (FD_ISSET(STDIN_FILENO, &rfds))
+ ACPI_AML_DO(STDIN_FILENO, read, cmd, ret);
+ if (FD_ISSET(fd, &wfds)) {
+ if (acpi_aml_mode == ACPI_AML_BATCH)
+ ACPI_AML_BATCH_DO(fd, write, cmd, ret);
+ else
+ ACPI_AML_DO(fd, write, cmd, ret);
+ }
+ if (FD_ISSET(fd, &rfds)) {
+ if (acpi_aml_mode == ACPI_AML_BATCH)
+ ACPI_AML_BATCH_DO(fd, read, log, ret);
+ else
+ ACPI_AML_DO(fd, read, log, ret);
+ }
+ if (FD_ISSET(STDOUT_FILENO, &wfds)) {
+ if (acpi_aml_mode == ACPI_AML_BATCH)
+ ACPI_AML_BATCH_DO(STDOUT_FILENO, write, log, ret);
+ else
+ ACPI_AML_DO(STDOUT_FILENO, write, log, ret);
+ }
+ }
+ }
+}
+
+static bool acpi_aml_readable(int fd)
+{
+ fd_set rfds;
+ struct timeval tv;
+ int ret;
+ int maxfd = 0;
+
+ tv.tv_sec = 0;
+ tv.tv_usec = ACPI_AML_USEC_PEEK;
+ FD_ZERO(&rfds);
+ maxfd = acpi_aml_set_fd(fd, maxfd, &rfds);
+ ret = select(maxfd+1, &rfds, NULL, NULL, &tv);
+ if (ret < 0)
+ perror("select");
+ if (ret > 0 && FD_ISSET(fd, &rfds))
+ return true;
+ return false;
+}
+
+/*
+ * This is a userspace IO flush implementation relying on the prompt
+ * characters; it can be turned into a flush() call once the kernel
+ * implements a .flush() filesystem operation.
+ */
+static void acpi_aml_flush(int fd)
+{
+ while (acpi_aml_readable(fd)) {
+ acpi_aml_batch_drain = true;
+ acpi_aml_loop(fd);
+ acpi_aml_batch_drain = false;
+ }
+}
+
+void usage(FILE *file, char *progname)
+{
+ fprintf(file, "usage: %s [-b cmd] [-f file] [-h]\n", progname);
+ fprintf(file, "\nOptions:\n");
+ fprintf(file, " -b Specify command to be executed in batch mode\n");
+ fprintf(file, " -f Specify interface file other than");
+ fprintf(file, " /sys/kernel/debug/acpi/acpidbg\n");
+ fprintf(file, " -h Print this help message\n");
+}
+
+int main(int argc, char **argv)
+{
+	int fd = -1;
+ int ch;
+ int len;
+ int ret = EXIT_SUCCESS;
+
+ while ((ch = getopt(argc, argv, "b:f:h")) != -1) {
+ switch (ch) {
+ case 'b':
+ if (acpi_aml_batch_cmd) {
+				fprintf(stderr, "Already specified %s\n",
+ acpi_aml_batch_cmd);
+ ret = EXIT_FAILURE;
+ goto exit;
+ }
+ len = strlen(optarg);
+ acpi_aml_batch_cmd = calloc(len + 2, 1);
+ if (!acpi_aml_batch_cmd) {
+ perror("calloc");
+ ret = EXIT_FAILURE;
+ goto exit;
+ }
+ memcpy(acpi_aml_batch_cmd, optarg, len);
+ acpi_aml_batch_cmd[len] = '\n';
+ acpi_aml_mode = ACPI_AML_BATCH;
+ break;
+ case 'f':
+ acpi_aml_file_path = optarg;
+ break;
+ case 'h':
+ usage(stdout, argv[0]);
+ goto exit;
+ break;
+ case '?':
+ default:
+ usage(stderr, argv[0]);
+ ret = EXIT_FAILURE;
+ goto exit;
+ break;
+ }
+ }
+
+ fd = open(acpi_aml_file_path, O_RDWR | O_NONBLOCK);
+ if (fd < 0) {
+ perror("open");
+ ret = EXIT_FAILURE;
+ goto exit;
+ }
+ acpi_aml_set_fl(STDIN_FILENO, O_NONBLOCK);
+ acpi_aml_set_fl(STDOUT_FILENO, O_NONBLOCK);
+
+ if (acpi_aml_mode == ACPI_AML_BATCH)
+ acpi_aml_flush(fd);
+ acpi_aml_loop(fd);
+
+exit:
+	if (fd >= 0)
+ close(fd);
+ if (acpi_aml_batch_cmd)
+ free(acpi_aml_batch_cmd);
+ return ret;
+}
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index eed534481434..025c232e920d 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 61d0de804b70..da44458d3b6c 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index a1c62de42a3b..5fcd9700ac18 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,18 @@
static int ap_is_existing_file(char *pathname);
+/******************************************************************************
+ *
+ * FUNCTION: ap_is_existing_file
+ *
+ * PARAMETERS: pathname - Output filename
+ *
+ * RETURN: 0 on success
+ *
+ * DESCRIPTION: Query for file overwrite if it already exists.
+ *
+ ******************************************************************************/
+
static int ap_is_existing_file(char *pathname)
{
#ifndef _GNU_EFI
@@ -136,6 +148,7 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
} else {
ACPI_MOVE_NAME(filename, table->signature);
}
+
filename[0] = (char)tolower((int)filename[0]);
filename[1] = (char)tolower((int)filename[1]);
filename[2] = (char)tolower((int)filename[2]);
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 57620f66ae6c..c3c09152fac6 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2015, Intel Corp.
+ * Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 2e2ba2efa0d9..0adaf0c7c03a 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -47,6 +47,11 @@ NLS ?= true
# cpufreq-bench benchmarking tool
CPUFREQ_BENCH ?= true
+# Do not link against the shared libraries; build the code in statically
+# (the libraries are still built, otherwise the Makefile code would
+# be rather ugly).
+export STATIC ?= false
+
# Prefix to the directories we're installing to
DESTDIR ?=
@@ -161,6 +166,12 @@ ifeq ($(strip $(CPUFREQ_BENCH)),true)
COMPILE_BENCH += compile-bench
endif
+ifeq ($(strip $(STATIC)),true)
+ UTIL_OBJS += $(LIB_OBJS)
+ UTIL_HEADERS += $(LIB_HEADERS)
+ UTIL_SRC += $(LIB_SRC)
+endif
+
CFLAGS += $(WARNINGS)
ifeq ($(strip $(V)),false)
@@ -209,7 +220,11 @@ $(OUTPUT)%.o: %.c
$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
$(ECHO) " CC " $@
+ifeq ($(strip $(STATIC)),true)
+ $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lrt -lpci -L$(OUTPUT) -o $@
+else
$(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
+endif
$(QUIET) $(STRIPCMD) $@
$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
@@ -291,7 +306,11 @@ install-bench:
@#DESTDIR must be set from outside to survive
@sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT) install
+ifeq ($(strip $(STATIC)),true)
+install: all install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH)
+else
install: all install-lib install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH)
+endif
uninstall:
- rm -f $(DESTDIR)${libdir}/libcpupower.*
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index 7ec7021a29cd..d0f879b223fc 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -5,9 +5,15 @@ ifneq ($(O),)
endif
endif
+ifeq ($(strip $(STATIC)),true)
+LIBS = -L../ -L$(OUTPUT) -lm
+OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \
+ $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o
+else
LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
-
OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
+endif
+
CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
$(OUTPUT)%.o : %.c
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index 0e6764330241..590d12a25f6e 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -10,10 +10,12 @@
#include <errno.h>
#include <stdlib.h>
#include <string.h>
+#include <limits.h>
#include <getopt.h>
#include "cpufreq.h"
+#include "helpers/sysfs.h"
#include "helpers/helpers.h"
#include "helpers/bitmask.h"
@@ -244,149 +246,21 @@ static int get_boost_mode(unsigned int cpu)
return 0;
}
-static void debug_output_one(unsigned int cpu)
-{
- char *driver;
- struct cpufreq_affected_cpus *cpus;
- struct cpufreq_available_frequencies *freqs;
- unsigned long min, max, freq_kernel, freq_hardware;
- unsigned long total_trans, latency;
- unsigned long long total_time;
- struct cpufreq_policy *policy;
- struct cpufreq_available_governors *governors;
- struct cpufreq_stats *stats;
-
- if (cpufreq_cpu_exists(cpu))
- return;
-
- freq_kernel = cpufreq_get_freq_kernel(cpu);
- freq_hardware = cpufreq_get_freq_hardware(cpu);
-
- driver = cpufreq_get_driver(cpu);
- if (!driver) {
- printf(_(" no or unknown cpufreq driver is active on this CPU\n"));
- } else {
- printf(_(" driver: %s\n"), driver);
- cpufreq_put_driver(driver);
- }
-
- cpus = cpufreq_get_related_cpus(cpu);
- if (cpus) {
- printf(_(" CPUs which run at the same hardware frequency: "));
- while (cpus->next) {
- printf("%d ", cpus->cpu);
- cpus = cpus->next;
- }
- printf("%d\n", cpus->cpu);
- cpufreq_put_related_cpus(cpus);
- }
-
- cpus = cpufreq_get_affected_cpus(cpu);
- if (cpus) {
- printf(_(" CPUs which need to have their frequency coordinated by software: "));
- while (cpus->next) {
- printf("%d ", cpus->cpu);
- cpus = cpus->next;
- }
- printf("%d\n", cpus->cpu);
- cpufreq_put_affected_cpus(cpus);
- }
-
- latency = cpufreq_get_transition_latency(cpu);
- if (latency) {
- printf(_(" maximum transition latency: "));
- print_duration(latency);
- printf(".\n");
- }
-
- if (!(cpufreq_get_hardware_limits(cpu, &min, &max))) {
- printf(_(" hardware limits: "));
- print_speed(min);
- printf(" - ");
- print_speed(max);
- printf("\n");
- }
-
- freqs = cpufreq_get_available_frequencies(cpu);
- if (freqs) {
- printf(_(" available frequency steps: "));
- while (freqs->next) {
- print_speed(freqs->frequency);
- printf(", ");
- freqs = freqs->next;
- }
- print_speed(freqs->frequency);
- printf("\n");
- cpufreq_put_available_frequencies(freqs);
- }
-
- governors = cpufreq_get_available_governors(cpu);
- if (governors) {
- printf(_(" available cpufreq governors: "));
- while (governors->next) {
- printf("%s, ", governors->governor);
- governors = governors->next;
- }
- printf("%s\n", governors->governor);
- cpufreq_put_available_governors(governors);
- }
-
- policy = cpufreq_get_policy(cpu);
- if (policy) {
- printf(_(" current policy: frequency should be within "));
- print_speed(policy->min);
- printf(_(" and "));
- print_speed(policy->max);
-
- printf(".\n ");
- printf(_("The governor \"%s\" may"
- " decide which speed to use\n within this range.\n"),
- policy->governor);
- cpufreq_put_policy(policy);
- }
-
- if (freq_kernel || freq_hardware) {
- printf(_(" current CPU frequency is "));
- if (freq_hardware) {
- print_speed(freq_hardware);
- printf(_(" (asserted by call to hardware)"));
- } else
- print_speed(freq_kernel);
- printf(".\n");
- }
- stats = cpufreq_get_stats(cpu, &total_time);
- if (stats) {
- printf(_(" cpufreq stats: "));
- while (stats) {
- print_speed(stats->frequency);
- printf(":%.2f%%", (100.0 * stats->time_in_state) / total_time);
- stats = stats->next;
- if (stats)
- printf(", ");
- }
- cpufreq_put_stats(stats);
- total_trans = cpufreq_get_transitions(cpu);
- if (total_trans)
- printf(" (%lu)\n", total_trans);
- else
- printf("\n");
- }
- get_boost_mode(cpu);
-
-}
-
/* --freq / -f */
static int get_freq_kernel(unsigned int cpu, unsigned int human)
{
unsigned long freq = cpufreq_get_freq_kernel(cpu);
- if (!freq)
+ printf(_(" current CPU frequency: "));
+ if (!freq) {
+ printf(_(" Unable to call to kernel\n"));
return -EINVAL;
+ }
if (human) {
print_speed(freq);
- printf("\n");
} else
- printf("%lu\n", freq);
+ printf("%lu", freq);
+ printf(_(" (asserted by call to kernel)\n"));
return 0;
}
@@ -396,13 +270,16 @@ static int get_freq_kernel(unsigned int cpu, unsigned int human)
static int get_freq_hardware(unsigned int cpu, unsigned int human)
{
unsigned long freq = cpufreq_get_freq_hardware(cpu);
- if (!freq)
+ printf(_(" current CPU frequency: "));
+ if (!freq) {
+ printf("Unable to call hardware\n");
return -EINVAL;
+ }
if (human) {
print_speed(freq);
- printf("\n");
} else
- printf("%lu\n", freq);
+ printf("%lu", freq);
+ printf(_(" (asserted by call to hardware)\n"));
return 0;
}
@@ -411,9 +288,17 @@ static int get_freq_hardware(unsigned int cpu, unsigned int human)
static int get_hardware_limits(unsigned int cpu)
{
unsigned long min, max;
- if (cpufreq_get_hardware_limits(cpu, &min, &max))
+
+ printf(_(" hardware limits: "));
+ if (cpufreq_get_hardware_limits(cpu, &min, &max)) {
+ printf(_("Not Available\n"));
return -EINVAL;
- printf("%lu %lu\n", min, max);
+ }
+
+ print_speed(min);
+ printf(" - ");
+ print_speed(max);
+ printf("\n");
return 0;
}
@@ -422,9 +307,11 @@ static int get_hardware_limits(unsigned int cpu)
static int get_driver(unsigned int cpu)
{
char *driver = cpufreq_get_driver(cpu);
- if (!driver)
+ if (!driver) {
+ printf(_(" no or unknown cpufreq driver is active on this CPU\n"));
return -EINVAL;
- printf("%s\n", driver);
+ }
+ printf(" driver: %s\n", driver);
cpufreq_put_driver(driver);
return 0;
}
@@ -434,9 +321,19 @@ static int get_driver(unsigned int cpu)
static int get_policy(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_get_policy(cpu);
- if (!policy)
+ if (!policy) {
+ printf(_(" Unable to determine current policy\n"));
return -EINVAL;
- printf("%lu %lu %s\n", policy->min, policy->max, policy->governor);
+ }
+ printf(_(" current policy: frequency should be within "));
+ print_speed(policy->min);
+ printf(_(" and "));
+ print_speed(policy->max);
+
+ printf(".\n ");
+ printf(_("The governor \"%s\" may decide which speed to use\n"
+ " within this range.\n"),
+ policy->governor);
cpufreq_put_policy(policy);
return 0;
}
@@ -447,8 +344,12 @@ static int get_available_governors(unsigned int cpu)
{
struct cpufreq_available_governors *governors =
cpufreq_get_available_governors(cpu);
- if (!governors)
+
+ printf(_(" available cpufreq governors: "));
+ if (!governors) {
+ printf(_("Not Available\n"));
return -EINVAL;
+ }
while (governors->next) {
printf("%s ", governors->governor);
@@ -465,8 +366,12 @@ static int get_available_governors(unsigned int cpu)
static int get_affected_cpus(unsigned int cpu)
{
struct cpufreq_affected_cpus *cpus = cpufreq_get_affected_cpus(cpu);
- if (!cpus)
+
+ printf(_(" CPUs which need to have their frequency coordinated by software: "));
+ if (!cpus) {
+ printf(_("Not Available\n"));
return -EINVAL;
+ }
while (cpus->next) {
printf("%d ", cpus->cpu);
@@ -482,8 +387,12 @@ static int get_affected_cpus(unsigned int cpu)
static int get_related_cpus(unsigned int cpu)
{
struct cpufreq_affected_cpus *cpus = cpufreq_get_related_cpus(cpu);
- if (!cpus)
+
+ printf(_(" CPUs which run at the same hardware frequency: "));
+ if (!cpus) {
+ printf(_("Not Available\n"));
return -EINVAL;
+ }
while (cpus->next) {
printf("%d ", cpus->cpu);
@@ -524,8 +433,12 @@ static int get_freq_stats(unsigned int cpu, unsigned int human)
static int get_latency(unsigned int cpu, unsigned int human)
{
unsigned long latency = cpufreq_get_transition_latency(cpu);
- if (!latency)
+
+ printf(_(" maximum transition latency: "));
+ if (!latency || latency == UINT_MAX) {
+ printf(_(" Cannot determine or is not supported.\n"));
return -EINVAL;
+ }
if (human) {
print_duration(latency);
@@ -535,6 +448,36 @@ static int get_latency(unsigned int cpu, unsigned int human)
return 0;
}
+static void debug_output_one(unsigned int cpu)
+{
+ struct cpufreq_available_frequencies *freqs;
+
+ get_driver(cpu);
+ get_related_cpus(cpu);
+ get_affected_cpus(cpu);
+ get_latency(cpu, 1);
+ get_hardware_limits(cpu);
+
+ freqs = cpufreq_get_available_frequencies(cpu);
+ if (freqs) {
+ printf(_(" available frequency steps: "));
+ while (freqs->next) {
+ print_speed(freqs->frequency);
+ printf(", ");
+ freqs = freqs->next;
+ }
+ print_speed(freqs->frequency);
+ printf("\n");
+ cpufreq_put_available_frequencies(freqs);
+ }
+
+ get_available_governors(cpu);
+ get_policy(cpu);
+ if (get_freq_hardware(cpu, 1) < 0)
+ get_freq_kernel(cpu, 1);
+ get_boost_mode(cpu);
+}
+
static struct option info_opts[] = {
{"debug", no_argument, NULL, 'e'},
{"boost", no_argument, NULL, 'b'},
@@ -647,11 +590,14 @@ int cmd_freq_info(int argc, char **argv)
if (!bitmask_isbitset(cpus_chosen, cpu))
continue;
- if (cpufreq_cpu_exists(cpu)) {
- printf(_("couldn't analyze CPU %d as it doesn't seem to be present\n"), cpu);
+
+ printf(_("analyzing CPU %d:\n"), cpu);
+
+ if (sysfs_is_cpu_online(cpu) != 1) {
+ printf(_(" *is offline\n"));
+ printf("\n");
continue;
}
- printf(_("analyzing CPU %d:\n"), cpu);
switch (output_param) {
case 'b':
@@ -693,6 +639,7 @@ int cmd_freq_info(int argc, char **argv)
}
if (ret)
return ret;
+ printf("\n");
}
return ret;
}
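
The cpufreq-info rework above applies one pattern throughout: each per-attribute getter now prints its label first and falls back to a "Not Available" style message on failure, which is what lets debug_output_one() later be rebuilt as a plain sequence of those getters. A minimal standalone sketch of that pattern; show_attribute() and dummy_latency() are illustrative stand-ins, not part of the patch or of libcpupower:

    #include <stdio.h>
    #include <errno.h>

    /* Hypothetical stand-in for a libcpupower getter such as
     * cpufreq_get_transition_latency(); returns 0 when the value is unknown. */
    static long dummy_latency(unsigned int cpu)
    {
            return cpu == 0 ? 10000 : 0;
    }

    /* Print the label first, then either the value or a fallback message,
     * and still return an error code the caller can propagate. */
    static int show_attribute(const char *label,
                              long (*get)(unsigned int), unsigned int cpu)
    {
            long val;

            printf("  %s: ", label);
            val = get(cpu);
            if (!val) {
                    printf("Not Available\n");
                    return -EINVAL;
            }
            printf("%ld\n", val);
            return 0;
    }

    int main(void)
    {
            show_attribute("maximum transition latency (ns)", dummy_latency, 0);
            show_attribute("maximum transition latency (ns)", dummy_latency, 1);
            return 0;
    }

The first call prints a value, the second prints "Not Available" and returns -EINVAL, mirroring how the reworked getters keep emitting a readable line while still reporting failure to cmd_freq_info().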
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
index 750c1d82c3f7..8bf8ab5ffa25 100644
--- a/tools/power/cpupower/utils/cpuidle-info.c
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -12,7 +12,6 @@
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
-#include <cpufreq.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
@@ -25,8 +24,6 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose)
unsigned int idlestates, idlestate;
char *tmp;
- printf(_ ("Analyzing CPU %d:\n"), cpu);
-
idlestates = sysfs_get_idlestate_count(cpu);
if (idlestates == 0) {
printf(_("CPU %u: No idle states\n"), cpu);
@@ -71,7 +68,6 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose)
printf(_("Duration: %llu\n"),
sysfs_get_idlestate_time(cpu, idlestate));
}
- printf("\n");
}
static void cpuidle_general_output(void)
@@ -189,10 +185,17 @@ int cmd_idle_info(int argc, char **argv)
for (cpu = bitmask_first(cpus_chosen);
cpu <= bitmask_last(cpus_chosen); cpu++) {
- if (!bitmask_isbitset(cpus_chosen, cpu) ||
- cpufreq_cpu_exists(cpu))
+ if (!bitmask_isbitset(cpus_chosen, cpu))
continue;
+ printf(_("analyzing CPU %d:\n"), cpu);
+
+ if (sysfs_is_cpu_online(cpu) != 1) {
+ printf(_(" *is offline\n"));
+ printf("\n");
+ continue;
+ }
+
switch (output_param) {
case 'o':
@@ -203,6 +206,7 @@ int cmd_idle_info(int argc, char **argv)
cpuidle_cpu_output(cpu, verbose);
break;
}
+ printf("\n");
}
return EXIT_SUCCESS;
}
diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c
index 10299f2e9d2a..c7caa8eaa6d0 100644
--- a/tools/power/cpupower/utils/cpupower-info.c
+++ b/tools/power/cpupower/utils/cpupower-info.c
@@ -12,7 +12,6 @@
#include <string.h>
#include <getopt.h>
-#include <cpufreq.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
@@ -83,12 +82,16 @@ int cmd_info(int argc, char **argv)
for (cpu = bitmask_first(cpus_chosen);
cpu <= bitmask_last(cpus_chosen); cpu++) {
- if (!bitmask_isbitset(cpus_chosen, cpu) ||
- cpufreq_cpu_exists(cpu))
+ if (!bitmask_isbitset(cpus_chosen, cpu))
continue;
printf(_("analyzing CPU %d:\n"), cpu);
+ if (sysfs_is_cpu_online(cpu) != 1) {
+ printf(_(" *is offline\n"));
+ continue;
+ }
+
if (params.perf_bias) {
ret = msr_intel_get_perf_bias(cpu);
if (ret < 0) {
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index 3e6f374f8dd7..532f46b9a335 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -12,7 +12,6 @@
#include <string.h>
#include <getopt.h>
-#include <cpufreq.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
#include "helpers/bitmask.h"
@@ -78,10 +77,15 @@ int cmd_set(int argc, char **argv)
for (cpu = bitmask_first(cpus_chosen);
cpu <= bitmask_last(cpus_chosen); cpu++) {
- if (!bitmask_isbitset(cpus_chosen, cpu) ||
- cpufreq_cpu_exists(cpu))
+ if (!bitmask_isbitset(cpus_chosen, cpu))
continue;
+ if (sysfs_is_cpu_online(cpu) != 1) {
+ fprintf(stderr, _("Cannot set values on CPU %d:"), cpu);
+ fprintf(stderr, _(" *is offline\n"));
+ continue;
+ }
+
if (params.perf_bias) {
ret = msr_intel_set_perf_bias(cpu, perf_bias);
if (ret) {
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c
index 9cbb7fd75171..5f9c908f4557 100644
--- a/tools/power/cpupower/utils/helpers/topology.c
+++ b/tools/power/cpupower/utils/helpers/topology.c
@@ -106,7 +106,7 @@ int get_cpu_topology(struct cpupower_topology *cpu_top)
cpu_top->pkgs++;
}
}
- if (!cpu_top->core_info[0].pkg == -1)
+ if (!(cpu_top->core_info[0].pkg == -1))
cpu_top->pkgs++;
/* Intel's cores count is not consecutively numbered, there may
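
The one-character topology.c fix above is about C operator precedence: '!' binds tighter than '==', so the old condition evaluated (!pkg) == -1, which can never be true, and the extra pkgs++ never ran. A tiny standalone demonstration, independent of the cpupower code:

    #include <stdio.h>

    int main(void)
    {
            int pkg;

            for (pkg = -1; pkg <= 0; pkg++)
                    /* '!' binds tighter than '==': the old test is (!pkg) == -1,
                     * which is always false; the fixed test is true iff pkg != -1. */
                    printf("pkg=%2d  old(!pkg == -1)=%d  new(!(pkg == -1))=%d\n",
                           pkg, !pkg == -1, !(pkg == -1));
            return 0;
    }

With pkg == 0 the two forms disagree (0 versus 1), which is exactly the case the added parentheses fix.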
diff --git a/tools/perf/config/Makefile.arch b/tools/scripts/Makefile.arch
index e11fbd6fae78..e11fbd6fae78 100644
--- a/tools/perf/config/Makefile.arch
+++ b/tools/scripts/Makefile.arch
diff --git a/tools/spi/.gitignore b/tools/spi/.gitignore
new file mode 100644
index 000000000000..4280576397e8
--- /dev/null
+++ b/tools/spi/.gitignore
@@ -0,0 +1,2 @@
+spidev_fdx
+spidev_test
diff --git a/tools/spi/Makefile b/tools/spi/Makefile
new file mode 100644
index 000000000000..cd0db62e4d9d
--- /dev/null
+++ b/tools/spi/Makefile
@@ -0,0 +1,4 @@
+all: spidev_test spidev_fdx
+
+clean:
+ $(RM) spidev_test spidev_fdx
diff --git a/tools/spi/spidev_fdx.c b/tools/spi/spidev_fdx.c
new file mode 100644
index 000000000000..0ea3e51292fc
--- /dev/null
+++ b/tools/spi/spidev_fdx.c
@@ -0,0 +1,158 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <linux/types.h>
+#include <linux/spi/spidev.h>
+
+
+static int verbose;
+
+static void do_read(int fd, int len)
+{
+ unsigned char buf[32], *bp;
+ int status;
+
+ /* read at least 2 bytes, no more than 32 */
+ if (len < 2)
+ len = 2;
+ else if (len > sizeof(buf))
+ len = sizeof(buf);
+ memset(buf, 0, sizeof buf);
+
+ status = read(fd, buf, len);
+ if (status < 0) {
+ perror("read");
+ return;
+ }
+ if (status != len) {
+ fprintf(stderr, "short read\n");
+ return;
+ }
+
+ printf("read(%2d, %2d): %02x %02x,", len, status,
+ buf[0], buf[1]);
+ status -= 2;
+ bp = buf + 2;
+ while (status-- > 0)
+ printf(" %02x", *bp++);
+ printf("\n");
+}
+
+static void do_msg(int fd, int len)
+{
+ struct spi_ioc_transfer xfer[2];
+ unsigned char buf[32], *bp;
+ int status;
+
+ memset(xfer, 0, sizeof xfer);
+ memset(buf, 0, sizeof buf);
+
+ if (len > sizeof buf)
+ len = sizeof buf;
+
+ buf[0] = 0xaa;
+ xfer[0].tx_buf = (unsigned long)buf;
+ xfer[0].len = 1;
+
+ xfer[1].rx_buf = (unsigned long) buf;
+ xfer[1].len = len;
+
+ status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
+ if (status < 0) {
+ perror("SPI_IOC_MESSAGE");
+ return;
+ }
+
+ printf("response(%2d, %2d): ", len, status);
+ for (bp = buf; len; len--)
+ printf(" %02x", *bp++);
+ printf("\n");
+}
+
+static void dumpstat(const char *name, int fd)
+{
+ __u8 lsb, bits;
+ __u32 mode, speed;
+
+ if (ioctl(fd, SPI_IOC_RD_MODE32, &mode) < 0) {
+ perror("SPI rd_mode");
+ return;
+ }
+ if (ioctl(fd, SPI_IOC_RD_LSB_FIRST, &lsb) < 0) {
+ perror("SPI rd_lsb_fist");
+ return;
+ }
+ if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) {
+ perror("SPI bits_per_word");
+ return;
+ }
+ if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) {
+ perror("SPI max_speed_hz");
+ return;
+ }
+
+ printf("%s: spi mode 0x%x, %d bits %sper word, %d Hz max\n",
+ name, mode, bits, lsb ? "(lsb first) " : "", speed);
+}
+
+int main(int argc, char **argv)
+{
+ int c;
+ int readcount = 0;
+ int msglen = 0;
+ int fd;
+ const char *name;
+
+ while ((c = getopt(argc, argv, "hm:r:v")) != EOF) {
+ switch (c) {
+ case 'm':
+ msglen = atoi(optarg);
+ if (msglen < 0)
+ goto usage;
+ continue;
+ case 'r':
+ readcount = atoi(optarg);
+ if (readcount < 0)
+ goto usage;
+ continue;
+ case 'v':
+ verbose++;
+ continue;
+ case 'h':
+ case '?':
+usage:
+ fprintf(stderr,
+ "usage: %s [-h] [-m N] [-r N] /dev/spidevB.D\n",
+ argv[0]);
+ return 1;
+ }
+ }
+
+ if ((optind + 1) != argc)
+ goto usage;
+ name = argv[optind];
+
+ fd = open(name, O_RDWR);
+ if (fd < 0) {
+ perror("open");
+ return 1;
+ }
+
+ dumpstat(name, fd);
+
+ if (msglen)
+ do_msg(fd, msglen);
+
+ if (readcount)
+ do_read(fd, readcount);
+
+ close(fd);
+ return 0;
+}
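
A note on spidev_fdx.c as added above: do_read() exercises the plain half-duplex read() path, while do_msg() queues two spi_ioc_transfer entries (one command byte out, then len bytes in) and submits them with a single SPI_IOC_MESSAGE(2) ioctl, so both legs run inside one message with chip select held between them unless cs_change were set. An invocation would look like ./spidev_fdx -m 8 -r 16 /dev/spidevB.D, where B.D is whatever bus.device node exists on the system; the numbers here are only an example.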
diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c
new file mode 100644
index 000000000000..8a73d8185316
--- /dev/null
+++ b/tools/spi/spidev_test.c
@@ -0,0 +1,399 @@
+/*
+ * SPI testing utility (using spidev driver)
+ *
+ * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright (c) 2007 Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * Cross-compile with cross-gcc -I/path/to/cross-kernel/include
+ */
+
+#include <stdint.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <linux/types.h>
+#include <linux/spi/spidev.h>
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+static void pabort(const char *s)
+{
+ perror(s);
+ abort();
+}
+
+static const char *device = "/dev/spidev1.1";
+static uint32_t mode;
+static uint8_t bits = 8;
+static char *input_file;
+static char *output_file;
+static uint32_t speed = 500000;
+static uint16_t delay;
+static int verbose;
+
+uint8_t default_tx[] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x95,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF0, 0x0D,
+};
+
+uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
+char *input_tx;
+
+static void hex_dump(const void *src, size_t length, size_t line_size,
+ char *prefix)
+{
+ int i = 0;
+ const unsigned char *address = src;
+ const unsigned char *line = address;
+ unsigned char c;
+
+ printf("%s | ", prefix);
+ while (length-- > 0) {
+ printf("%02X ", *address++);
+ if (!(++i % line_size) || (length == 0 && i % line_size)) {
+ if (length == 0) {
+ while (i++ % line_size)
+ printf("__ ");
+ }
+ printf(" | "); /* right close */
+ while (line < address) {
+ c = *line++;
+ printf("%c", (c < 33 || c == 255) ? 0x2E : c);
+ }
+ printf("\n");
+ if (length > 0)
+ printf("%s | ", prefix);
+ }
+ }
+}
+
+/*
+ * Unescape - process hexadecimal escape character
+ * converts shell input "\x23" -> 0x23
+ */
+static int unescape(char *_dst, char *_src, size_t len)
+{
+ int ret = 0;
+ int match;
+ char *src = _src;
+ char *dst = _dst;
+ unsigned int ch;
+
+ while (*src) {
+ if (*src == '\\' && *(src+1) == 'x') {
+ match = sscanf(src + 2, "%2x", &ch);
+ if (!match)
+ pabort("malformed input string");
+
+ src += 4;
+ *dst++ = (unsigned char)ch;
+ } else {
+ *dst++ = *src++;
+ }
+ ret++;
+ }
+ return ret;
+}
+
+static void transfer(int fd, uint8_t const *tx, uint8_t const *rx, size_t len)
+{
+ int ret;
+ int out_fd;
+ struct spi_ioc_transfer tr = {
+ .tx_buf = (unsigned long)tx,
+ .rx_buf = (unsigned long)rx,
+ .len = len,
+ .delay_usecs = delay,
+ .speed_hz = speed,
+ .bits_per_word = bits,
+ };
+
+ if (mode & SPI_TX_QUAD)
+ tr.tx_nbits = 4;
+ else if (mode & SPI_TX_DUAL)
+ tr.tx_nbits = 2;
+ if (mode & SPI_RX_QUAD)
+ tr.rx_nbits = 4;
+ else if (mode & SPI_RX_DUAL)
+ tr.rx_nbits = 2;
+ if (!(mode & SPI_LOOP)) {
+ if (mode & (SPI_TX_QUAD | SPI_TX_DUAL))
+ tr.rx_buf = 0;
+ else if (mode & (SPI_RX_QUAD | SPI_RX_DUAL))
+ tr.tx_buf = 0;
+ }
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1)
+ pabort("can't send spi message");
+
+ if (verbose)
+ hex_dump(tx, len, 32, "TX");
+
+ if (output_file) {
+ out_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+ if (out_fd < 0)
+ pabort("could not open output file");
+
+ ret = write(out_fd, rx, len);
+ if (ret != len)
+ pabort("not all bytes written to output file");
+
+ close(out_fd);
+ }
+
+ if (verbose || !output_file)
+ hex_dump(rx, len, 32, "RX");
+}
+
+static void print_usage(const char *prog)
+{
+ printf("Usage: %s [-DsbdlHOLC3]\n", prog);
+ puts(" -D --device device to use (default /dev/spidev1.1)\n"
+ " -s --speed max speed (Hz)\n"
+ " -d --delay delay (usec)\n"
+ " -b --bpw bits per word\n"
+ " -i --input input data from a file (e.g. \"test.bin\")\n"
+ " -o --output output data to a file (e.g. \"results.bin\")\n"
+ " -l --loop loopback\n"
+ " -H --cpha clock phase\n"
+ " -O --cpol clock polarity\n"
+ " -L --lsb least significant bit first\n"
+ " -C --cs-high chip select active high\n"
+ " -3 --3wire SI/SO signals shared\n"
+ " -v --verbose Verbose (show tx buffer)\n"
+ " -p Send data (e.g. \"1234\\xde\\xad\")\n"
+ " -N --no-cs no chip select\n"
+ " -R --ready slave pulls low to pause\n"
+ " -2 --dual dual transfer\n"
+ " -4 --quad quad transfer\n");
+ exit(1);
+}
+
+static void parse_opts(int argc, char *argv[])
+{
+ while (1) {
+ static const struct option lopts[] = {
+ { "device", 1, 0, 'D' },
+ { "speed", 1, 0, 's' },
+ { "delay", 1, 0, 'd' },
+ { "bpw", 1, 0, 'b' },
+ { "input", 1, 0, 'i' },
+ { "output", 1, 0, 'o' },
+ { "loop", 0, 0, 'l' },
+ { "cpha", 0, 0, 'H' },
+ { "cpol", 0, 0, 'O' },
+ { "lsb", 0, 0, 'L' },
+ { "cs-high", 0, 0, 'C' },
+ { "3wire", 0, 0, '3' },
+ { "no-cs", 0, 0, 'N' },
+ { "ready", 0, 0, 'R' },
+ { "dual", 0, 0, '2' },
+ { "verbose", 0, 0, 'v' },
+ { "quad", 0, 0, '4' },
+ { NULL, 0, 0, 0 },
+ };
+ int c;
+
+ c = getopt_long(argc, argv, "D:s:d:b:i:o:lHOLC3NR24p:v",
+ lopts, NULL);
+
+ if (c == -1)
+ break;
+
+ switch (c) {
+ case 'D':
+ device = optarg;
+ break;
+ case 's':
+ speed = atoi(optarg);
+ break;
+ case 'd':
+ delay = atoi(optarg);
+ break;
+ case 'b':
+ bits = atoi(optarg);
+ break;
+ case 'i':
+ input_file = optarg;
+ break;
+ case 'o':
+ output_file = optarg;
+ break;
+ case 'l':
+ mode |= SPI_LOOP;
+ break;
+ case 'H':
+ mode |= SPI_CPHA;
+ break;
+ case 'O':
+ mode |= SPI_CPOL;
+ break;
+ case 'L':
+ mode |= SPI_LSB_FIRST;
+ break;
+ case 'C':
+ mode |= SPI_CS_HIGH;
+ break;
+ case '3':
+ mode |= SPI_3WIRE;
+ break;
+ case 'N':
+ mode |= SPI_NO_CS;
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case 'R':
+ mode |= SPI_READY;
+ break;
+ case 'p':
+ input_tx = optarg;
+ break;
+ case '2':
+ mode |= SPI_TX_DUAL;
+ break;
+ case '4':
+ mode |= SPI_TX_QUAD;
+ break;
+ default:
+ print_usage(argv[0]);
+ break;
+ }
+ }
+ if (mode & SPI_LOOP) {
+ if (mode & SPI_TX_DUAL)
+ mode |= SPI_RX_DUAL;
+ if (mode & SPI_TX_QUAD)
+ mode |= SPI_RX_QUAD;
+ }
+}
+
+static void transfer_escaped_string(int fd, char *str)
+{
+ size_t size = strlen(str); /* unescape() can emit at most strlen(str) bytes */
+ uint8_t *tx;
+ uint8_t *rx;
+
+ tx = malloc(size);
+ if (!tx)
+ pabort("can't allocate tx buffer");
+
+ rx = malloc(size);
+ if (!rx)
+ pabort("can't allocate rx buffer");
+
+ size = unescape((char *)tx, str, size);
+ transfer(fd, tx, rx, size);
+ free(rx);
+ free(tx);
+}
+
+static void transfer_file(int fd, char *filename)
+{
+ ssize_t bytes;
+ struct stat sb;
+ int tx_fd;
+ uint8_t *tx;
+ uint8_t *rx;
+
+ if (stat(filename, &sb) == -1)
+ pabort("can't stat input file");
+
+ tx_fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ pabort("can't open input file");
+
+ tx = malloc(sb.st_size);
+ if (!tx)
+ pabort("can't allocate tx buffer");
+
+ rx = malloc(sb.st_size);
+ if (!rx)
+ pabort("can't allocate rx buffer");
+
+ bytes = read(tx_fd, tx, sb.st_size);
+ if (bytes != sb.st_size)
+ pabort("failed to read input file");
+
+ transfer(fd, tx, rx, sb.st_size);
+ free(rx);
+ free(tx);
+ close(tx_fd);
+}
+
+int main(int argc, char *argv[])
+{
+ int ret = 0;
+ int fd;
+
+ parse_opts(argc, argv);
+
+ fd = open(device, O_RDWR);
+ if (fd < 0)
+ pabort("can't open device");
+
+ /*
+ * spi mode
+ */
+ ret = ioctl(fd, SPI_IOC_WR_MODE32, &mode);
+ if (ret == -1)
+ pabort("can't set spi mode");
+
+ ret = ioctl(fd, SPI_IOC_RD_MODE32, &mode);
+ if (ret == -1)
+ pabort("can't get spi mode");
+
+ /*
+ * bits per word
+ */
+ ret = ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits);
+ if (ret == -1)
+ pabort("can't set bits per word");
+
+ ret = ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits);
+ if (ret == -1)
+ pabort("can't get bits per word");
+
+ /*
+ * max speed hz
+ */
+ ret = ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);
+ if (ret == -1)
+ pabort("can't set max speed hz");
+
+ ret = ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed);
+ if (ret == -1)
+ pabort("can't get max speed hz");
+
+ printf("spi mode: 0x%x\n", mode);
+ printf("bits per word: %d\n", bits);
+ printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000);
+
+ if (input_tx && input_file)
+ pabort("only one of -p and --input may be selected");
+
+ if (input_tx)
+ transfer_escaped_string(fd, input_tx);
+ else if (input_file)
+ transfer_file(fd, input_file);
+ else
+ transfer(fd, default_tx, default_rx, sizeof(default_tx));
+
+ close(fd);
+
+ return ret;
+}
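
The -p option of spidev_test feeds its argument through unescape(), which turns shell-level "\xNN" sequences into single bytes via sscanf("%2x") while copying everything else through unchanged, so -p "1234\xde\xad" transmits 31 32 33 34 de ad. A small standalone illustration of that conversion, independent of any SPI device:

    #include <stdio.h>

    /* Demo of the escape handling used by unescape(): "\x23" in the shell
     * argument becomes the single byte 0x23; plain characters pass through. */
    int main(void)
    {
            const char *src = "A\\x23B";    /* what the shell passes for A\x23B */
            unsigned int ch;
            size_t i = 0;

            while (src[i]) {
                    if (src[i] == '\\' && src[i + 1] == 'x' &&
                        sscanf(src + i + 2, "%2x", &ch) == 1) {
                            printf("%02X ", ch);
                            i += 4;
                    } else {
                            printf("%02X ", (unsigned char)src[i]);
                            i++;
                    }
            }
            printf("\n");   /* prints: 41 23 42 */
            return 0;
    }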
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 38b00ecb2ed5..a34bfd0c8928 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -9,6 +9,8 @@ ldflags-y += --wrap=memunmap
ldflags-y += --wrap=__devm_request_region
ldflags-y += --wrap=__request_region
ldflags-y += --wrap=__release_region
+ldflags-y += --wrap=devm_memremap_pages
+ldflags-y += --wrap=phys_to_pfn_t
DRIVERS := ../../../drivers
NVDIMM_SRC := $(DRIVERS)/nvdimm
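
The two Kbuild lines added above extend the nfit test's use of the linker's --wrap option: any reference to devm_memremap_pages() or phys_to_pfn_t() in the objects being linked is redirected to __wrap_devm_memremap_pages() / __wrap_phys_to_pfn_t(), which the test/iomap.c changes below provide. A tiny userspace demonstration of the same mechanism; build it with something like gcc -Wl,--wrap=malloc wrap_demo.c (the file name is just an example):

    #include <stdio.h>
    #include <stdlib.h>

    /* Resolved by the linker to the real malloc when --wrap=malloc is used. */
    void *__real_malloc(size_t size);

    /* Calls to malloc() from this object are resolved to __wrap_malloc(). */
    void *__wrap_malloc(size_t size)
    {
            printf("intercepted malloc(%zu)\n", size);
            return __real_malloc(size);
    }

    int main(void)
    {
            free(malloc(32));
            return 0;
    }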
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index b7251314bbc0..7ec7df9e7fc7 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include "nfit_test.h"
static LIST_HEAD(iomap_head);
@@ -41,7 +42,7 @@ void nfit_test_teardown(void)
}
EXPORT_SYMBOL(nfit_test_teardown);
-static struct nfit_test_resource *get_nfit_res(resource_size_t resource)
+static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
{
struct iomap_ops *ops;
@@ -51,14 +52,22 @@ static struct nfit_test_resource *get_nfit_res(resource_size_t resource)
return NULL;
}
-void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
- void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
+static struct nfit_test_resource *get_nfit_res(resource_size_t resource)
{
- struct nfit_test_resource *nfit_res;
+ struct nfit_test_resource *res;
rcu_read_lock();
- nfit_res = get_nfit_res(offset);
+ res = __get_nfit_res(resource);
rcu_read_unlock();
+
+ return res;
+}
+
+void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
+ void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
+{
+ struct nfit_test_resource *nfit_res = get_nfit_res(offset);
+
if (nfit_res)
return (void __iomem *) nfit_res->buf + offset
- nfit_res->res->start;
@@ -68,11 +77,8 @@ void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
resource_size_t offset, unsigned long size)
{
- struct nfit_test_resource *nfit_res;
+ struct nfit_test_resource *nfit_res = get_nfit_res(offset);
- rcu_read_lock();
- nfit_res = get_nfit_res(offset);
- rcu_read_unlock();
if (nfit_res)
return (void __iomem *) nfit_res->buf + offset
- nfit_res->res->start;
@@ -83,25 +89,58 @@ EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags)
{
- struct nfit_test_resource *nfit_res;
+ struct nfit_test_resource *nfit_res = get_nfit_res(offset);
- rcu_read_lock();
- nfit_res = get_nfit_res(offset);
- rcu_read_unlock();
if (nfit_res)
return nfit_res->buf + offset - nfit_res->res->start;
return devm_memremap(dev, offset, size, flags);
}
EXPORT_SYMBOL(__wrap_devm_memremap);
+#ifdef __HAVE_ARCH_PTE_DEVMAP
+#include <linux/memremap.h>
+#include <linux/pfn_t.h>
+
+void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res,
+ struct percpu_ref *ref, struct vmem_altmap *altmap)
+{
+ resource_size_t offset = res->start;
+ struct nfit_test_resource *nfit_res = get_nfit_res(offset);
+
+ if (nfit_res)
+ return nfit_res->buf + offset - nfit_res->res->start;
+ return devm_memremap_pages(dev, res, ref, altmap);
+}
+EXPORT_SYMBOL(__wrap_devm_memremap_pages);
+
+pfn_t __wrap_phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
+{
+ struct nfit_test_resource *nfit_res = get_nfit_res(addr);
+
+ if (nfit_res)
+ flags &= ~PFN_MAP;
+ return phys_to_pfn_t(addr, flags);
+}
+EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
+#else
+/* to be removed post 4.5-rc1 */
+void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res)
+{
+ resource_size_t offset = res->start;
+ struct nfit_test_resource *nfit_res = get_nfit_res(offset);
+
+ if (nfit_res)
+ return nfit_res->buf + offset - nfit_res->res->start;
+ return devm_memremap_pages(dev, res);
+}
+EXPORT_SYMBOL(__wrap_devm_memremap_pages);
+#endif
+
void *__wrap_memremap(resource_size_t offset, size_t size,
unsigned long flags)
{
- struct nfit_test_resource *nfit_res;
+ struct nfit_test_resource *nfit_res = get_nfit_res(offset);
- rcu_read_lock();
- nfit_res = get_nfit_res(offset);
- rcu_read_unlock();
if (nfit_res)
return nfit_res->buf + offset - nfit_res->res->start;
return memremap(offset, size, flags);
@@ -110,11 +149,8 @@ EXPORT_SYMBOL(__wrap_memremap);
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
- struct nfit_test_resource *nfit_res;
+ struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
- rcu_read_lock();
- nfit_res = get_nfit_res((unsigned long) addr);
- rcu_read_unlock();
if (nfit_res)
return;
return devm_memunmap(dev, addr);
@@ -135,11 +171,7 @@ EXPORT_SYMBOL(__wrap_ioremap_wc);
void __wrap_iounmap(volatile void __iomem *addr)
{
- struct nfit_test_resource *nfit_res;
-
- rcu_read_lock();
- nfit_res = get_nfit_res((unsigned long) addr);
- rcu_read_unlock();
+ struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
if (nfit_res)
return;
return iounmap(addr);
@@ -148,11 +180,8 @@ EXPORT_SYMBOL(__wrap_iounmap);
void __wrap_memunmap(void *addr)
{
- struct nfit_test_resource *nfit_res;
+ struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
- rcu_read_lock();
- nfit_res = get_nfit_res((unsigned long) addr);
- rcu_read_unlock();
if (nfit_res)
return;
return memunmap(addr);
@@ -166,9 +195,7 @@ static struct resource *nfit_test_request_region(struct device *dev,
struct nfit_test_resource *nfit_res;
if (parent == &iomem_resource) {
- rcu_read_lock();
nfit_res = get_nfit_res(start);
- rcu_read_unlock();
if (nfit_res) {
struct resource *res = nfit_res->res + 1;
@@ -218,9 +245,7 @@ void __wrap___release_region(struct resource *parent, resource_size_t start,
struct nfit_test_resource *nfit_res;
if (parent == &iomem_resource) {
- rcu_read_lock();
nfit_res = get_nfit_res(start);
- rcu_read_unlock();
if (nfit_res) {
struct resource *res = nfit_res->res + 1;
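
The iomap.c changes above are a mechanical refactor: the rcu_read_lock()/rcu_read_unlock() pair that every wrapper previously placed around get_nfit_res() now lives inside get_nfit_res() itself, with the raw lookup renamed to __get_nfit_res(). A generic userspace sketch of that accessor pattern; a pthread rwlock stands in for RCU and the table contents are made up (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stddef.h>

    struct entry { unsigned long start; const char *name; };

    static struct entry table[] = { { 0x1000, "res0" }, { 0x2000, "res1" } };
    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Raw lookup: assumes the caller already holds the read lock. */
    static struct entry *__lookup(unsigned long addr)
    {
            for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    if (table[i].start == addr)
                            return &table[i];
            return NULL;
    }

    /* Public accessor: takes and drops the lock so callers no longer have to. */
    static struct entry *lookup(unsigned long addr)
    {
            struct entry *e;

            pthread_rwlock_rdlock(&table_lock);
            e = __lookup(addr);
            pthread_rwlock_unlock(&table_lock);
            return e;
    }

    int main(void)
    {
            struct entry *e = lookup(0x2000);

            printf("%s\n", e ? e->name : "not found");
            return 0;
    }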
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 51cf8256c6cd..90bd2ea41032 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -248,6 +248,8 @@ static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
nd_cmd->out_length = 256;
nd_cmd->num_records = 0;
+ nd_cmd->address = 0;
+ nd_cmd->length = -1ULL;
nd_cmd->status = 0;
return 0;
@@ -1088,6 +1090,8 @@ static void nfit_test1_setup(struct nfit_test *t)
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_control_region *dcr;
struct acpi_nfit_system_address *spa;
+ struct nvdimm_bus_descriptor *nd_desc;
+ struct acpi_nfit_desc *acpi_desc;
offset = 0;
/* spa0 (flat range with no bdw aliasing) */
@@ -1135,6 +1139,13 @@ static void nfit_test1_setup(struct nfit_test *t)
dcr->command_size = 0;
dcr->status_offset = 0;
dcr->status_size = 0;
+
+ acpi_desc = &t->acpi_desc;
+ set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
+ set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
+ set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
+ nd_desc = &acpi_desc->nd_desc;
+ nd_desc->ndctl = nfit_test_ctl;
}
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index c8edff6803d1..b04afc3295df 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,10 +1,12 @@
TARGETS = breakpoints
+TARGETS += capabilities
TARGETS += cpu-hotplug
TARGETS += efivarfs
TARGETS += exec
TARGETS += firmware
TARGETS += ftrace
TARGETS += futex
+TARGETS += ipc
TARGETS += kcmp
TARGETS += lib
TARGETS += membarrier
diff --git a/tools/testing/selftests/breakpoints/.gitignore b/tools/testing/selftests/breakpoints/.gitignore
new file mode 100644
index 000000000000..9b3193d06608
--- /dev/null
+++ b/tools/testing/selftests/breakpoints/.gitignore
@@ -0,0 +1 @@
+breakpoint_test
diff --git a/tools/testing/selftests/capabilities/Makefile b/tools/testing/selftests/capabilities/Makefile
index 8c8f0c1f0889..008602aed920 100644
--- a/tools/testing/selftests/capabilities/Makefile
+++ b/tools/testing/selftests/capabilities/Makefile
@@ -1,18 +1,15 @@
-all:
-
-include ../lib.mk
-
-.PHONY: all clean
-
-TARGETS := validate_cap test_execve
+TEST_FILES := validate_cap
TEST_PROGS := test_execve
-CFLAGS := -O2 -g -std=gnu99 -Wall -lcap-ng
+BINARIES := $(TEST_FILES) $(TEST_PROGS)
-all: $(TARGETS)
+CFLAGS += -O2 -g -std=gnu99 -Wall
+LDLIBS += -lcap-ng -lrt -ldl
+
+all: $(BINARIES)
clean:
- $(RM) $(TARGETS)
+ $(RM) $(BINARIES)
+
+include ../lib.mk
-$(TARGETS): %: %.c
- $(CC) -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index c4366dc74e01..5c495ad7958a 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -48,8 +48,21 @@ echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
+if printf '\000' >"$DIR"/trigger_request; then
+ echo "$0: empty filename should not succeed" >&2
+ exit 1
+fi
+
+if printf '\000' >"$DIR"/trigger_async_request; then
+ echo "$0: empty filename should not succeed (async)" >&2
+ exit 1
+fi
+
# Request a firmware that doesn't exist, it should fail.
-echo -n "nope-$NAME" >"$DIR"/trigger_request
+if echo -n "nope-$NAME" >"$DIR"/trigger_request; then
+ echo "$0: firmware shouldn't have loaded" >&2
+ exit 1
+fi
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not expected to match" >&2
exit 1
@@ -74,4 +87,18 @@ else
echo "$0: filesystem loading works"
fi
+# Try the asynchronous version too
+if ! echo -n "$NAME" >"$DIR"/trigger_async_request ; then
+ echo "$0: could not trigger async request" >&2
+ exit 1
+fi
+
+# Verify the contents are what we expect.
+if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not loaded (async)" >&2
+ exit 1
+else
+ echo "$0: async filesystem loading works"
+fi
+
exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
new file mode 100644
index 000000000000..773e276ff90b
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
@@ -0,0 +1,90 @@
+#!/bin/sh
+# description: Test creation and deletion of trace instances
+
+if [ ! -d instances ] ; then
+ echo "no instance directory with this kernel"
+ exit_unsupported;
+fi
+
+fail() { # mesg
+ rmdir x y z 2>/dev/null
+ echo $1
+ set -e
+ exit $FAIL
+}
+
+cd instances
+
+# we don't want to fail on error
+set +e
+
+mkdir x
+rmdir x
+result=$?
+
+if [ $result -ne 0 ]; then
+ echo "instance rmdir not supported"
+ exit_unsupported
+fi
+
+instance_slam() {
+ while :; do
+ mkdir x
+ mkdir y
+ mkdir z
+ rmdir x
+ rmdir y
+ rmdir z
+ done 2>/dev/null
+}
+
+instance_slam &
+x=`jobs -l`
+p1=`echo $x | cut -d' ' -f2`
+echo $p1
+
+instance_slam &
+x=`jobs -l | tail -1`
+p2=`echo $x | cut -d' ' -f2`
+echo $p2
+
+instance_slam &
+x=`jobs -l | tail -1`
+p3=`echo $x | cut -d' ' -f2`
+echo $p3
+
+instance_slam &
+x=`jobs -l | tail -1`
+p4=`echo $x | cut -d' ' -f2`
+echo $p4
+
+instance_slam &
+x=`jobs -l | tail -1`
+p5=`echo $x | cut -d' ' -f2`
+echo $p5
+
+ls -lR >/dev/null
+sleep 1
+
+kill -1 $p1
+kill -1 $p2
+kill -1 $p3
+kill -1 $p4
+kill -1 $p5
+
+echo "Wait for processes to finish"
+wait $p1 $p2 $p3 $p4 $p5
+echo "all processes finished, wait for cleanup"
+
+mkdir x y z
+ls x y z
+rmdir x y z
+for d in x y z; do
+ if [ -d $d ]; then
+ fail "instance $d still exists"
+ fi
+done
+
+set -e
+
+exit 0
diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile
new file mode 100644
index 000000000000..f5f1a28715ff
--- /dev/null
+++ b/tools/testing/selftests/intel_pstate/Makefile
@@ -0,0 +1,15 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
+LDFLAGS := $(LDFLAGS) -lm
+
+TARGETS := msr aperf
+
+TEST_PROGS := $(TARGETS) run.sh
+
+.PHONY: all clean
+all: $(TARGETS)
+
+$(TARGETS): $(HEADERS)
+
+clean:
+ rm -f $(TARGETS)
diff --git a/tools/testing/selftests/intel_pstate/aperf.c b/tools/testing/selftests/intel_pstate/aperf.c
new file mode 100644
index 000000000000..6046e183f4ad
--- /dev/null
+++ b/tools/testing/selftests/intel_pstate/aperf.c
@@ -0,0 +1,80 @@
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/timeb.h>
+#include <sched.h>
+#include <errno.h>
+
+void usage(char *name) {
+ printf ("Usage: %s cpunum\n", name);
+}
+
+int main(int argc, char **argv) {
+ int i, cpu, fd;
+ char msr_file_name[64];
+ long long tsc, old_tsc, new_tsc;
+ long long aperf, old_aperf, new_aperf;
+ long long mperf, old_mperf, new_mperf;
+ struct timeb before, after;
+ long long int start, finish, total;
+ cpu_set_t cpuset;
+
+ if (argc != 2) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ errno = 0;
+ cpu = strtol(argv[1], (char **) NULL, 10);
+
+ if (errno) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
+ fd = open(msr_file_name, O_RDONLY);
+
+ if (fd == -1) {
+ perror("Failed to open");
+ return 1;
+ }
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+
+ if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset)) {
+ perror("Failed to set cpu affinity");
+ return 1;
+ }
+
+ ftime(&before);
+ pread(fd, &old_tsc, sizeof(old_tsc), 0x10);
+ pread(fd, &old_aperf, sizeof(old_mperf), 0xe7);
+ pread(fd, &old_mperf, sizeof(old_aperf), 0xe8);
+
+ for (i=0; i<0x8fffffff; i++) {
+ sqrt(i);
+ }
+
+ ftime(&after);
+ pread(fd, &new_tsc, sizeof(new_tsc), 0x10);
+ pread(fd, &new_aperf, sizeof(new_mperf), 0xe7);
+ pread(fd, &new_mperf, sizeof(new_aperf), 0xe8);
+
+ tsc = new_tsc-old_tsc;
+ aperf = new_aperf-old_aperf;
+ mperf = new_mperf-old_mperf;
+
+ start = before.time*1000 + before.millitm;
+ finish = after.time*1000 + after.millitm;
+ total = finish - start;
+
+ printf("runTime: %4.2f\n", 1.0*total/1000);
+ printf("freq: %7.0f\n", tsc / (1.0*aperf / (1.0 * mperf)) / total);
+ return 0;
+}
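
For context on aperf.c above: IA32_MPERF (MSR 0xe7) counts at a fixed reference rate and IA32_APERF (MSR 0xe8) at the actual running rate, so the ratio of their deltas over an interval scales the base/TSC frequency to the average effective frequency. Note that the pread() calls load 0xe7 into the aperf variables and 0xe8 into the mperf variables, i.e. swapped relative to their names; the final tsc / (aperf / mperf) expression therefore still works out to TSC * APERF/MPERF. A worked example of the arithmetic with made-up numbers:

    #include <stdio.h>

    /* The APERF delta over the MPERF delta scales the base (TSC) frequency
     * to the average effective frequency over the sampling interval. */
    int main(void)
    {
            double tsc_mhz = 2300.0;                    /* base (TSC) frequency */
            unsigned long long d_aperf = 960000000ULL;  /* APERF delta */
            unsigned long long d_mperf = 690000000ULL;  /* MPERF delta */

            printf("effective: %.0f MHz\n",
                   tsc_mhz * (double)d_aperf / (double)d_mperf);
            return 0;
    }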
diff --git a/tools/testing/selftests/intel_pstate/msr.c b/tools/testing/selftests/intel_pstate/msr.c
new file mode 100644
index 000000000000..abbbfc84d359
--- /dev/null
+++ b/tools/testing/selftests/intel_pstate/msr.c
@@ -0,0 +1,39 @@
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/timeb.h>
+#include <sched.h>
+#include <errno.h>
+
+
+int main(int argc, char **argv) {
+ int cpu, fd;
+ long long msr;
+ char msr_file_name[64];
+
+ if (argc != 2)
+ return 1;
+
+ errno = 0;
+ cpu = strtol(argv[1], (char **) NULL, 10);
+
+ if (errno)
+ return 1;
+
+ sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
+ fd = open(msr_file_name, O_RDONLY);
+
+ if (fd == -1) {
+ perror("Failed to open");
+ return 1;
+ }
+
+ pread(fd, &msr, sizeof(msr), 0x199);
+
+ printf("msr 0x199: 0x%llx\n", msr);
+ return 0;
+}
diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh
new file mode 100755
index 000000000000..bdaf37e92684
--- /dev/null
+++ b/tools/testing/selftests/intel_pstate/run.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+#
+# This test runs on Intel x86 based hardware which supports the intel_pstate
+# driver. The test checks the frequency settings from the maximum turbo
+# state to the minimum supported frequency, in decrements of 100MHz. The
+# test runs the aperf.c program to put load on each processor.
+#
+# The results are displayed in a table which indicates the "Target" state,
+# or the requested frequency in MHz, the Actual frequency, as read from
+# /proc/cpuinfo, the difference between the Target and Actual frequencies,
+# and the value of MSR 0x199 (MSR_IA32_PERF_CTL) which indicates what
+# pstate the cpu is in, and the value of
+# /sys/devices/system/cpu/intel_pstate/max_perf_pct X maximum turbo state
+#
+# Notes: In some cases several frequency values may be placed in the
+# /tmp/result.X files. This is done on purpose in order to catch cases
+# where the pstate driver may not be working at all. There is the case
+# where, for example, several "similar" frequencies are in the file:
+#
+#
+#/tmp/result.3100:1:cpu MHz : 2899.980
+#/tmp/result.3100:2:cpu MHz : 2900.000
+#/tmp/result.3100:3:msr 0x199: 0x1e00
+#/tmp/result.3100:4:max_perf_pct 94
+#
+# and the test will error out in those cases. The result.X file can be checked
+# for consistency and modified to remove the extra MHz values. The result.X
+# files can be re-evaluated by setting EVALUATE_ONLY to 1 below.
+
+EVALUATE_ONLY=0
+
+max_cpus=$(($(nproc)-1))
+
+# compile programs
+gcc -o aperf aperf.c -lm
+[ $? -ne 0 ] && echo "Problem compiling aperf.c." && exit 1
+gcc -o msr msr.c -lm
+[ $? -ne 0 ] && echo "Problem compiling msr.c." && exit 1
+
+function run_test () {
+
+ file_ext=$1
+ for cpu in `seq 0 $max_cpus`
+ do
+ echo "launching aperf load on $cpu"
+ ./aperf $cpu &
+ done
+
+ echo "sleeping for 5 seconds"
+ sleep 5
+ num_freqs=$(cat /proc/cpuinfo | grep MHz | sort -u | wc -l)
+ if [ $num_freqs -le 2 ]; then
+ cat /proc/cpuinfo | grep MHz | sort -u | tail -1 > /tmp/result.$1
+ else
+ cat /proc/cpuinfo | grep MHz | sort -u > /tmp/result.$1
+ fi
+ ./msr 0 >> /tmp/result.$1
+
+ max_perf_pct=$(cat /sys/devices/system/cpu/intel_pstate/max_perf_pct)
+ echo "max_perf_pct $max_perf_pct" >> /tmp/result.$1
+
+ for job in `jobs -p`
+ do
+ echo "waiting for job id $job"
+ wait $job
+ done
+}
+
+#
+# MAIN (ALL UNITS IN MHZ)
+#
+
+# Get the marketing frequency
+_mkt_freq=$(cat /proc/cpuinfo | grep -m 1 "model name" | awk '{print $NF}')
+_mkt_freq=$(echo $_mkt_freq | tr -d [:alpha:][:punct:])
+mkt_freq=${_mkt_freq}0
+
+# Get the ranges from cpupower
+_min_freq=$(cpupower frequency-info -l | tail -1 | awk ' { print $1 } ')
+min_freq=$(($_min_freq / 1000))
+_max_freq=$(cpupower frequency-info -l | tail -1 | awk ' { print $2 } ')
+max_freq=$(($_max_freq / 1000))
+
+
+for freq in `seq $max_freq -100 $min_freq`
+do
+ echo "Setting maximum frequency to $freq"
+ cpupower frequency-set -g powersave --max=${freq}MHz >& /dev/null
+ [ $EVALUATE_ONLY -eq 0 ] && run_test $freq
+done
+
+echo "=============================================================================="
+
+echo "The marketing frequency of the cpu is $mkt_freq MHz"
+echo "The maximum frequency of the cpu is $max_freq MHz"
+echo "The minimum frequency of the cpu is $min_freq MHz"
+
+cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null
+
+# make a pretty table
+echo "Target Actual Difference MSR(0x199) max_perf_pct"
+for freq in `seq $max_freq -100 $min_freq`
+do
+ result_freq=$(cat /tmp/result.${freq} | grep "cpu MHz" | awk ' { print $4 } ' | awk -F "." ' { print $1 } ')
+ msr=$(cat /tmp/result.${freq} | grep "msr" | awk ' { print $3 } ')
+ max_perf_pct=$(cat /tmp/result.${freq} | grep "max_perf_pct" | awk ' { print $2 } ' )
+ if [ $result_freq -eq $freq ]; then
+ echo " $freq $result_freq 0 $msr $(($max_perf_pct*3300))"
+ else
+ echo " $freq $result_freq $(($result_freq-$freq)) $msr $(($max_perf_pct*$max_freq))"
+ fi
+done
+exit 0
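
Running run.sh as added above assumes root, the cpupower utility in $PATH, and readable /dev/cpu/*/msr (i.e. the msr kernel module loaded); it stores one /tmp/result.<freq> snapshot per requested frequency, and setting EVALUATE_ONLY=1 at the top re-parses existing snapshots without regenerating the load.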
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 00326629d4af..6fb23366b258 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -1,3 +1,4 @@
socket
psock_fanout
psock_tpacket
+reuseport_bpf
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index fac4782c51d8..41449b5ad0a9 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
CFLAGS += -I../../../../usr/include/
-NET_PROGS = socket psock_fanout psock_tpacket
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf
all: $(NET_PROGS)
%: %.c
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
new file mode 100644
index 000000000000..bec1b5dd2530
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -0,0 +1,514 @@
+/*
+ * Test functionality of BPF filters for SO_REUSEPORT. The tests below will use
+ * a BPF program (both classic and extended) to read the first word from an
+ * incoming packet (expected to be in network byte-order), calculate a modulus
+ * of that number, and then dispatch the packet to the Nth socket using the
+ * result. These tests are run for each supported address family and protocol.
+ * Additionally, a few edge cases in the implementation are tested.
+ */
+
+#include <errno.h>
+#include <error.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/unistd.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+struct test_params {
+ int recv_family;
+ int send_family;
+ int protocol;
+ size_t recv_socks;
+ uint16_t recv_port;
+ uint16_t send_port_min;
+};
+
+static size_t sockaddr_size(void)
+{
+ return sizeof(struct sockaddr_storage);
+}
+
+static struct sockaddr *new_any_sockaddr(int family, uint16_t port)
+{
+ struct sockaddr_storage *addr;
+ struct sockaddr_in *addr4;
+ struct sockaddr_in6 *addr6;
+
+ addr = malloc(sizeof(struct sockaddr_storage));
+ memset(addr, 0, sizeof(struct sockaddr_storage));
+
+ switch (family) {
+ case AF_INET:
+ addr4 = (struct sockaddr_in *)addr;
+ addr4->sin_family = AF_INET;
+ addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+ addr4->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ addr6 = (struct sockaddr_in6 *)addr;
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_addr = in6addr_any;
+ addr6->sin6_port = htons(port);
+ break;
+ default:
+ error(1, 0, "Unsupported family %d", family);
+ }
+ return (struct sockaddr *)addr;
+}
+
+static struct sockaddr *new_loopback_sockaddr(int family, uint16_t port)
+{
+ struct sockaddr *addr = new_any_sockaddr(family, port);
+ struct sockaddr_in *addr4;
+ struct sockaddr_in6 *addr6;
+
+ switch (family) {
+ case AF_INET:
+ addr4 = (struct sockaddr_in *)addr;
+ addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ break;
+ case AF_INET6:
+ addr6 = (struct sockaddr_in6 *)addr;
+ addr6->sin6_addr = in6addr_loopback;
+ break;
+ default:
+ error(1, 0, "Unsupported family %d", family);
+ }
+ return addr;
+}
+
+static void attach_ebpf(int fd, uint16_t mod)
+{
+ static char bpf_log_buf[65536];
+ static const char bpf_license[] = "GPL";
+
+ int bpf_fd;
+ const struct bpf_insn prog[] = {
+ /* BPF_MOV64_REG(BPF_REG_6, BPF_REG_1) */
+ { BPF_ALU64 | BPF_MOV | BPF_X, BPF_REG_6, BPF_REG_1, 0, 0 },
+ /* BPF_LD_ABS(BPF_W, 0) R0 = (uint32_t)skb[0] */
+ { BPF_LD | BPF_ABS | BPF_W, 0, 0, 0, 0 },
+ /* BPF_ALU64_IMM(BPF_MOD, BPF_REG_0, mod) */
+ { BPF_ALU64 | BPF_MOD | BPF_K, BPF_REG_0, 0, 0, mod },
+ /* BPF_EXIT_INSN() */
+ { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
+ };
+ union bpf_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.insn_cnt = ARRAY_SIZE(prog);
+ attr.insns = (uint64_t)prog;
+ attr.license = (uint64_t)bpf_license;
+ attr.log_buf = (uint64_t)bpf_log_buf;
+ attr.log_size = sizeof(bpf_log_buf);
+ attr.log_level = 1;
+ attr.kern_version = 0;
+
+ bpf_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ if (bpf_fd < 0)
+ error(1, errno, "ebpf error. log:\n%s\n", bpf_log_buf);
+
+ if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &bpf_fd,
+ sizeof(bpf_fd)))
+ error(1, errno, "failed to set SO_ATTACH_REUSEPORT_EBPF");
+
+ close(bpf_fd);
+}
+
+static void attach_cbpf(int fd, uint16_t mod)
+{
+ struct sock_filter code[] = {
+ /* A = (uint32_t)skb[0] */
+ { BPF_LD | BPF_W | BPF_ABS, 0, 0, 0 },
+ /* A = A % mod */
+ { BPF_ALU | BPF_MOD, 0, 0, mod },
+ /* return A */
+ { BPF_RET | BPF_A, 0, 0, 0 },
+ };
+ struct sock_fprog p = {
+ .len = ARRAY_SIZE(code),
+ .filter = code,
+ };
+
+ if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, sizeof(p)))
+ error(1, errno, "failed to set SO_ATTACH_REUSEPORT_CBPF");
+}
+
+static void build_recv_group(const struct test_params p, int fd[], uint16_t mod,
+ void (*attach_bpf)(int, uint16_t))
+{
+ struct sockaddr * const addr =
+ new_any_sockaddr(p.recv_family, p.recv_port);
+ int i, opt;
+
+ for (i = 0; i < p.recv_socks; ++i) {
+ fd[i] = socket(p.recv_family, p.protocol, 0);
+ if (fd[i] < 0)
+ error(1, errno, "failed to create recv %d", i);
+
+ opt = 1;
+ if (setsockopt(fd[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+ sizeof(opt)))
+ error(1, errno, "failed to set SO_REUSEPORT on %d", i);
+
+ if (i == 0)
+ attach_bpf(fd[i], mod);
+
+ if (bind(fd[i], addr, sockaddr_size()))
+ error(1, errno, "failed to bind recv socket %d", i);
+
+ if (p.protocol == SOCK_STREAM)
+ if (listen(fd[i], p.recv_socks * 10))
+ error(1, errno, "failed to listen on socket");
+ }
+ free(addr);
+}
+
+static void send_from(struct test_params p, uint16_t sport, char *buf,
+ size_t len)
+{
+ struct sockaddr * const saddr = new_any_sockaddr(p.send_family, sport);
+ struct sockaddr * const daddr =
+ new_loopback_sockaddr(p.send_family, p.recv_port);
+ const int fd = socket(p.send_family, p.protocol, 0);
+
+ if (fd < 0)
+ error(1, errno, "failed to create send socket");
+
+ if (bind(fd, saddr, sockaddr_size()))
+ error(1, errno, "failed to bind send socket");
+ if (connect(fd, daddr, sockaddr_size()))
+ error(1, errno, "failed to connect");
+
+ if (send(fd, buf, len, 0) < 0)
+ error(1, errno, "failed to send message");
+
+ close(fd);
+ free(saddr);
+ free(daddr);
+}
+
+static void test_recv_order(const struct test_params p, int fd[], int mod)
+{
+ char recv_buf[8], send_buf[8];
+ struct msghdr msg;
+ struct iovec recv_io = { recv_buf, 8 };
+ struct epoll_event ev;
+ int epfd, conn, i, sport, expected;
+ uint32_t data, ndata;
+
+ epfd = epoll_create(1);
+ if (epfd < 0)
+ error(1, errno, "failed to create epoll");
+ for (i = 0; i < p.recv_socks; ++i) {
+ ev.events = EPOLLIN;
+ ev.data.fd = fd[i];
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd[i], &ev))
+ error(1, errno, "failed to register sock %d epoll", i);
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_iov = &recv_io;
+ msg.msg_iovlen = 1;
+
+ for (data = 0; data < p.recv_socks * 2; ++data) {
+ sport = p.send_port_min + data;
+ ndata = htonl(data);
+ memcpy(send_buf, &ndata, sizeof(ndata));
+ send_from(p, sport, send_buf, sizeof(ndata));
+
+ i = epoll_wait(epfd, &ev, 1, -1);
+ if (i < 0)
+ error(1, errno, "epoll wait failed");
+
+ if (p.protocol == SOCK_STREAM) {
+ conn = accept(ev.data.fd, NULL, NULL);
+ if (conn < 0)
+ error(1, errno, "error accepting");
+ i = recvmsg(conn, &msg, 0);
+ close(conn);
+ } else {
+ i = recvmsg(ev.data.fd, &msg, 0);
+ }
+ if (i < 0)
+ error(1, errno, "recvmsg error");
+ if (i != sizeof(ndata))
+ error(1, 0, "expected size %zd got %d",
+ sizeof(ndata), i);
+
+ for (i = 0; i < p.recv_socks; ++i)
+ if (ev.data.fd == fd[i])
+ break;
+ memcpy(&ndata, recv_buf, sizeof(ndata));
+ fprintf(stderr, "Socket %d: %d\n", i, ntohl(ndata));
+
+ expected = (sport % mod);
+ if (i != expected)
+ error(1, 0, "expected socket %d", expected);
+ }
+}
+
+static void test_reuseport_ebpf(const struct test_params p)
+{
+ int i, fd[p.recv_socks];
+
+ fprintf(stderr, "Testing EBPF mod %zd...\n", p.recv_socks);
+ build_recv_group(p, fd, p.recv_socks, attach_ebpf);
+ test_recv_order(p, fd, p.recv_socks);
+
+ fprintf(stderr, "Reprograming, testing mod %zd...\n", p.recv_socks / 2);
+ attach_ebpf(fd[0], p.recv_socks / 2);
+ test_recv_order(p, fd, p.recv_socks / 2);
+
+ for (i = 0; i < p.recv_socks; ++i)
+ close(fd[i]);
+}
+
+static void test_reuseport_cbpf(const struct test_params p)
+{
+ int i, fd[p.recv_socks];
+
+ fprintf(stderr, "Testing CBPF mod %zd...\n", p.recv_socks);
+ build_recv_group(p, fd, p.recv_socks, attach_cbpf);
+ test_recv_order(p, fd, p.recv_socks);
+
+ fprintf(stderr, "Reprograming, testing mod %zd...\n", p.recv_socks / 2);
+ attach_cbpf(fd[0], p.recv_socks / 2);
+ test_recv_order(p, fd, p.recv_socks / 2);
+
+ for (i = 0; i < p.recv_socks; ++i)
+ close(fd[i]);
+}
+
+static void test_extra_filter(const struct test_params p)
+{
+ struct sockaddr * const addr =
+ new_any_sockaddr(p.recv_family, p.recv_port);
+ int fd1, fd2, opt;
+
+ fprintf(stderr, "Testing too many filters...\n");
+ fd1 = socket(p.recv_family, p.protocol, 0);
+ if (fd1 < 0)
+ error(1, errno, "failed to create socket 1");
+ fd2 = socket(p.recv_family, p.protocol, 0);
+ if (fd2 < 0)
+ error(1, errno, "failed to create socket 2");
+
+ opt = 1;
+ if (setsockopt(fd1, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)))
+ error(1, errno, "failed to set SO_REUSEPORT on socket 1");
+ if (setsockopt(fd2, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)))
+ error(1, errno, "failed to set SO_REUSEPORT on socket 2");
+
+ attach_ebpf(fd1, 10);
+ attach_ebpf(fd2, 10);
+
+ if (bind(fd1, addr, sockaddr_size()))
+ error(1, errno, "failed to bind recv socket 1");
+
+ if (!bind(fd2, addr, sockaddr_size()) && errno != EADDRINUSE)
+ error(1, errno, "bind socket 2 should fail with EADDRINUSE");
+
+ free(addr);
+}
+
+static void test_filter_no_reuseport(const struct test_params p)
+{
+ struct sockaddr * const addr =
+ new_any_sockaddr(p.recv_family, p.recv_port);
+ const char bpf_license[] = "GPL";
+ struct bpf_insn ecode[] = {
+ { BPF_ALU64 | BPF_MOV | BPF_K, BPF_REG_0, 0, 0, 10 },
+ { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
+ };
+ struct sock_filter ccode[] = {{ BPF_RET | BPF_A, 0, 0, 0 }};
+ union bpf_attr eprog;
+ struct sock_fprog cprog;
+ int fd, bpf_fd;
+
+ fprintf(stderr, "Testing filters on non-SO_REUSEPORT socket...\n");
+
+ memset(&eprog, 0, sizeof(eprog));
+ eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ eprog.insn_cnt = ARRAY_SIZE(ecode);
+ eprog.insns = (uint64_t)ecode;
+ eprog.license = (uint64_t)bpf_license;
+ eprog.kern_version = 0;
+
+ memset(&cprog, 0, sizeof(cprog));
+ cprog.len = ARRAY_SIZE(ccode);
+ cprog.filter = ccode;
+
+
+ bpf_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &eprog, sizeof(eprog));
+ if (bpf_fd < 0)
+ error(1, errno, "ebpf error");
+ fd = socket(p.recv_family, p.protocol, 0);
+ if (fd < 0)
+ error(1, errno, "failed to create socket 1");
+
+ if (bind(fd, addr, sockaddr_size()))
+ error(1, errno, "failed to bind recv socket 1");
+
+ errno = 0;
+ if (!setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &bpf_fd,
+ sizeof(bpf_fd)) || errno != EINVAL)
+ error(1, errno, "setsockopt should have returned EINVAL");
+
+ errno = 0;
+ if (!setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &cprog,
+ sizeof(cprog)) || errno != EINVAL)
+ error(1, errno, "setsockopt should have returned EINVAL");
+
+ free(addr);
+}
+
+static void test_filter_without_bind(void)
+{
+ int fd1, fd2;
+
+ fprintf(stderr, "Testing filter add without bind...\n");
+ fd1 = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd1 < 0)
+ error(1, errno, "failed to create socket 1");
+ fd2 = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd2 < 0)
+ error(1, errno, "failed to create socket 2");
+
+ attach_ebpf(fd1, 10);
+ attach_cbpf(fd2, 10);
+
+ close(fd1);
+ close(fd2);
+}
+
+
+int main(void)
+{
+ fprintf(stderr, "---- IPv4 UDP ----\n");
+ /* NOTE: UDP socket lookups traverse a different code path when there
+ * are > 10 sockets in a group. Run the bpf test through both paths.
+ */
+ test_reuseport_ebpf((struct test_params) {
+ .recv_family = AF_INET,
+ .send_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 10,
+ .recv_port = 8000,
+ .send_port_min = 9000});
+ test_reuseport_ebpf((struct test_params) {
+ .recv_family = AF_INET,
+ .send_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 20,
+ .recv_port = 8000,
+ .send_port_min = 9000});
+ test_reuseport_cbpf((struct test_params) {
+ .recv_family = AF_INET,
+ .send_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 10,
+ .recv_port = 8001,
+ .send_port_min = 9020});
+ test_reuseport_cbpf((struct test_params) {
+ .recv_family = AF_INET,
+ .send_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 20,
+ .recv_port = 8001,
+ .send_port_min = 9020});
+ test_extra_filter((struct test_params) {
+ .recv_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_port = 8002});
+ test_filter_no_reuseport((struct test_params) {
+ .recv_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_port = 8008});
+
+ fprintf(stderr, "---- IPv6 UDP ----\n");
+ test_reuseport_ebpf((struct test_params) {
+ .recv_family = AF_INET6,
+ .send_family = AF_INET6,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 10,
+ .recv_port = 8003,
+ .send_port_min = 9040});
+ test_reuseport_ebpf((struct test_params) {
+ .recv_family = AF_INET6,
+ .send_family = AF_INET6,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 20,
+ .recv_port = 8003,
+ .send_port_min = 9040});
+ test_reuseport_cbpf((struct test_params) {
+ .recv_family = AF_INET6,
+ .send_family = AF_INET6,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 10,
+ .recv_port = 8004,
+ .send_port_min = 9060});
+ test_reuseport_cbpf((struct test_params) {
+ .recv_family = AF_INET6,
+ .send_family = AF_INET6,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 20,
+ .recv_port = 8004,
+ .send_port_min = 9060});
+ test_extra_filter((struct test_params) {
+ .recv_family = AF_INET6,
+ .protocol = SOCK_DGRAM,
+ .recv_port = 8005});
+ test_filter_no_reuseport((struct test_params) {
+ .recv_family = AF_INET6,
+ .protocol = SOCK_DGRAM,
+ .recv_port = 8009});
+
+ fprintf(stderr, "---- IPv6 UDP w/ mapped IPv4 ----\n");
+ test_reuseport_ebpf((struct test_params) {
+ .recv_family = AF_INET6,
+ .send_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 20,
+ .recv_port = 8006,
+ .send_port_min = 9080});
+ test_reuseport_ebpf((struct test_params) {
+ .recv_family = AF_INET6,
+ .send_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 10,
+ .recv_port = 8006,
+ .send_port_min = 9080});
+ test_reuseport_cbpf((struct test_params) {
+ .recv_family = AF_INET6,
+ .send_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 10,
+ .recv_port = 8007,
+ .send_port_min = 9100});
+ test_reuseport_cbpf((struct test_params) {
+ .recv_family = AF_INET6,
+ .send_family = AF_INET,
+ .protocol = SOCK_DGRAM,
+ .recv_socks = 20,
+ .recv_port = 8007,
+ .send_port_min = 9100});
+
+
+ test_filter_without_bind();
+
+ fprintf(stderr, "SUCCESS\n");
+ return 0;
+}
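
Two details that make the reuseport test above easier to follow: the return value of a SO_ATTACH_REUSEPORT_CBPF/EBPF program is used as the index of the receiving socket within the reuseport group, so "A = first payload word; A %= mod; return A" steers packet N to socket N % mod; and test_recv_order() may check against (sport % mod) because every send_port_min used in main() is a multiple of the group sizes involved, which makes sport % mod equal to data % mod. A quick standalone check of that second point:

    #include <stdio.h>

    /* sport is send_port_min + data, and send_port_min is a multiple of mod,
     * so sport % mod matches the payload-driven choice data % mod. */
    int main(void)
    {
            const unsigned int send_port_min = 9000, mod = 10;
            unsigned int data;

            for (data = 0; data < 5; data++) {
                    unsigned int sport = send_port_min + data;

                    printf("data=%u  payload%%mod=%u  sport%%mod=%u\n",
                           data, data % mod, sport % mod);
            }
            return 0;
    }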
diff --git a/tools/testing/selftests/powerpc/benchmarks/.gitignore b/tools/testing/selftests/powerpc/benchmarks/.gitignore
index b4709ea588c1..6fa673316ac2 100644
--- a/tools/testing/selftests/powerpc/benchmarks/.gitignore
+++ b/tools/testing/selftests/powerpc/benchmarks/.gitignore
@@ -1 +1,2 @@
gettimeofday
+context_switch
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index 5fa48702070d..912445ff7ce7 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -1,4 +1,4 @@
-TEST_PROGS := gettimeofday
+TEST_PROGS := gettimeofday context_switch
CFLAGS += -O2
@@ -6,6 +6,9 @@ all: $(TEST_PROGS)
$(TEST_PROGS): ../harness.c
+context_switch: ../utils.c
+context_switch: LDLIBS += -lpthread
+
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
new file mode 100644
index 000000000000..7b785941adec
--- /dev/null
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -0,0 +1,466 @@
+/*
+ * Context switch microbenchmark.
+ *
+ * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <signal.h>
+#include <assert.h>
+#include <pthread.h>
+#include <limits.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/shm.h>
+#include <linux/futex.h>
+
+#include "../utils.h"
+
+static unsigned int timeout = 30;
+
+static int touch_vdso;
+struct timeval tv;
+
+static int touch_fp = 1;
+double fp;
+
+static int touch_vector = 1;
+typedef int v4si __attribute__ ((vector_size (16)));
+v4si a, b, c;
+
+#ifdef __powerpc__
+static int touch_altivec = 1;
+
+static void __attribute__((__target__("no-vsx"))) altivec_touch_fn(void)
+{
+ c = a + b;
+}
+#endif
+
+static void touch(void)
+{
+ if (touch_vdso)
+ gettimeofday(&tv, NULL);
+
+ if (touch_fp)
+ fp += 0.1;
+
+#ifdef __powerpc__
+ if (touch_altivec)
+ altivec_touch_fn();
+#endif
+
+ if (touch_vector)
+ c = a + b;
+
+ asm volatile("# %0 %1 %2": : "r"(&tv), "r"(&fp), "r"(&c));
+}
+
+static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
+{
+ pthread_t tid;
+ cpu_set_t cpuset;
+ pthread_attr_t attr;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+
+ pthread_attr_init(&attr);
+
+ if (pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset)) {
+ perror("pthread_attr_setaffinity_np");
+ exit(1);
+ }
+
+ if (pthread_create(&tid, &attr, fn, arg)) {
+ perror("pthread_create");
+ exit(1);
+ }
+}
+
+static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
+{
+ int pid;
+ cpu_set_t cpuset;
+
+ pid = fork();
+ if (pid == -1) {
+ perror("fork");
+ exit(1);
+ }
+
+ if (pid)
+ return;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+
+ if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
+ perror("sched_setaffinity");
+ exit(1);
+ }
+
+ fn(arg);
+
+ exit(0);
+}
+
+static unsigned long iterations;
+static unsigned long iterations_prev;
+
+static void sigalrm_handler(int junk)
+{
+ unsigned long i = iterations;
+
+ printf("%ld\n", i - iterations_prev);
+ iterations_prev = i;
+
+ if (--timeout == 0)
+ kill(0, SIGUSR1);
+
+ alarm(1);
+}
+
+static void sigusr1_handler(int junk)
+{
+ exit(0);
+}
+
+struct actions {
+ void (*setup)(int, int);
+ void *(*thread1)(void *);
+ void *(*thread2)(void *);
+};
+
+#define READ 0
+#define WRITE 1
+
+static int pipe_fd1[2];
+static int pipe_fd2[2];
+
+static void pipe_setup(int cpu1, int cpu2)
+{
+ if (pipe(pipe_fd1) || pipe(pipe_fd2))
+ exit(1);
+}
+
+static void *pipe_thread1(void *arg)
+{
+ signal(SIGALRM, sigalrm_handler);
+ alarm(1);
+
+ while (1) {
+ assert(read(pipe_fd1[READ], &c, 1) == 1);
+ touch();
+
+ assert(write(pipe_fd2[WRITE], &c, 1) == 1);
+ touch();
+
+ iterations += 2;
+ }
+
+ return NULL;
+}
+
+static void *pipe_thread2(void *arg)
+{
+ while (1) {
+ assert(write(pipe_fd1[WRITE], &c, 1) == 1);
+ touch();
+
+ assert(read(pipe_fd2[READ], &c, 1) == 1);
+ touch();
+ }
+
+ return NULL;
+}
+
+static struct actions pipe_actions = {
+ .setup = pipe_setup,
+ .thread1 = pipe_thread1,
+ .thread2 = pipe_thread2,
+};
+
+static void yield_setup(int cpu1, int cpu2)
+{
+ if (cpu1 != cpu2) {
+ fprintf(stderr, "Both threads must be on the same CPU for yield test\n");
+ exit(1);
+ }
+}
+
+static void *yield_thread1(void *arg)
+{
+ signal(SIGALRM, sigalrm_handler);
+ alarm(1);
+
+ while (1) {
+ sched_yield();
+ touch();
+
+ iterations += 2;
+ }
+
+ return NULL;
+}
+
+static void *yield_thread2(void *arg)
+{
+ while (1) {
+ sched_yield();
+ touch();
+ }
+
+ return NULL;
+}
+
+static struct actions yield_actions = {
+ .setup = yield_setup,
+ .thread1 = yield_thread1,
+ .thread2 = yield_thread2,
+};
+
+static long sys_futex(void *addr1, int op, int val1, struct timespec *timeout,
+ void *addr2, int val3)
+{
+ return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
+}
+
+static unsigned long cmpxchg(unsigned long *p, unsigned long expected,
+ unsigned long desired)
+{
+ unsigned long exp = expected;
+
+ __atomic_compare_exchange_n(p, &exp, desired, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return exp;
+}
+
+static unsigned long xchg(unsigned long *p, unsigned long val)
+{
+ return __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
+}
+
+static int mutex_lock(unsigned long *m)
+{
+ int c;
+
+ c = cmpxchg(m, 0, 1);
+ if (!c)
+ return 0;
+
+ if (c == 1)
+ c = xchg(m, 2);
+
+ while (c) {
+ sys_futex(m, FUTEX_WAIT, 2, NULL, NULL, 0);
+ c = xchg(m, 2);
+ }
+
+ return 0;
+}
+
+static int mutex_unlock(unsigned long *m)
+{
+ if (*m == 2)
+ *m = 0;
+ else if (xchg(m, 0) == 1)
+ return 0;
+
+ sys_futex(m, FUTEX_WAKE, 1, NULL, NULL, 0);
+
+ return 0;
+}
+
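+/*
+ * The futex mutex above uses the classic three-state encoding:
+ *   0 - unlocked
+ *   1 - locked, no waiters
+ *   2 - locked, waiters may be present
+ * mutex_lock() only calls FUTEX_WAIT after it has marked the lock contended
+ * (state 2), and mutex_unlock() only calls FUTEX_WAKE when it could not take
+ * the uncontended fast path, so an uncontended lock/unlock pair never enters
+ * the kernel.
+ */
+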
+static unsigned long *m1, *m2;
+
+static void futex_setup(int cpu1, int cpu2)
+{
+ int shmid;
+ void *shmaddr;
+
+ shmid = shmget(IPC_PRIVATE, getpagesize(), SHM_R | SHM_W);
+ if (shmid < 0) {
+ perror("shmget");
+ exit(1);
+ }
+
+ shmaddr = shmat(shmid, NULL, 0);
+ if (shmaddr == (char *)-1) {
+ perror("shmat");
+ shmctl(shmid, IPC_RMID, NULL);
+ exit(1);
+ }
+
+ shmctl(shmid, IPC_RMID, NULL);
+
+ m1 = shmaddr;
+ m2 = shmaddr + sizeof(*m1);
+
+ *m1 = 0;
+ *m2 = 0;
+
+ mutex_lock(m1);
+ mutex_lock(m2);
+}
+
+static void *futex_thread1(void *arg)
+{
+ signal(SIGALRM, sigalrm_handler);
+ alarm(1);
+
+ while (1) {
+ mutex_lock(m2);
+ mutex_unlock(m1);
+
+ iterations += 2;
+ }
+
+ return NULL;
+}
+
+static void *futex_thread2(void *arg)
+{
+ while (1) {
+ mutex_unlock(m2);
+ mutex_lock(m1);
+ }
+
+ return NULL;
+}
+
+static struct actions futex_actions = {
+ .setup = futex_setup,
+ .thread1 = futex_thread1,
+ .thread2 = futex_thread2,
+};
+
+static int processes;
+
+static struct option options[] = {
+ { "test", required_argument, 0, 't' },
+ { "process", no_argument, &processes, 1 },
+ { "timeout", required_argument, 0, 's' },
+ { "vdso", no_argument, &touch_vdso, 1 },
+ { "no-fp", no_argument, &touch_fp, 0 },
+#ifdef __powerpc__
+ { "no-altivec", no_argument, &touch_altivec, 0 },
+#endif
+ { "no-vector", no_argument, &touch_vector, 0 },
+ { 0, },
+};
+
+static void usage(void)
+{
+ fprintf(stderr, "Usage: context_switch2 <options> CPU1 CPU2\n\n");
+ fprintf(stderr, "\t\t--test=X\tpipe, futex or yield (default)\n");
+ fprintf(stderr, "\t\t--process\tUse processes (default threads)\n");
+ fprintf(stderr, "\t\t--timeout=X\tDuration in seconds to run (default 30)\n");
+ fprintf(stderr, "\t\t--vdso\t\ttouch VDSO\n");
+ fprintf(stderr, "\t\t--fp\t\ttouch FP\n");
+#ifdef __powerpc__
+ fprintf(stderr, "\t\t--altivec\ttouch altivec\n");
+#endif
+ fprintf(stderr, "\t\t--vector\ttouch vector\n");
+}
+
+int main(int argc, char *argv[])
+{
+ signed char c;
+ struct actions *actions = &yield_actions;
+ int cpu1;
+ int cpu2;
+ static void (*start_fn)(void *(*fn)(void *), void *arg, unsigned long cpu);
+
+ while (1) {
+ int option_index = 0;
+
+ c = getopt_long(argc, argv, "", options, &option_index);
+
+ if (c == -1)
+ break;
+
+ switch (c) {
+ case 0:
+ if (options[option_index].flag != 0)
+ break;
+
+ usage();
+ exit(1);
+ break;
+
+ case 't':
+ if (!strcmp(optarg, "pipe")) {
+ actions = &pipe_actions;
+ } else if (!strcmp(optarg, "yield")) {
+ actions = &yield_actions;
+ } else if (!strcmp(optarg, "futex")) {
+ actions = &futex_actions;
+ } else {
+ usage();
+ exit(1);
+ }
+ break;
+
+ case 's':
+ timeout = atoi(optarg);
+ break;
+
+ default:
+ usage();
+ exit(1);
+ }
+ }
+
+ if (processes)
+ start_fn = start_process_on;
+ else
+ start_fn = start_thread_on;
+
+ if ((argc - optind) != 2) {
+ cpu1 = cpu2 = pick_online_cpu();
+ } else {
+ cpu1 = atoi(argv[optind++]);
+ cpu2 = atoi(argv[optind++]);
+ }
+
+ printf("Using %s with ", processes ? "processes" : "threads");
+
+ if (actions == &pipe_actions)
+ printf("pipe");
+ else if (actions == &yield_actions)
+ printf("yield");
+ else
+ printf("futex");
+
+ printf(" on cpus %d/%d touching FP:%s altivec:%s vector:%s vdso:%s\n",
+ cpu1, cpu2, touch_fp ? "yes" : "no", touch_altivec ? "yes" : "no",
+ touch_vector ? "yes" : "no", touch_vdso ? "yes" : "no");
+
+ /* Create a new process group so we can signal everyone for exit */
+ setpgid(getpid(), getpid());
+
+ signal(SIGUSR1, sigusr1_handler);
+
+ actions->setup(cpu1, cpu2);
+
+ start_fn(actions->thread1, NULL, cpu1);
+ start_fn(actions->thread2, NULL, cpu2);
+
+ while (1)
+ sleep(3600);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c b/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
index 8265504de571..08a8b95e3bc1 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
@@ -60,14 +60,6 @@ int dscr_inherit_exec(void)
else
set_dscr(dscr);
- /*
- * XXX: Force a context switch out so that DSCR
- * current value is copied into the thread struct
- * which is required for the child to inherit the
- * changed value.
- */
- sleep(1);
-
pid = fork();
if (pid == -1) {
perror("fork() failed");
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c b/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
index 4e414caf7f40..3e5a6d195e9a 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
@@ -40,14 +40,6 @@ int dscr_inherit(void)
else
set_dscr(dscr);
- /*
- * XXX: Force a context switch out so that DSCR
- * current value is copied into the thread struct
- * which is required for the child to inherit the
- * changed value.
- */
- sleep(1);
-
pid = fork();
if (pid == -1) {
perror("fork() failed");
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index f7997affd143..52f9be7f61f0 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -116,46 +116,3 @@ int test_harness(int (test_function)(void), char *name)
return rc;
}
-
-static char auxv[4096];
-
-void *get_auxv_entry(int type)
-{
- ElfW(auxv_t) *p;
- void *result;
- ssize_t num;
- int fd;
-
- fd = open("/proc/self/auxv", O_RDONLY);
- if (fd == -1) {
- perror("open");
- return NULL;
- }
-
- result = NULL;
-
- num = read(fd, auxv, sizeof(auxv));
- if (num < 0) {
- perror("read");
- goto out;
- }
-
- if (num > sizeof(auxv)) {
- printf("Overflowed auxv buffer\n");
- goto out;
- }
-
- p = (ElfW(auxv_t) *)auxv;
-
- while (p->a_type != AT_NULL) {
- if (p->a_type == type) {
- result = (void *)p->a_un.a_val;
- break;
- }
-
- p++;
- }
-out:
- close(fd);
- return result;
-}
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index a9099d9f8f39..ac41a7177f2e 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -2,7 +2,7 @@ noarg:
$(MAKE) -C ../
TEST_PROGS := count_instructions l3_bank_test per_event_excludes
-EXTRA_SOURCES := ../harness.c event.c lib.c
+EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
all: $(TEST_PROGS) ebb
@@ -12,6 +12,8 @@ $(TEST_PROGS): $(EXTRA_SOURCES)
count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
$(CC) $(CFLAGS) -m64 -o $@ $^
+per_event_excludes: ../utils.c
+
include ../../lib.mk
DEFAULT_RUN_TESTS := $(RUN_TESTS)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 5cdc9dbf2b27..8d2279c4bb4b 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -18,7 +18,8 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test \
all: $(TEST_PROGS)
-$(TEST_PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c busy_loop.S
+$(TEST_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
+ ebb.c ebb_handler.S trace.c busy_loop.S
instruction_count_test: ../loop.S
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
index 9729d9f90218..e67452f1bcff 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
@@ -13,7 +13,6 @@
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
-#include <linux/auxvec.h>
#include "trace.h"
#include "reg.h"
@@ -324,7 +323,7 @@ bool ebb_is_supported(void)
{
#ifdef PPC_FEATURE2_EBB
/* EBB requires at least POWER8 */
- return ((long)get_auxv_entry(AT_HWCAP2) & PPC_FEATURE2_EBB);
+ return have_hwcap2(PPC_FEATURE2_EBB);
#else
return false;
#endif
diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c
index a07104c2afe6..a361ad3334ce 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.c
+++ b/tools/testing/selftests/powerpc/pmu/lib.c
@@ -15,32 +15,6 @@
#include "lib.h"
-int pick_online_cpu(void)
-{
- cpu_set_t mask;
- int cpu;
-
- CPU_ZERO(&mask);
-
- if (sched_getaffinity(0, sizeof(mask), &mask)) {
- perror("sched_getaffinity");
- return -1;
- }
-
- /* We prefer a primary thread, but skip 0 */
- for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
- if (CPU_ISSET(cpu, &mask))
- return cpu;
-
- /* Search for anything, but in reverse */
- for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
- if (CPU_ISSET(cpu, &mask))
- return cpu;
-
- printf("No cpus in affinity mask?!\n");
- return -1;
-}
-
int bind_to_cpu(int cpu)
{
cpu_set_t mask;
diff --git a/tools/testing/selftests/powerpc/pmu/lib.h b/tools/testing/selftests/powerpc/pmu/lib.h
index ca5d72ae3be6..0213af4ff332 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.h
+++ b/tools/testing/selftests/powerpc/pmu/lib.h
@@ -19,7 +19,6 @@ union pipe {
int fds[2];
};
-extern int pick_online_cpu(void);
extern int bind_to_cpu(int cpu);
extern int kill_child_and_wait(pid_t child_pid);
extern int wait_for_child(pid_t child_pid);
diff --git a/tools/testing/selftests/powerpc/scripts/hmi.sh b/tools/testing/selftests/powerpc/scripts/hmi.sh
new file mode 100755
index 000000000000..83fb253ae3bd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/scripts/hmi.sh
@@ -0,0 +1,89 @@
+#!/bin/sh
+#
+# Copyright 2015, Daniel Axtens, IBM Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+
+# do we have ./getscom, ./putscom?
+if [ -x ./getscom ] && [ -x ./putscom ]; then
+ GETSCOM=./getscom
+ PUTSCOM=./putscom
+elif which getscom > /dev/null; then
+ GETSCOM=$(which getscom)
+ PUTSCOM=$(which putscom)
+else
+ cat <<EOF
+Can't find getscom/putscom in . or \$PATH.
+See https://github.com/open-power/skiboot.
+The tool is in external/xscom-utils
+EOF
+ exit 1
+fi
+
+# We will get 8 HMI events per injection
+# todo: deal with things being offline
+expected_hmis=8
+COUNT_HMIS() {
+ dmesg | grep -c 'Harmless Hypervisor Maintenance interrupt'
+}
+
+# massively expand snooze delay, allowing injection on all cores
+ppc64_cpu --smt-snooze-delay=1000000000
+
+# when we exit, restore it
+trap "ppc64_cpu --smt-snooze-delay=100" 0 1
+
+# for each chip+core combination
+# todo - less fragile parsing
+egrep -o 'OCC: Chip [0-9a-f]+ Core [0-9a-f]' < /sys/firmware/opal/msglog |
+while read chipcore; do
+ chip=$(echo "$chipcore"|awk '{print $3}')
+ core=$(echo "$chipcore"|awk '{print $5}')
+ fir="0x1${core}013100"
+
+ # verify that Core FIR is zero as expected
+ if [ "$($GETSCOM -c 0x${chip} $fir)" != 0 ]; then
+ echo "FIR was not zero before injection for chip $chip, core $core. Aborting!"
+ echo "Result of $GETSCOM -c 0x${chip} $fir:"
+ $GETSCOM -c 0x${chip} $fir
+ echo "If you get a -5 error, the core may be in idle state. Try stress-ng."
+ echo "Otherwise, try $PUTSCOM -c 0x${chip} $fir 0"
+ exit 1
+ fi
+
+ # keep track of the number of HMIs handled
+ old_hmis=$(COUNT_HMIS)
+
+ # do injection, adding a marker to dmesg for clarity
+ echo "Injecting HMI on core $core, chip $chip" | tee /dev/kmsg
+ # inject a RegFile recoverable error
+ if ! $PUTSCOM -c 0x${chip} $fir 2000000000000000 > /dev/null; then
+ echo "Error injecting. Aborting!"
+ exit 1
+ fi
+
+ # now we want to wait for all the HMIs to be processed
+ # we expect one per thread on the core
+ i=0;
+ new_hmis=$(COUNT_HMIS)
+ while [ $new_hmis -lt $((old_hmis + expected_hmis)) ] && [ $i -lt 12 ]; do
+ echo "Seen $((new_hmis - old_hmis)) HMI(s) out of $expected_hmis expected, sleeping"
+ sleep 5;
+ i=$((i + 1))
+ new_hmis=$(COUNT_HMIS)
+ done
+ if [ $i = 12 ]; then
+ echo "Haven't seen expected $expected_hmis recoveries after 1 min. Aborting."
+ exit 1
+ fi
+ echo "Processed $expected_hmis events; presumed success. Check dmesg."
+ echo ""
+done
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 2699635d2cd9..7d0f14b8cb2e 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -1,2 +1,5 @@
tm-resched-dscr
tm-syscall
+tm-signal-msr-resv
+tm-signal-stack
+tm-vmxcopy
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 4bea62a319dc..737f72c964e6 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -1,8 +1,8 @@
-TEST_PROGS := tm-resched-dscr tm-syscall
+TEST_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack tm-vmxcopy
all: $(TEST_PROGS)
-$(TEST_PROGS): ../harness.c
+$(TEST_PROGS): ../harness.c ../utils.c
tm-syscall: tm-syscall-asm.S
tm-syscall: CFLAGS += -mhtm -I../../../../../usr/include
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index 42d4c8caad81..8fde93d6021f 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -29,6 +29,7 @@
#include <asm/tm.h>
#include "utils.h"
+#include "tm.h"
#define TBEGIN ".long 0x7C00051D ;"
#define TEND ".long 0x7C00055D ;"
@@ -42,6 +43,8 @@ int test_body(void)
{
uint64_t rv, dscr1 = 1, dscr2, texasr;
+ SKIP_IF(!have_htm());
+
printf("Check DSCR TM context switch: ");
fflush(stdout);
for (;;) {
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c b/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c
new file mode 100644
index 000000000000..d86653f282b1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Test the kernel's signal return code to ensure that it doesn't
+ * crash when both the transactional and suspend MSR bits are set in
+ * the signal context.
+ *
+ * For this test, we send ourselves a SIGUSR1. In the SIGUSR1 handler
+ * we modify the signal context to set both MSR TM S and T bits (which
+ * is "reserved" by the PowerISA). When we return from the signal
+ * handler (implicit sigreturn), the kernel should detect the reserved MSR
+ * value and send us a SIGSEGV.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include "utils.h"
+#include "tm.h"
+
+int segv_expected = 0;
+
+void signal_segv(int signum)
+{
+ if (segv_expected && (signum == SIGSEGV))
+ _exit(0);
+ _exit(1);
+}
+
+void signal_usr1(int signum, siginfo_t *info, void *uc)
+{
+ ucontext_t *ucp = uc;
+
+ /* Link tm checkpointed context to normal context */
+ ucp->uc_link = ucp;
+ /* Set all TM bits so that the context is now invalid */
+#ifdef __powerpc64__
+ ucp->uc_mcontext.gp_regs[PT_MSR] |= (7ULL << 32);
+#else
+ ucp->uc_mcontext.regs->gpr[PT_MSR] |= (7ULL);
+#endif
+ /* Should segv on return because of the invalid context */
+ segv_expected = 1;
+}
+
+int tm_signal_msr_resv()
+{
+ struct sigaction act;
+
+ SKIP_IF(!have_htm());
+
+ act.sa_sigaction = signal_usr1;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGUSR1, &act, NULL) < 0) {
+ perror("sigaction sigusr1");
+ exit(1);
+ }
+ if (signal(SIGSEGV, signal_segv) == SIG_ERR)
+ exit(1);
+
+ raise(SIGUSR1);
+
+ /* We shouldn't get here as we exit in the segv handler */
+ return 1;
+}
+
+int main(void)
+{
+ return test_harness(tm_signal_msr_resv, "tm_signal_msr_resv");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-stack.c b/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
new file mode 100644
index 000000000000..e44a238c1d77
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Test the kernel's signal delivery code to ensure that we don't
+ * treclaim twice in the kernel signal delivery code. This can happen
+ * if we trigger a signal when in a transaction and the stack pointer
+ * is bogus.
+ *
+ * This test case registers a SEGV handler, sets the stack pointer
+ * (r1) to NULL, starts a transaction and then generates a SEGV. The
+ * SEGV should be handled but we exit here as the stack pointer is
+ * invalid and hence we can't sigreturn. We only need to check that
+ * this flow doesn't crash the kernel.
+ */
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+
+#include "utils.h"
+#include "tm.h"
+
+void signal_segv(int signum)
+{
+ /* This should never actually run since stack is foobar */
+ exit(1);
+}
+
+int tm_signal_stack()
+{
+ int pid;
+
+ SKIP_IF(!have_htm());
+
+ pid = fork();
+ if (pid < 0)
+ exit(1);
+
+ if (pid) { /* Parent */
+ /*
+ * It's likely the whole machine will crash here so if
+ * the child ever exits, we are good.
+ */
+ wait(NULL);
+ return 0;
+ }
+
+ /*
+ * The flow here is:
+ * 1) register a signal handler (so signal delivery occurs)
+ * 2) make stack pointer (r1) = NULL
+ * 3) start transaction
+ * 4) cause segv
+ */
+ if (signal(SIGSEGV, signal_segv) == SIG_ERR)
+ exit(1);
+ asm volatile("li 1, 0 ;" /* stack ptr == NULL */
+ "1:"
+ ".long 0x7C00051D ;" /* tbegin */
+ "beq 1b ;" /* retry forever */
+ ".long 0x7C0005DD ; ;" /* tsuspend */
+ "ld 2, 0(1) ;" /* trigger segv" */
+ : : : "memory");
+
+ /* This should never get here due to above segv */
+ return 1;
+}
+
+int main(void)
+{
+ return test_harness(tm_signal_stack, "tm_signal_stack");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall.c b/tools/testing/selftests/powerpc/tm/tm-syscall.c
index e835bf7ec7ae..60560cb20e38 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall.c
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall.c
@@ -13,12 +13,11 @@
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/tm.h>
-#include <asm/cputable.h>
-#include <linux/auxvec.h>
#include <sys/time.h>
#include <stdlib.h>
#include "utils.h"
+#include "tm.h"
extern int getppid_tm_active(void);
extern int getppid_tm_suspended(void);
@@ -77,16 +76,6 @@ pid_t getppid_tm(bool suspend)
exit(-1);
}
-static inline bool have_htm_nosc(void)
-{
-#ifdef PPC_FEATURE2_HTM_NOSC
- return ((long)get_auxv_entry(AT_HWCAP2) & PPC_FEATURE2_HTM_NOSC);
-#else
- printf("PPC_FEATURE2_HTM_NOSC not defined, can't check AT_HWCAP2\n");
- return false;
-#endif
-}
-
int tm_syscall(void)
{
unsigned count = 0;
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
new file mode 100644
index 000000000000..0274de7b11f3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Original: Michael Neuling 4/12/2013
+ * Edited: Rashmica Gupta 4/12/2015
+ *
+ * See if the altivec state is leaked out of an aborted transaction due to
+ * kernel vmx copy loops.
+ *
+ * When the transaction aborts, VSR values should roll back to the values
+ * they held before the transaction commenced. Using VSRs while the
+ * transaction is suspended should not affect the checkpointed values.
+ *
+ * (1) write A to a VSR
+ * (2) start transaction
+ * (3) suspend transaction
+ * (4) change the VSR to B
+ * (5) trigger kernel vmx copy loop
+ * (6) abort transaction
+ * (7) check that the VSR value is A
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <string.h>
+#include <assert.h>
+
+#include "tm.h"
+#include "utils.h"
+
+int test_vmxcopy()
+{
+ long double vecin = 1.3;
+ long double vecout;
+ unsigned long pgsize = getpagesize();
+ int i;
+ int fd;
+ int size = pgsize*16;
+ char tmpfile[] = "/tmp/page_faultXXXXXX";
+ char buf[pgsize];
+ char *a;
+ uint64_t aborted = 0;
+
+ SKIP_IF(!have_htm());
+
+ fd = mkstemp(tmpfile);
+ assert(fd >= 0);
+
+ memset(buf, 0, pgsize);
+ for (i = 0; i < size; i += pgsize)
+ assert(write(fd, buf, pgsize) == pgsize);
+
+ unlink(tmpfile);
+
+ a = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
+ assert(a != MAP_FAILED);
+
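+ /*
+ * The asm below implements steps (1)-(7) from the comment at the top of
+ * this file: lxvd2x loads VSR 40 with the initial value (1), tbegin. and
+ * tsuspend. start and suspend the transaction (2)(3), xxlxor clears
+ * VSR 40 (4), the store to the freshly mmap()ed file page triggers the
+ * kernel vmx copy loop (5), tabort. aborts the transaction (6), and
+ * stxvd2x writes VSR 40 back out so the C code can check it rolled back (7).
+ */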
+ asm __volatile__(
+ "lxvd2x 40,0,%[vecinptr];" /* set 40 to initial value*/
+ "tbegin.;"
+ "beq 3f;"
+ "tsuspend.;"
+ "xxlxor 40,40,40;" /* set 40 to 0 */
+ "std 5, 0(%[map]);" /* cause kernel vmx copy page */
+ "tabort. 0;"
+ "tresume.;"
+ "tend.;"
+ "li %[res], 0;"
+ "b 5f;"
+
+ /* Abort handler */
+ "3:;"
+ "li %[res], 1;"
+
+ "5:;"
+ "stxvd2x 40,0,%[vecoutptr];"
+ : [res]"=r"(aborted)
+ : [vecinptr]"r"(&vecin),
+ [vecoutptr]"r"(&vecout),
+ [map]"r"(a)
+ : "memory", "r0", "r3", "r4", "r5", "r6", "r7");
+
+ if (aborted && (vecin != vecout)) {
+ printf("FAILED: vector state leaked on abort %f != %f\n",
+ (double)vecin, (double)vecout);
+ return 1;
+ }
+
+ munmap(a, size);
+
+ close(fd);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_vmxcopy, "tm_vmxcopy");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
new file mode 100644
index 000000000000..24144b25772c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _SELFTESTS_POWERPC_TM_TM_H
+#define _SELFTESTS_POWERPC_TM_TM_H
+
+#include <stdbool.h>
+#include <asm/cputable.h>
+
+#include "../utils.h"
+
+static inline bool have_htm(void)
+{
+#ifdef PPC_FEATURE2_HTM
+ return have_hwcap2(PPC_FEATURE2_HTM);
+#else
+ printf("PPC_FEATURE2_HTM not defined, can't check AT_HWCAP2\n");
+ return false;
+#endif
+}
+
+static inline bool have_htm_nosc(void)
+{
+#ifdef PPC_FEATURE2_HTM_NOSC
+ return have_hwcap2(PPC_FEATURE2_HTM_NOSC);
+#else
+ printf("PPC_FEATURE2_HTM_NOSC not defined, can't check AT_HWCAP2\n");
+ return false;
+#endif
+}
+
+#endif /* _SELFTESTS_POWERPC_TM_TM_H */
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
new file mode 100644
index 000000000000..dcf74184bfd0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/utils.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2013-2015, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#define _GNU_SOURCE /* For CPU_ZERO etc. */
+
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <link.h>
+#include <sched.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+static char auxv[4096];
+
+void *get_auxv_entry(int type)
+{
+ ElfW(auxv_t) *p;
+ void *result;
+ ssize_t num;
+ int fd;
+
+ fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd == -1) {
+ perror("open");
+ return NULL;
+ }
+
+ result = NULL;
+
+ num = read(fd, auxv, sizeof(auxv));
+ if (num < 0) {
+ perror("read");
+ goto out;
+ }
+
+ if (num > sizeof(auxv)) {
+ printf("Overflowed auxv buffer\n");
+ goto out;
+ }
+
+ p = (ElfW(auxv_t) *)auxv;
+
+ while (p->a_type != AT_NULL) {
+ if (p->a_type == type) {
+ result = (void *)p->a_un.a_val;
+ break;
+ }
+
+ p++;
+ }
+out:
+ close(fd);
+ return result;
+}
+
+int pick_online_cpu(void)
+{
+ cpu_set_t mask;
+ int cpu;
+
+ CPU_ZERO(&mask);
+
+ if (sched_getaffinity(0, sizeof(mask), &mask)) {
+ perror("sched_getaffinity");
+ return -1;
+ }
+
+ /* We prefer a primary thread, but skip 0 */
+ for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
+ if (CPU_ISSET(cpu, &mask))
+ return cpu;
+
+ /* Search for anything, but in reverse */
+ for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
+ if (CPU_ISSET(cpu, &mask))
+ return cpu;
+
+ printf("No cpus in affinity mask?!\n");
+ return -1;
+}
diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/utils.h
index b7d41086bb0a..175ac6ad10dd 100644
--- a/tools/testing/selftests/powerpc/utils.h
+++ b/tools/testing/selftests/powerpc/utils.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <stdbool.h>
+#include <linux/auxvec.h>
/* Avoid headaches with PRI?64 - just use %ll? always */
typedef unsigned long long u64;
@@ -21,6 +22,12 @@ typedef uint8_t u8;
int test_harness(int (test_function)(void), char *name);
extern void *get_auxv_entry(int type);
+int pick_online_cpu(void);
+
+static inline bool have_hwcap2(unsigned long ftr2)
+{
+ return ((unsigned long)get_auxv_entry(AT_HWCAP2) & ftr2) == ftr2;
+}
/* Yes, this is evil */
#define FAIL_IF(x) \
diff --git a/tools/testing/selftests/ptrace/.gitignore b/tools/testing/selftests/ptrace/.gitignore
new file mode 100644
index 000000000000..b3e59d41fd82
--- /dev/null
+++ b/tools/testing/selftests/ptrace/.gitignore
@@ -0,0 +1 @@
+peeksiginfo
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 5236e073919d..0f80eefb0bfd 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -38,8 +38,6 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-grace=120
-
T=/tmp/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
touch $T
@@ -152,7 +150,7 @@ fi
qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
# Generate architecture-specific and interaction-specific qemu arguments
-qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$builddir/console.log"`"
+qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$resdir/console.log"`"
# Generate qemu -append arguments
qemu_append="`identify_qemu_append "$QEMU"`"
@@ -168,7 +166,7 @@ then
touch $resdir/buildonly
exit 0
fi
-echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
+echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
qemu_pid=$!
@@ -214,7 +212,7 @@ then
else
break
fi
- if test $kruntime -ge $((seconds + grace))
+ if test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
then
echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
kill -KILL $qemu_pid
@@ -224,6 +222,5 @@ then
done
fi
-cp $builddir/console.log $resdir
parse-torture.sh $resdir/console.log $title
parse-console.sh $resdir/console.log $title
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index f6483609ebc2..4a431767f77a 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -42,6 +42,7 @@ TORTURE_DEFCONFIG=defconfig
TORTURE_BOOT_IMAGE=""
TORTURE_INITRD="$KVM/initrd"; export TORTURE_INITRD
TORTURE_KMAKE_ARG=""
+TORTURE_SHUTDOWN_GRACE=180
TORTURE_SUITE=rcu
resdir=""
configs=""
@@ -149,6 +150,11 @@ do
resdir=$2
shift
;;
+ --shutdown-grace)
+ checkarg --shutdown-grace "(seconds)" "$#" "$2" '^[0-9]*$' '^error'
+ TORTURE_SHUTDOWN_GRACE=$2
+ shift
+ ;;
--torture)
checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
TORTURE_SUITE=$2
@@ -266,6 +272,7 @@ TORTURE_KMAKE_ARG="$TORTURE_KMAKE_ARG"; export TORTURE_KMAKE_ARG
TORTURE_QEMU_CMD="$TORTURE_QEMU_CMD"; export TORTURE_QEMU_CMD
TORTURE_QEMU_INTERACTIVE="$TORTURE_QEMU_INTERACTIVE"; export TORTURE_QEMU_INTERACTIVE
TORTURE_QEMU_MAC="$TORTURE_QEMU_MAC"; export TORTURE_QEMU_MAC
+TORTURE_SHUTDOWN_GRACE="$TORTURE_SHUTDOWN_GRACE"; export TORTURE_SHUTDOWN_GRACE
TORTURE_SUITE="$TORTURE_SUITE"; export TORTURE_SUITE
if ! test -e $resdir
then
@@ -307,10 +314,10 @@ awk < $T/cfgcpu.pack \
}
# Dump out the scripting required to run one test batch.
-function dump(first, pastlast)
+function dump(first, pastlast, batchnum)
{
- print "echo ----Start batch: `date`";
- print "echo ----Start batch: `date` >> " rd "/log";
+ print "echo ----Start batch " batchnum ": `date`";
+ print "echo ----Start batch " batchnum ": `date` >> " rd "/log";
jn=1
for (j = first; j < pastlast; j++) {
builddir=KVM "/b" jn
@@ -371,25 +378,28 @@ END {
njobs = i;
nc = ncpus;
first = 0;
+ batchnum = 1;
# Each pass through the following loop considers one test.
for (i = 0; i < njobs; i++) {
if (ncpus == 0) {
# Sequential test specified, each test its own batch.
- dump(i, i + 1);
+ dump(i, i + 1, batchnum);
first = i;
+ batchnum++;
} else if (nc < cpus[i] && i != 0) {
# Out of CPUs, dump out a batch.
- dump(first, i);
+ dump(first, i, batchnum);
first = i;
nc = ncpus;
+ batchnum++;
}
# Account for the CPUs needed by the current test.
nc -= cpus[i];
}
# Dump the last batch.
if (ncpus != 0)
- dump(first, i);
+ dump(first, i, batchnum);
}' >> $T/script
cat << ___EOF___ >> $T/script
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index d8f35cf116be..844787a0d7be 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -24,9 +24,6 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-T=/tmp/abat-chk-badness.sh.$$
-trap 'rm -f $T' 0
-
file="$1"
title="$2"
@@ -36,9 +33,41 @@ if grep -Pq '\x00' < $file
then
print_warning Console output contains nul bytes, old qemu still running?
fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
-if test -s $T
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
+if test -s $1.diags
then
print_warning Assertion failure in $file $title
- cat $T
+ # cat $1.diags
+ summary=""
+ n_badness=`grep -c Badness $1`
+ if test "$n_badness" -ne 0
+ then
+ summary="$summary Badness: $n_badness"
+ fi
+ n_warn=`grep -v 'Warning: unable to open an initial console' $1 | egrep -c 'WARNING:|Warn'`
+ if test "$n_warn" -ne 0
+ then
+ summary="$summary Warnings: $n_warn"
+ fi
+ n_bugs=`egrep -c 'BUG|Oops:' $1`
+ if test "$n_bugs" -ne 0
+ then
+ summary="$summary Bugs: $n_bugs"
+ fi
+ n_calltrace=`grep -c 'Call Trace:' $1`
+ if test "$n_calltrace" -ne 0
+ then
+ summary="$summary Call Traces: $n_calltrace"
+ fi
+ n_lockdep=`grep -c =========== $1`
+ if test "$n_badness" -ne 0
+ then
+ summary="$summary lockdep: $n_badness"
+ fi
+ n_stalls=`egrep -c 'detected stalls on CPUs/tasks:|Stall ended before state dump start' $1`
+ if test "$n_stalls" -ne 0
+ then
+ summary="$summary Stalls: $n_stalls"
+ fi
+ print_warning Summary: $summary
fi
diff --git a/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt b/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt
index 9ef33a743b73..24396ae8355b 100644
--- a/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt
+++ b/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt
@@ -20,7 +20,6 @@ CONFIG_PROVE_RCU
CONFIG_NO_HZ_FULL_SYSIDLE
CONFIG_RCU_NOCB_CPU
-CONFIG_RCU_USER_QS
Meaningless for TINY_RCU.
diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
index 657f3a035488..4e2b1893d40d 100644
--- a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
+++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
@@ -72,10 +72,6 @@ CONFIG_RCU_TORTURE_TEST_RUNNABLE
Always used in KVM testing.
-CONFIG_RCU_USER_QS
-
- Redundant with CONFIG_NO_HZ_FULL.
-
CONFIG_PREEMPT_RCU
CONFIG_TREE_RCU
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 882fe83a3554..b9453b838162 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1246,11 +1246,24 @@ TEST_F(TRACE_poke, getpid_runs_normally)
# error "Do not know how to find your architecture's registers and syscalls"
#endif
+/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
+ * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
+ */
+#if defined(__x86_64__) || defined(__i386__)
+#define HAVE_GETREGS
+#endif
+
/* Architecture-specific syscall fetching routine. */
int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
{
- struct iovec iov;
ARCH_REGS regs;
+#ifdef HAVE_GETREGS
+ EXPECT_EQ(0, ptrace(PTRACE_GETREGS, tracee, 0, &regs)) {
+ TH_LOG("PTRACE_GETREGS failed");
+ return -1;
+ }
+#else
+ struct iovec iov;
iov.iov_base = &regs;
iov.iov_len = sizeof(regs);
@@ -1258,6 +1271,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
TH_LOG("PTRACE_GETREGSET failed");
return -1;
}
+#endif
return regs.SYSCALL_NUM;
}
@@ -1266,13 +1280,16 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
void change_syscall(struct __test_metadata *_metadata,
pid_t tracee, int syscall)
{
- struct iovec iov;
int ret;
ARCH_REGS regs;
-
+#ifdef HAVE_GETREGS
+ ret = ptrace(PTRACE_GETREGS, tracee, 0, &regs);
+#else
+ struct iovec iov;
iov.iov_base = &regs;
iov.iov_len = sizeof(regs);
ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
+#endif
EXPECT_EQ(0, ret);
#if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
@@ -1312,9 +1329,13 @@ void change_syscall(struct __test_metadata *_metadata,
if (syscall == -1)
regs.SYSCALL_RET = 1;
+#ifdef HAVE_GETREGS
+ ret = ptrace(PTRACE_SETREGS, tracee, 0, &regs);
+#else
iov.iov_base = &regs;
iov.iov_len = sizeof(regs);
ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov);
+#endif
EXPECT_EQ(0, ret);
}
diff --git a/tools/testing/selftests/seccomp/test_harness.h b/tools/testing/selftests/seccomp/test_harness.h
index fb2841601f2f..a786c69c7584 100644
--- a/tools/testing/selftests/seccomp/test_harness.h
+++ b/tools/testing/selftests/seccomp/test_harness.h
@@ -42,6 +42,7 @@
#define TEST_HARNESS_H_
#define _GNU_SOURCE
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -370,8 +371,8 @@
__typeof__(_expected) __exp = (_expected); \
__typeof__(_seen) __seen = (_seen); \
if (!(__exp _t __seen)) { \
- unsigned long long __exp_print = (unsigned long long)__exp; \
- unsigned long long __seen_print = (unsigned long long)__seen; \
+ unsigned long long __exp_print = (uintptr_t)__exp; \
+ unsigned long long __seen_print = (uintptr_t)__seen; \
__TH_LOG("Expected %s (%llu) %s %s (%llu)", \
#_expected, __exp_print, #_t, \
#_seen, __seen_print); \
diff --git a/tools/testing/selftests/timers/.gitignore b/tools/testing/selftests/timers/.gitignore
index ced998151bc4..68f3fc71ac44 100644
--- a/tools/testing/selftests/timers/.gitignore
+++ b/tools/testing/selftests/timers/.gitignore
@@ -16,3 +16,4 @@ set-timer-lat
skew_consistency
threadtest
valid-adjtimex
+adjtick
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index ff1bb16cec4f..a937a9d26b60 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -2,3 +2,8 @@ hugepage-mmap
hugepage-shm
map_hugetlb
thuge-gen
+compaction_test
+mlock2-tests
+on-fault-limit
+transhuge-stress
+userfaultfd
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index eabcff411984..d0c473f65850 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -4,9 +4,11 @@ include ../lib.mk
.PHONY: all all_32 all_64 warn_32bit_failure clean
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs ldt_gdt syscall_nt ptrace_syscall
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso \
- test_FCMOV test_FCOMI test_FISTTP
+ test_FCMOV test_FCOMI test_FISTTP \
+ ldt_gdt \
+ vdso_restorer
TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
diff --git a/tools/testing/selftests/x86/vdso_restorer.c b/tools/testing/selftests/x86/vdso_restorer.c
new file mode 100644
index 000000000000..cb038424a403
--- /dev/null
+++ b/tools/testing/selftests/x86/vdso_restorer.c
@@ -0,0 +1,88 @@
+/*
+ * vdso_restorer.c - tests vDSO-based signal restore
+ * Copyright (c) 2015 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This makes sure that sa_restorer == NULL keeps working on 32-bit
+ * configurations. Modern glibc doesn't use it under any circumstances,
+ * so it's easy to overlook breakage.
+ *
+ * 64-bit userspace has never supported sa_restorer == NULL, so this is
+ * 32-bit only.
+ */
+
+#define _GNU_SOURCE
+
+#include <err.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#include <syscall.h>
+#include <sys/syscall.h>
+
+/* Open-code this -- the headers are too messy to easily use them. */
+struct real_sigaction {
+ void *handler;
+ unsigned long flags;
+ void *restorer;
+ unsigned int mask[2];
+};
+
+static volatile sig_atomic_t handler_called;
+
+static void handler_with_siginfo(int sig, siginfo_t *info, void *ctx_void)
+{
+ handler_called = 1;
+}
+
+static void handler_without_siginfo(int sig)
+{
+ handler_called = 1;
+}
+
+int main()
+{
+ int nerrs = 0;
+ struct real_sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.handler = handler_with_siginfo;
+ sa.flags = SA_SIGINFO;
+ sa.restorer = NULL; /* request kernel-provided restorer */
+
+ if (syscall(SYS_rt_sigaction, SIGUSR1, &sa, NULL, 8) != 0)
+ err(1, "raw rt_sigaction syscall");
+
+ raise(SIGUSR1);
+
+ if (handler_called) {
+ printf("[OK]\tSA_SIGINFO handler returned successfully\n");
+ } else {
+ printf("[FAIL]\tSA_SIGINFO handler was not called\n");
+ nerrs++;
+ }
+
+ sa.flags = 0;
+ sa.handler = handler_without_siginfo;
+ if (syscall(SYS_sigaction, SIGUSR1, &sa, 0) != 0)
+ err(1, "raw sigaction syscall");
+ handler_called = 0;
+
+ raise(SIGUSR1);
+
+ if (handler_called) {
+ printf("[OK]\t!SA_SIGINFO handler returned successfully\n");
+ } else {
+ printf("[FAIL]\t!SA_SIGINFO handler was not called\n");
+ nerrs++;
+ }
+
+ return nerrs ? 1 : 0;
+}
diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h
index 26b7926bda88..ba34f9e96efd 100644
--- a/tools/virtio/asm/barrier.h
+++ b/tools/virtio/asm/barrier.h
@@ -1,15 +1,19 @@
#if defined(__i386__) || defined(__x86_64__)
#define barrier() asm volatile("" ::: "memory")
-#define mb() __sync_synchronize()
-
-#define smp_mb() mb()
-# define dma_rmb() barrier()
-# define dma_wmb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
+#define virt_mb() __sync_synchronize()
+#define virt_rmb() barrier()
+#define virt_wmb() barrier()
+/* Atomic store should be enough, but gcc generates worse code in that case. */
+#define virt_store_mb(var, value) do { \
+ typeof(var) virt_store_mb_value = (value); \
+ __atomic_exchange(&(var), &virt_store_mb_value, &virt_store_mb_value, \
+ __ATOMIC_SEQ_CST); \
+ barrier(); \
+} while (0)
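+/*
+ * Typical (hypothetical) use: virt_store_mb(flag, 1) both publishes the
+ * store to flag and prevents later loads from being reordered before it.
+ */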
/* Weak barriers should be used. If not - it's a bug */
-# define rmb() abort()
-# define wmb() abort()
+# define mb() abort()
+# define rmb() abort()
+# define wmb() abort()
#else
#error Please fill in barrier macros
#endif
diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h
new file mode 100644
index 000000000000..845960e1cbf2
--- /dev/null
+++ b/tools/virtio/linux/compiler.h
@@ -0,0 +1,9 @@
+#ifndef LINUX_COMPILER_H
+#define LINUX_COMPILER_H
+
+#define WRITE_ONCE(var, val) \
+ (*((volatile typeof(val) *)(&(var))) = (val))
+
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
+
+#endif
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 4db7d5691ba7..033849948215 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -8,6 +8,7 @@
#include <assert.h>
#include <stdarg.h>
+#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bug.h>
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
new file mode 100644
index 000000000000..feaa64ac4630
--- /dev/null
+++ b/tools/virtio/ringtest/Makefile
@@ -0,0 +1,22 @@
+all:
+
+all: ring virtio_ring_0_9 virtio_ring_poll
+
+CFLAGS += -Wall
+CFLAGS += -pthread -O2 -ggdb
+LDFLAGS += -pthread -O2 -ggdb
+
+main.o: main.c main.h
+ring.o: ring.c main.h
+virtio_ring_0_9.o: virtio_ring_0_9.c main.h
+virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h
+ring: ring.o main.o
+virtio_ring_0_9: virtio_ring_0_9.o main.o
+virtio_ring_poll: virtio_ring_poll.o main.o
+clean:
+ -rm main.o
+ -rm ring.o ring
+ -rm virtio_ring_0_9.o virtio_ring_0_9
+ -rm virtio_ring_poll.o virtio_ring_poll
+
+.PHONY: all clean
diff --git a/tools/virtio/ringtest/README b/tools/virtio/ringtest/README
new file mode 100644
index 000000000000..34e94c46104f
--- /dev/null
+++ b/tools/virtio/ringtest/README
@@ -0,0 +1,2 @@
+Partial implementation of various ring layouts, useful to tune virtio design.
+Uses shared memory heavily.
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
new file mode 100644
index 000000000000..3a5ff438bd62
--- /dev/null
+++ b/tools/virtio/ringtest/main.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Command line processing and common functions for ring benchmarking.
+ */
+#define _GNU_SOURCE
+#include <getopt.h>
+#include <pthread.h>
+#include <assert.h>
+#include <sched.h>
+#include "main.h"
+#include <sys/eventfd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <limits.h>
+
+int runcycles = 10000000;
+int max_outstanding = INT_MAX;
+int batch = 1;
+
+bool do_sleep = false;
+bool do_relax = false;
+bool do_exit = true;
+
+unsigned ring_size = 256;
+
+static int kickfd = -1;
+static int callfd = -1;
+
+void notify(int fd)
+{
+ unsigned long long v = 1;
+ int r;
+
+ vmexit();
+ r = write(fd, &v, sizeof v);
+ assert(r == sizeof v);
+ vmentry();
+}
+
+void wait_for_notify(int fd)
+{
+ unsigned long long v = 1;
+ int r;
+
+ vmexit();
+ r = read(fd, &v, sizeof v);
+ assert(r == sizeof v);
+ vmentry();
+}
+
+void kick(void)
+{
+ notify(kickfd);
+}
+
+void wait_for_kick(void)
+{
+ wait_for_notify(kickfd);
+}
+
+void call(void)
+{
+ notify(callfd);
+}
+
+void wait_for_call(void)
+{
+ wait_for_notify(callfd);
+}
+
+void set_affinity(const char *arg)
+{
+ cpu_set_t cpuset;
+ int ret;
+ pthread_t self;
+ long int cpu;
+ char *endptr;
+
+ if (!arg)
+ return;
+
+ cpu = strtol(arg, &endptr, 0);
+ assert(!*endptr);
+
+ assert(cpu >= 0 && cpu < CPU_SETSIZE);
+
+ self = pthread_self();
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+
+ ret = pthread_setaffinity_np(self, sizeof(cpu_set_t), &cpuset);
+ assert(!ret);
+}
+
+static void run_guest(void)
+{
+ int completed_before;
+ int completed = 0;
+ int started = 0;
+ int bufs = runcycles;
+ int spurious = 0;
+ int r;
+ unsigned len;
+ void *buf;
+ int tokick = batch;
+
+ for (;;) {
+ if (do_sleep)
+ disable_call();
+ completed_before = completed;
+ do {
+ if (started < bufs &&
+ started - completed < max_outstanding) {
+ r = add_inbuf(0, NULL, "Hello, world!");
+ if (__builtin_expect(r == 0, true)) {
+ ++started;
+ if (!--tokick) {
+ tokick = batch;
+ if (do_sleep)
+ kick_available();
+ }
+
+ }
+ } else
+ r = -1;
+
+ /* Flush out completed bufs if any */
+ if (get_buf(&len, &buf)) {
+ ++completed;
+ if (__builtin_expect(completed == bufs, false))
+ return;
+ r = 0;
+ }
+ } while (r == 0);
+ if (completed == completed_before)
+ ++spurious;
+ assert(completed <= bufs);
+ assert(started <= bufs);
+ if (do_sleep) {
+ if (enable_call())
+ wait_for_call();
+ } else {
+ poll_used();
+ }
+ }
+}
+
+static void run_host(void)
+{
+ int completed_before;
+ int completed = 0;
+ int spurious = 0;
+ int bufs = runcycles;
+ unsigned len;
+ void *buf;
+
+ for (;;) {
+ if (do_sleep) {
+ if (enable_kick())
+ wait_for_kick();
+ } else {
+ poll_avail();
+ }
+ if (do_sleep)
+ disable_kick();
+ completed_before = completed;
+ while (__builtin_expect(use_buf(&len, &buf), true)) {
+ if (do_sleep)
+ call_used();
+ ++completed;
+ if (__builtin_expect(completed == bufs, false))
+ return;
+ }
+ if (completed == completed_before)
+ ++spurious;
+ assert(completed <= bufs);
+ if (completed == bufs)
+ break;
+ }
+}
+
+void *start_guest(void *arg)
+{
+ set_affinity(arg);
+ run_guest();
+ pthread_exit(NULL);
+}
+
+void *start_host(void *arg)
+{
+ set_affinity(arg);
+ run_host();
+ pthread_exit(NULL);
+}
+
+static const char optstring[] = "";
+static const struct option longopts[] = {
+ {
+ .name = "help",
+ .has_arg = no_argument,
+ .val = 'h',
+ },
+ {
+ .name = "host-affinity",
+ .has_arg = required_argument,
+ .val = 'H',
+ },
+ {
+ .name = "guest-affinity",
+ .has_arg = required_argument,
+ .val = 'G',
+ },
+ {
+ .name = "ring-size",
+ .has_arg = required_argument,
+ .val = 'R',
+ },
+ {
+ .name = "run-cycles",
+ .has_arg = required_argument,
+ .val = 'C',
+ },
+ {
+ .name = "outstanding",
+ .has_arg = required_argument,
+ .val = 'o',
+ },
+ {
+ .name = "batch",
+ .has_arg = required_argument,
+ .val = 'b',
+ },
+ {
+ .name = "sleep",
+ .has_arg = no_argument,
+ .val = 's',
+ },
+ {
+ .name = "relax",
+ .has_arg = no_argument,
+ .val = 'x',
+ },
+ {
+ .name = "exit",
+ .has_arg = no_argument,
+ .val = 'e',
+ },
+ {
+ }
+};
+
+static void help(void)
+{
+ fprintf(stderr, "Usage: <test> [--help]"
+ " [--host-affinity H]"
+ " [--guest-affinity G]"
+ " [--ring-size R (default: %d)]"
+ " [--run-cycles C (default: %d)]"
+ " [--batch b]"
+ " [--outstanding o]"
+ " [--sleep]"
+ " [--relax]"
+ " [--exit]"
+ "\n",
+ ring_size,
+ runcycles);
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ pthread_t host, guest;
+ void *tret;
+ char *host_arg = NULL;
+ char *guest_arg = NULL;
+ char *endptr;
+ long int c;
+
+ kickfd = eventfd(0, 0);
+ assert(kickfd >= 0);
+ callfd = eventfd(0, 0);
+ assert(callfd >= 0);
+
+ for (;;) {
+ int o = getopt_long(argc, argv, optstring, longopts, NULL);
+ switch (o) {
+ case -1:
+ goto done;
+ case '?':
+ help();
+ exit(2);
+ case 'H':
+ host_arg = optarg;
+ break;
+ case 'G':
+ guest_arg = optarg;
+ break;
+ case 'R':
+ ring_size = strtol(optarg, &endptr, 0);
+ assert(ring_size && !(ring_size & (ring_size - 1)));
+ assert(!*endptr);
+ break;
+ case 'C':
+ c = strtol(optarg, &endptr, 0);
+ assert(!*endptr);
+ assert(c > 0 && c < INT_MAX);
+ runcycles = c;
+ break;
+ case 'o':
+ c = strtol(optarg, &endptr, 0);
+ assert(!*endptr);
+ assert(c > 0 && c < INT_MAX);
+ max_outstanding = c;
+ break;
+ case 'b':
+ c = strtol(optarg, &endptr, 0);
+ assert(!*endptr);
+ assert(c > 0 && c < INT_MAX);
+ batch = c;
+ break;
+ case 's':
+ do_sleep = true;
+ break;
+ case 'x':
+ do_relax = true;
+ break;
+ case 'e':
+ do_exit = true;
+ break;
+ default:
+ help();
+ exit(4);
+ break;
+ }
+ }
+
+ /* does nothing here, used to make sure all smp APIs compile */
+ smp_acquire();
+ smp_release();
+ smp_mb();
+done:
+
+ if (batch > max_outstanding)
+ batch = max_outstanding;
+
+ if (optind < argc) {
+ help();
+ exit(4);
+ }
+ alloc_ring();
+
+ ret = pthread_create(&host, NULL, start_host, host_arg);
+ assert(!ret);
+ ret = pthread_create(&guest, NULL, start_guest, guest_arg);
+ assert(!ret);
+
+ ret = pthread_join(guest, &tret);
+ assert(!ret);
+ ret = pthread_join(host, &tret);
+ assert(!ret);
+ return 0;
+}
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
new file mode 100644
index 000000000000..16917acb0ade
--- /dev/null
+++ b/tools/virtio/ringtest/main.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Common macros and functions for ring benchmarking.
+ */
+#ifndef MAIN_H
+#define MAIN_H
+
+#include <stdbool.h>
+
+extern bool do_exit;
+
+#if defined(__x86_64__) || defined(__i386__)
+#include "x86intrin.h"
+
+static inline void wait_cycles(unsigned long long cycles)
+{
+ unsigned long long t;
+
+ t = __rdtsc();
+ while (__rdtsc() - t < cycles) {}
+}
+
+#define VMEXIT_CYCLES 500
+#define VMENTRY_CYCLES 500
+
+#else
+static inline void wait_cycles(unsigned long long cycles)
+{
+ _Exit(5);
+}
+#define VMEXIT_CYCLES 0
+#define VMENTRY_CYCLES 0
+#endif
+
+static inline void vmexit(void)
+{
+ if (!do_exit)
+ return;
+
+ wait_cycles(VMEXIT_CYCLES);
+}
+static inline void vmentry(void)
+{
+ if (!do_exit)
+ return;
+
+ wait_cycles(VMENTRY_CYCLES);
+}
+
+/* implemented by ring */
+void alloc_ring(void);
+/* guest side */
+int add_inbuf(unsigned, void *, void *);
+void *get_buf(unsigned *, void **);
+void disable_call();
+bool enable_call();
+void kick_available();
+void poll_used();
+/* host side */
+void disable_kick();
+bool enable_kick();
+bool use_buf(unsigned *, void **);
+void call_used();
+void poll_avail();
+
+/* implemented by main */
+extern bool do_sleep;
+void kick(void);
+void wait_for_kick(void);
+void call(void);
+void wait_for_call(void);
+
+extern unsigned ring_size;
+
+/* Compiler barrier - similar to what Linux uses */
+#define barrier() asm volatile("" ::: "memory")
+
+/* Is there a portable way to do this? */
+#if defined(__x86_64__) || defined(__i386__)
+#define cpu_relax() asm ("rep; nop" ::: "memory")
+#else
+#define cpu_relax() assert(0)
+#endif
+
+extern bool do_relax;
+
+static inline void busy_wait(void)
+{
+ if (do_relax)
+ cpu_relax();
+ else
+ /* prevent compiler from removing busy loops */
+ barrier();
+}
+
+/*
+ * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
+ * with other __ATOMIC_SEQ_CST calls.
+ */
+#define smp_mb() __sync_synchronize()
+
+/*
+ * This abuses the atomic builtins for thread fences, and
+ * adds a compiler barrier.
+ */
+#define smp_release() do { \
+ barrier(); \
+ __atomic_thread_fence(__ATOMIC_RELEASE); \
+} while (0)
+
+#define smp_acquire() do { \
+ __atomic_thread_fence(__ATOMIC_ACQUIRE); \
+ barrier(); \
+} while (0)
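+
+/*
+ * Typical pairing, as used by the ring implementations here: the producer
+ * writes the payload, calls smp_release(), then writes the flag/index the
+ * consumer polls; the consumer reads the flag/index, calls smp_acquire(),
+ * then reads the payload.  See "Barrier A (for pairing)" and
+ * "Barrier B (for pairing)" in ring.c.
+ */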
+
+#endif
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
new file mode 100644
index 000000000000..c25c8d248b6b
--- /dev/null
+++ b/tools/virtio/ringtest/ring.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Simple descriptor-based ring. A virtio-0.9-compatible event index is used
+ * for signalling, unconditionally.
+ */
+#define _GNU_SOURCE
+#include "main.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/* Next - where the next entry will be written.
+ * Prev - the value of "next" when the event last triggered.
+ * Event - the peer requested an event after this entry was written.
+ */
+static inline bool need_event(unsigned short event,
+ unsigned short next,
+ unsigned short prev)
+{
+ return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
+}
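+
+/* Worked example: with prev = 2 and next = 5, the peer is notified iff it
+ * requested an event at index 2, 3 or 4, i.e. in [prev, next) modulo 2^16:
+ *   event = 3: (5 - 3 - 1) = 1 < (5 - 2) = 3        -> notify
+ *   event = 5: (5 - 5 - 1) = 0xffff, which is >= 3  -> no notification needed
+ */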
+
+/* Design:
+ * Guest adds descriptors with unique index values and DESC_HW in flags.
+ * Host overwrites used descriptors with correct len, index, and DESC_HW clear.
+ * Flags are always set last.
+ */
+#define DESC_HW 0x1
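+
+/* Barrier pairing used below (labels match the comments in the code):
+ *   A: smp_release() in add_inbuf()   <->  smp_acquire() in use_buf()
+ *   B: smp_release() in use_buf()     <->  smp_acquire() in get_buf()
+ *   C: smp_mb() in kick_available()   <->  smp_mb() in enable_kick()
+ *   D: smp_mb() in call_used()        <->  smp_mb() in enable_call()
+ */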
+
+struct desc {
+ unsigned short flags;
+ unsigned short index;
+ unsigned len;
+ unsigned long long addr;
+};
+
+/* how much padding is needed to avoid false cache sharing */
+#define HOST_GUEST_PADDING 0x80
+
+/* Mostly read */
+struct event {
+ unsigned short kick_index;
+ unsigned char reserved0[HOST_GUEST_PADDING - 2];
+ unsigned short call_index;
+ unsigned char reserved1[HOST_GUEST_PADDING - 2];
+};
+
+struct data {
+	void *buf; /* the descriptor is writeable, so we can't recover buf from it */
+ void *data;
+} *data;
+
+struct desc *ring;
+struct event *event;
+
+struct guest {
+ unsigned avail_idx;
+ unsigned last_used_idx;
+ unsigned num_free;
+ unsigned kicked_avail_idx;
+ unsigned char reserved[HOST_GUEST_PADDING - 12];
+} guest;
+
+struct host {
+ /* we do not need to track last avail index
+ * unless we have more than one in flight.
+ */
+ unsigned used_idx;
+ unsigned called_used_idx;
+ unsigned char reserved[HOST_GUEST_PADDING - 4];
+} host;
+
+/* implemented by ring */
+void alloc_ring(void)
+{
+ int ret;
+ int i;
+
+ ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
+ if (ret) {
+ perror("Unable to allocate ring buffer.\n");
+ exit(3);
+ }
+ event = malloc(sizeof *event);
+ if (!event) {
+ perror("Unable to allocate event buffer.\n");
+ exit(3);
+ }
+ memset(event, 0, sizeof *event);
+ guest.avail_idx = 0;
+ guest.kicked_avail_idx = -1;
+ guest.last_used_idx = 0;
+ host.used_idx = 0;
+ host.called_used_idx = -1;
+ for (i = 0; i < ring_size; ++i) {
+ struct desc desc = {
+ .index = i,
+ };
+ ring[i] = desc;
+ }
+ guest.num_free = ring_size;
+ data = malloc(ring_size * sizeof *data);
+ if (!data) {
+ perror("Unable to allocate data buffer.\n");
+ exit(3);
+ }
+ memset(data, 0, ring_size * sizeof *data);
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+ unsigned head, index;
+
+ if (!guest.num_free)
+ return -1;
+
+ guest.num_free--;
+ head = (ring_size - 1) & (guest.avail_idx++);
+
+	/* Start with a write. On MESI architectures this helps avoid
+	 * entering a shared state with a consumer that is polling this
+	 * descriptor.
+	 */
+ ring[head].addr = (unsigned long)(void*)buf;
+ ring[head].len = len;
+	/* The read below might bypass the write above. That is OK because it
+	 * is only an optimization: if it happens, we merely end up with the
+	 * cache line in a shared state, which is unfortunate but probably not
+	 * worth an explicit full barrier to avoid.
+	 */
+ barrier();
+ index = ring[head].index;
+ data[index].buf = buf;
+ data[index].data = datap;
+ /* Barrier A (for pairing) */
+ smp_release();
+ ring[head].flags = DESC_HW;
+
+ return 0;
+}
+
+void *get_buf(unsigned *lenp, void **bufp)
+{
+ unsigned head = (ring_size - 1) & guest.last_used_idx;
+ unsigned index;
+ void *datap;
+
+ if (ring[head].flags & DESC_HW)
+ return NULL;
+ /* Barrier B (for pairing) */
+ smp_acquire();
+ *lenp = ring[head].len;
+ index = ring[head].index & (ring_size - 1);
+ datap = data[index].data;
+ *bufp = data[index].buf;
+ data[index].buf = NULL;
+ data[index].data = NULL;
+ guest.num_free++;
+ guest.last_used_idx++;
+ return datap;
+}
+
+void poll_used(void)
+{
+ unsigned head = (ring_size - 1) & guest.last_used_idx;
+
+ while (ring[head].flags & DESC_HW)
+ busy_wait();
+}
+
+void disable_call()
+{
+ /* Doing nothing to disable calls might cause
+ * extra interrupts, but reduces the number of cache misses.
+ */
+}
+
+bool enable_call()
+{
+ unsigned head = (ring_size - 1) & guest.last_used_idx;
+
+ event->call_index = guest.last_used_idx;
+ /* Flush call index write */
+ /* Barrier D (for pairing) */
+ smp_mb();
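+	/*
+	 * Re-check the descriptor after the full barrier: either we observe a
+	 * completion here (DESC_HW already clear), or the host is guaranteed
+	 * to observe the updated call_index and send a call, so a wakeup
+	 * cannot be lost.
+	 */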
+ return ring[head].flags & DESC_HW;
+}
+
+void kick_available(void)
+{
+ /* Flush in previous flags write */
+ /* Barrier C (for pairing) */
+ smp_mb();
+ if (!need_event(event->kick_index,
+ guest.avail_idx,
+ guest.kicked_avail_idx))
+ return;
+
+ guest.kicked_avail_idx = guest.avail_idx;
+ kick();
+}
+
+/* host side */
+void disable_kick()
+{
+ /* Doing nothing to disable kicks might cause
+ * extra interrupts, but reduces the number of cache misses.
+ */
+}
+
+bool enable_kick()
+{
+ unsigned head = (ring_size - 1) & host.used_idx;
+
+ event->kick_index = host.used_idx;
+ /* Barrier C (for pairing) */
+ smp_mb();
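+	/*
+	 * Re-check after the barrier: either we already observe new work here
+	 * (DESC_HW set), or the guest is guaranteed to observe the updated
+	 * kick_index and kick, so a wakeup cannot be lost.
+	 */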
+ return !(ring[head].flags & DESC_HW);
+}
+
+void poll_avail(void)
+{
+ unsigned head = (ring_size - 1) & host.used_idx;
+
+ while (!(ring[head].flags & DESC_HW))
+ busy_wait();
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+ unsigned head = (ring_size - 1) & host.used_idx;
+
+ if (!(ring[head].flags & DESC_HW))
+ return false;
+
+ /* make sure length read below is not speculated */
+ /* Barrier A (for pairing) */
+ smp_acquire();
+
+ /* simple in-order completion: we don't need
+ * to touch index at all. This also means we
+ * can just modify the descriptor in-place.
+ */
+ ring[head].len--;
+	/* Make sure len is valid before flags.
+	 * Note: an alternative is to write len and flags in one access -
+	 * possible on 64-bit architectures - but wmb is free on Intel anyway,
+	 * so there is no way to measure whether that would be a gain.
+	 */
+ /* Barrier B (for pairing) */
+ smp_release();
+ ring[head].flags = 0;
+ host.used_idx++;
+ return true;
+}
+
+void call_used(void)
+{
+ /* Flush in previous flags write */
+ /* Barrier D (for pairing) */
+ smp_mb();
+ if (!need_event(event->call_index,
+ host.used_idx,
+ host.called_used_idx))
+ return;
+
+ host.called_used_idx = host.used_idx;
+ call();
+}
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
new file mode 100755
index 000000000000..52b0f71ffa8d
--- /dev/null
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
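+#Example (assuming one of the benchmark binaries, e.g. ./ring, was built):
+#  ./run-on-all.sh ./ring --sleep
+#This runs the benchmark pinned to each guest CPU in turn, then with host
+#affinity only, and finally with no affinity at all.
+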
+#Use the last CPU for the host. Why not the first?
+#Many devices tend to use cpu0 by default, so it tends to be busier.
+HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1)
+
+#Run the command once per CPU, setting the guest affinity to that CPU.
+for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n);
+do
+	#Don't run the guest and the host on the same CPU;
+	#it actually works OK when signalling (--sleep) is used.
+ if
+ (echo "$@" | grep -e "--sleep" > /dev/null) || \
+ test $HOST_AFFINITY '!=' $cpu
+ then
+ echo "GUEST AFFINITY $cpu"
+ "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
+ fi
+done
+echo "NO GUEST AFFINITY"
+"$@" --host-affinity $HOST_AFFINITY
+echo "NO AFFINITY"
+"$@"
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
new file mode 100644
index 000000000000..47c9a1a18d36
--- /dev/null
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Partial implementation of virtio 0.9. The event index is used for
+ * signalling, unconditionally. The design roughly follows the Linux kernel
+ * implementation so that its performance can be judged.
+ */
+#define _GNU_SOURCE
+#include "main.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <linux/virtio_ring.h>
+
+struct data {
+ void *data;
+} *data;
+
+struct vring ring;
+
+/* Enabling the define below activates experimental ring polling code,
+ * which skips index reads on the consumer side in favor of looking at
+ * the high bits of the ring id ^ 0x8000 (virtio_ring_poll.c builds this
+ * file with RING_POLL defined).
+ */
+/* #ifdef RING_POLL */
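+/* With RING_POLL, each avail ring slot stores
+ *   (head | (avail_idx & ~(ring_size - 1))) ^ 0x8000
+ * i.e. the descriptor head in the low bits and the high bits of the avail
+ * counter in the rest, with the top bit flipped so that a zero-initialized
+ * slot never looks valid. Worked example, assuming ring_size = 256: at
+ * avail_idx = 0x0102 with head = 5 the producer stores 0x8105; the consumer
+ * at position 0x0102 checks (0x0102 ^ 0x8105 ^ 0x8000) & ~0xff == 0 and
+ * accepts the entry, while a stale value from the previous lap (0x8005) or
+ * the initial 0 fails the check. The encoded id is copied into the used ring
+ * unchanged, so get_buf() applies the same check on the guest side.
+ */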
+
+/* how much padding is needed to avoid false cache sharing */
+#define HOST_GUEST_PADDING 0x80
+
+struct guest {
+ unsigned short avail_idx;
+ unsigned short last_used_idx;
+ unsigned short num_free;
+ unsigned short kicked_avail_idx;
+ unsigned short free_head;
+ unsigned char reserved[HOST_GUEST_PADDING - 10];
+} guest;
+
+struct host {
+ /* we do not need to track last avail index
+ * unless we have more than one in flight.
+ */
+ unsigned short used_idx;
+ unsigned short called_used_idx;
+ unsigned char reserved[HOST_GUEST_PADDING - 4];
+} host;
+
+/* implemented by ring */
+void alloc_ring(void)
+{
+ int ret;
+ int i;
+ void *p;
+
+ ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
+ if (ret) {
+ perror("Unable to allocate ring buffer.\n");
+ exit(3);
+ }
+ memset(p, 0, vring_size(ring_size, 0x1000));
+ vring_init(&ring, ring_size, p, 0x1000);
+
+ guest.avail_idx = 0;
+ guest.kicked_avail_idx = -1;
+ guest.last_used_idx = 0;
+ /* Put everything in free lists. */
+ guest.free_head = 0;
+ for (i = 0; i < ring_size - 1; i++)
+ ring.desc[i].next = i + 1;
+ host.used_idx = 0;
+ host.called_used_idx = -1;
+ guest.num_free = ring_size;
+ data = malloc(ring_size * sizeof *data);
+ if (!data) {
+ perror("Unable to allocate data buffer.\n");
+ exit(3);
+ }
+ memset(data, 0, ring_size * sizeof *data);
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+ unsigned head, avail;
+ struct vring_desc *desc;
+
+ if (!guest.num_free)
+ return -1;
+
+ head = guest.free_head;
+ guest.num_free--;
+
+ desc = ring.desc;
+ desc[head].flags = VRING_DESC_F_NEXT;
+ desc[head].addr = (unsigned long)(void *)buf;
+ desc[head].len = len;
+	/* Set and then clear VRING_DESC_F_NEXT to simulate the flag
+	 * manipulation we would have to do if there were multiple
+	 * descriptors per buffer.
+	 */
+ desc[head].flags &= ~VRING_DESC_F_NEXT;
+ guest.free_head = desc[head].next;
+
+ data[head].data = datap;
+
+#ifdef RING_POLL
+ /* Barrier A (for pairing) */
+ smp_release();
+ avail = guest.avail_idx++;
+ ring.avail->ring[avail & (ring_size - 1)] =
+ (head | (avail & ~(ring_size - 1))) ^ 0x8000;
+#else
+ avail = (ring_size - 1) & (guest.avail_idx++);
+ ring.avail->ring[avail] = head;
+ /* Barrier A (for pairing) */
+ smp_release();
+#endif
+ ring.avail->idx = guest.avail_idx;
+ return 0;
+}
+
+void *get_buf(unsigned *lenp, void **bufp)
+{
+ unsigned head;
+ unsigned index;
+ void *datap;
+
+#ifdef RING_POLL
+ head = (ring_size - 1) & guest.last_used_idx;
+ index = ring.used->ring[head].id;
+ if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
+ return NULL;
+ /* Barrier B (for pairing) */
+ smp_acquire();
+ index &= ring_size - 1;
+#else
+ if (ring.used->idx == guest.last_used_idx)
+ return NULL;
+ /* Barrier B (for pairing) */
+ smp_acquire();
+ head = (ring_size - 1) & guest.last_used_idx;
+ index = ring.used->ring[head].id;
+#endif
+ *lenp = ring.used->ring[head].len;
+ datap = data[index].data;
+ *bufp = (void*)(unsigned long)ring.desc[index].addr;
+ data[index].data = NULL;
+ ring.desc[index].next = guest.free_head;
+ guest.free_head = index;
+ guest.num_free++;
+ guest.last_used_idx++;
+ return datap;
+}
+
+void poll_used(void)
+{
+#ifdef RING_POLL
+ unsigned head = (ring_size - 1) & guest.last_used_idx;
+
+ for (;;) {
+ unsigned index = ring.used->ring[head].id;
+
+ if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
+ busy_wait();
+ else
+ break;
+ }
+#else
+ unsigned head = guest.last_used_idx;
+
+ while (ring.used->idx == head)
+ busy_wait();
+#endif
+}
+
+void disable_call()
+{
+ /* Doing nothing to disable calls might cause
+ * extra interrupts, but reduces the number of cache misses.
+ */
+}
+
+bool enable_call()
+{
+ unsigned short last_used_idx;
+
+ vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
+ /* Flush call index write */
+ /* Barrier D (for pairing) */
+ smp_mb();
+#ifdef RING_POLL
+ {
+ unsigned short head = last_used_idx & (ring_size - 1);
+ unsigned index = ring.used->ring[head].id;
+
+ return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
+ }
+#else
+ return ring.used->idx == last_used_idx;
+#endif
+}
+
+void kick_available(void)
+{
+ /* Flush in previous flags write */
+ /* Barrier C (for pairing) */
+ smp_mb();
+ if (!vring_need_event(vring_avail_event(&ring),
+ guest.avail_idx,
+ guest.kicked_avail_idx))
+ return;
+
+ guest.kicked_avail_idx = guest.avail_idx;
+ kick();
+}
+
+/* host side */
+void disable_kick()
+{
+ /* Doing nothing to disable kicks might cause
+ * extra interrupts, but reduces the number of cache misses.
+ */
+}
+
+bool enable_kick()
+{
+ unsigned head = host.used_idx;
+
+ vring_avail_event(&ring) = head;
+ /* Barrier C (for pairing) */
+ smp_mb();
+#ifdef RING_POLL
+ {
+ unsigned index = ring.avail->ring[head & (ring_size - 1)];
+
+ return (index ^ head ^ 0x8000) & ~(ring_size - 1);
+ }
+#else
+ return head == ring.avail->idx;
+#endif
+}
+
+void poll_avail(void)
+{
+ unsigned head = host.used_idx;
+#ifdef RING_POLL
+ for (;;) {
+ unsigned index = ring.avail->ring[head & (ring_size - 1)];
+ if ((index ^ head ^ 0x8000) & ~(ring_size - 1))
+ busy_wait();
+ else
+ break;
+ }
+#else
+ while (ring.avail->idx == head)
+ busy_wait();
+#endif
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+ unsigned used_idx = host.used_idx;
+ struct vring_desc *desc;
+ unsigned head;
+
+#ifdef RING_POLL
+ head = ring.avail->ring[used_idx & (ring_size - 1)];
+ if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1))
+ return false;
+ /* Barrier A (for pairing) */
+ smp_acquire();
+
+ used_idx &= ring_size - 1;
+ desc = &ring.desc[head & (ring_size - 1)];
+#else
+ if (used_idx == ring.avail->idx)
+ return false;
+
+ /* Barrier A (for pairing) */
+ smp_acquire();
+
+ used_idx &= ring_size - 1;
+ head = ring.avail->ring[used_idx];
+ desc = &ring.desc[head];
+#endif
+
+ *lenp = desc->len;
+ *bufp = (void *)(unsigned long)desc->addr;
+
+ /* now update used ring */
+ ring.used->ring[used_idx].id = head;
+ ring.used->ring[used_idx].len = desc->len - 1;
+ /* Barrier B (for pairing) */
+ smp_release();
+ host.used_idx++;
+ ring.used->idx = host.used_idx;
+
+ return true;
+}
+
+void call_used(void)
+{
+ /* Flush in previous flags write */
+ /* Barrier D (for pairing) */
+ smp_mb();
+ if (!vring_need_event(vring_used_event(&ring),
+ host.used_idx,
+ host.called_used_idx))
+ return;
+
+ host.called_used_idx = host.used_idx;
+ call();
+}
diff --git a/tools/virtio/ringtest/virtio_ring_poll.c b/tools/virtio/ringtest/virtio_ring_poll.c
new file mode 100644
index 000000000000..84fc2c557aaa
--- /dev/null
+++ b/tools/virtio/ringtest/virtio_ring_poll.c
@@ -0,0 +1,2 @@
+#define RING_POLL 1
+#include "virtio_ring_0_9.c"