author      2024-06-10 12:03:21 +0100
committer   2024-06-10 12:03:21 +0100
commit      594ce0b8a998aa4d05827cd7c0d0dcec9a1e3ae2
tree        070bd60a8fda15e5f47339d3f6888a0fe2ca6fe9 /tools/perf
parent      clkdev: don't fail clkdev_alloc() if over-sized
parent      ARM: 9405/1: ftrace: Don't assume stack frames are contiguous in memory
Merge topic branches 'clkdev' and 'fixes' into for-linus
Diffstat (limited to 'tools/perf')
480 files changed, 38966 insertions, 10089 deletions
diff --git a/tools/perf/Build b/tools/perf/Build index aa7623622834..b0cb7ad8e6ac 100644 --- a/tools/perf/Build +++ b/tools/perf/Build @@ -59,3 +59,17 @@ perf-y += ui/ perf-y += scripts/ gtk-y += ui/gtk/ + +ifdef SHELLCHECK + SHELL_TESTS := $(wildcard *.sh) + TEST_LOGS := $(SHELL_TESTS:%=%.shellcheck_log) +else + SHELL_TESTS := + TEST_LOGS := +endif + +$(OUTPUT)%.shellcheck_log: % + $(call rule_mkdir) + $(Q)$(call echo-cmd,test)shellcheck -s bash -a -S warning "$<" > $@ || (cat $@ && rm $@ && false) + +perf-y += $(TEST_LOGS) diff --git a/tools/perf/Documentation/perf-arm-spe.txt b/tools/perf/Documentation/perf-arm-spe.txt index bf03222e9a68..0a3eda482307 100644 --- a/tools/perf/Documentation/perf-arm-spe.txt +++ b/tools/perf/Documentation/perf-arm-spe.txt @@ -116,6 +116,15 @@ Depending on CPU model, the kernel may need to be booted with page table isolati (kpti=off). If KPTI needs to be disabled, this will fail with a console message "profiling buffer inaccessible. Try passing 'kpti=off' on the kernel command line". +For the full criteria that determine whether KPTI needs to be forced off or not, see function +unmap_kernel_at_el0() in the kernel sources. Common cases where it's not required +are on the CPUs in kpti_safe_list, or on Arm v8.5+ where FEAT_E0PD is mandatory. + +The SPE interrupt must also be described by the firmware. If the module is loaded and KPTI is +disabled (or isn't required to be disabled) but the SPE PMU still doesn't show in +/sys/bus/event_source/devices/, then it's possible that the SPE interrupt isn't described by +ACPI or DT. In this case no warning will be printed by the driver. + Capturing SPE with perf command-line tools ------------------------------------------ @@ -199,7 +208,8 @@ Common errors - "Cannot find PMU `arm_spe'. Missing kernel support?" - Module not built or loaded, KPTI not disabled (see above), or running on a VM + Module not built or loaded, KPTI not disabled, interrupt not described by firmware, + or running on a VM. See 'Kernel Requirements' above. - "Arm SPE CONTEXT packets not found in the traces." diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt index 2109690b0d5f..59ab1ff9d75f 100644 --- a/tools/perf/Documentation/perf-intel-pt.txt +++ b/tools/perf/Documentation/perf-intel-pt.txt @@ -115,9 +115,13 @@ toggle respectively. perf script also supports higher level ways to dump instruction traces: + perf script --insn-trace=disasm + +or to use the xed disassembler, which requires installing the xed tool +(see XED below): + perf script --insn-trace --xed -Dump all instructions. This requires installing the xed tool (see XED below) Dumping all instructions in a long trace can be fairly slow. It is usually better to start with higher level decoding, like @@ -130,12 +134,12 @@ or and then select a time range of interest. The time range can then be examined in detail with - perf script --time starttime,stoptime --insn-trace --xed + perf script --time starttime,stoptime --insn-trace=disasm While examining the trace it's also useful to filter on specific CPUs using the -C option - perf script --time starttime,stoptime --insn-trace --xed -C 1 + perf script --time starttime,stoptime --insn-trace=disasm -C 1 Dump all instructions in time range on CPU 1. @@ -1306,7 +1310,7 @@ Without timestamps, --per-thread must be specified to distinguish threads. 
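The tools/perf/Build hunk above wires ShellCheck into the perf build: when SHELLCHECK is defined, each *.sh script gains a .shellcheck_log stamp target, so a script is re-checked only when it changes, and a failing check dumps the log and aborts the build. A minimal sketch of the equivalent manual invocation (perf-archive.sh stands in for any checked script):

    # Same flags the Build rule passes: bash dialect, follow sourced
    # files, report findings of severity warning and above.
    shellcheck -s bash -a -S warning tools/perf/perf-archive.sh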
perf script can be used to provide an instruction trace - $ perf script --guestkallsyms $KALLSYMS --insn-trace --xed -F+ipc | grep -C10 vmresume | head -21 + $ perf script --guestkallsyms $KALLSYMS --insn-trace=disasm -F+ipc | grep -C10 vmresume | head -21 CPU 0/KVM 1440 ffffffff82133cdd __vmx_vcpu_run+0x3d ([kernel.kallsyms]) movq 0x48(%rax), %r9 CPU 0/KVM 1440 ffffffff82133ce1 __vmx_vcpu_run+0x41 ([kernel.kallsyms]) movq 0x50(%rax), %r10 CPU 0/KVM 1440 ffffffff82133ce5 __vmx_vcpu_run+0x45 ([kernel.kallsyms]) movq 0x58(%rax), %r11 @@ -1407,7 +1411,7 @@ There were none. 'perf script' can be used to provide an instruction trace showing timestamps - $ perf script -i perf.data.kvm --guestkallsyms $KALLSYMS --insn-trace --xed -F+ipc | grep -C10 vmresume | head -21 + $ perf script -i perf.data.kvm --guestkallsyms $KALLSYMS --insn-trace=disasm -F+ipc | grep -C10 vmresume | head -21 CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cdd __vmx_vcpu_run+0x3d ([kernel.kallsyms]) movq 0x48(%rax), %r9 CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce1 __vmx_vcpu_run+0x41 ([kernel.kallsyms]) movq 0x50(%rax), %r10 CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce5 __vmx_vcpu_run+0x45 ([kernel.kallsyms]) movq 0x58(%rax), %r11 diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index 3b12595193c9..6bf2468f59d3 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -71,6 +71,7 @@ counted. The following modifiers exist: D - pin the event to the PMU W - group is weak and will fallback to non-group if not schedulable, e - group or event are exclusive and do not share the PMU + b - use BPF aggregration (see perf stat --bpf-counters) The 'p' modifier can be used for specifying how precise the instruction address should be. The 'p' modifier can be specified multiple times: diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 38f59ac064f7..d2b1593ef700 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -121,6 +121,9 @@ OPTIONS - type: Data type of sample memory access. - typeoff: Offset in the data type of sample memory access. - symoff: Offset in the symbol. + - weight1: Average value of event specific weight (1st field of weight_struct). + - weight2: Average value of event specific weight (2nd field of weight_struct). + - weight3: Average value of event specific weight (3rd field of weight_struct). By default, comm, dso and symbol keys are used. (i.e. --sort comm,dso,symbol) @@ -198,7 +201,11 @@ OPTIONS --fields=:: Specify output field - multiple keys can be specified in CSV format. Following fields are available: - overhead, overhead_sys, overhead_us, overhead_children, sample and period. + overhead, overhead_sys, overhead_us, overhead_children, sample, period, + weight1, weight2, weight3, ins_lat, p_stage_cyc and retire_lat. The + last 3 names are alias for the corresponding weights. When the weight + fields are used, they will show the average value of the weight. + Also it can contain any sort key(s). By default, every sort keys not specified in -F will be appended @@ -531,8 +538,35 @@ include::itrace.txt[] --raw-trace:: When displaying traceevent output, do not use print fmt or plugins. +-H:: --hierarchy:: - Enable hierarchical output. + Enable hierarchical output. In the hierarchy mode, each sort key groups + samples based on the criteria and then sub-divide it using the lower + level sort key. 
+ + For example: + In normal output: + + perf report -s dso,sym + # Overhead Shared Object Symbol + 50.00% [kernel.kallsyms] [k] kfunc1 + 20.00% perf [.] foo + 15.00% [kernel.kallsyms] [k] kfunc2 + 10.00% perf [.] bar + 5.00% libc.so [.] libcall + + In hierarchy output: + + perf report -s dso,sym --hierarchy + # Overhead Shared Object / Symbol + 65.00% [kernel.kallsyms] + 50.00% [k] kfunc1 + 15.00% [k] kfunc2 + 30.00% perf + 20.00% [.] foo + 10.00% [.] bar + 5.00% libc.so + 5.00% [.] libcall --inline:: If a callgraph address belongs to an inlined function, the inline stack diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt index 5fbe42bd599b..a216d2991b19 100644 --- a/tools/perf/Documentation/perf-sched.txt +++ b/tools/perf/Documentation/perf-sched.txt @@ -20,6 +20,26 @@ There are several variants of 'perf sched': 'perf sched latency' to report the per task scheduling latencies and other scheduling properties of the workload. + Example usage: + perf sched record -- sleep 1 + perf sched latency + + ------------------------------------------------------------------------------------------------------------------------------------------- + Task | Runtime ms | Count | Avg delay ms | Max delay ms | Max delay start | Max delay end | + ------------------------------------------------------------------------------------------------------------------------------------------- + perf:(2) | 2.804 ms | 66 | avg: 0.524 ms | max: 1.069 ms | max start: 254752.314960 s | max end: 254752.316029 s + NetworkManager:1343 | 0.372 ms | 13 | avg: 0.008 ms | max: 0.013 ms | max start: 254751.551153 s | max end: 254751.551166 s + kworker/1:2-xfs:4649 | 0.012 ms | 1 | avg: 0.008 ms | max: 0.008 ms | max start: 254751.519807 s | max end: 254751.519815 s + kworker/3:1-xfs:388 | 0.011 ms | 1 | avg: 0.006 ms | max: 0.006 ms | max start: 254751.519809 s | max end: 254751.519815 s + sleep:147736 | 0.938 ms | 3 | avg: 0.006 ms | max: 0.007 ms | max start: 254751.313817 s | max end: 254751.313824 s + + It shows Runtime(time that a task spent actually running on the CPU), + Count(number of times a delay was calculated) and delay(time that a + task was ready to run but was kept waiting). + + Tasks with the same command name are merged and the merge count is + given within (), However if -p option is used, pid is mentioned. + 'perf sched script' to see a detailed trace of the workload that was recorded (aliased to 'perf script' for now). @@ -78,6 +98,22 @@ OPTIONS --force:: Don't complain, do it. +OPTIONS for 'perf sched latency' +------------------------------- + +-C:: +--CPU <n>:: + CPU to profile on. + +-p:: +--pids:: + latency stats per pid instead of per command name. + +-s:: +--sort <key[,key2...]>:: + sort by key(s): runtime, switch, avg, max + by default it's sorted by "avg ,max ,switch ,runtime". 
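Putting the new latency options together, a typical workflow looks like this (a sketch; the workload, CPU number and sort key are illustrative):

    perf sched record -- sleep 1   # capture scheduling events
    perf sched latency             # per-command summary, default sort avg,max,switch,runtime
    perf sched latency -s max      # sort by maximum delay instead
    perf sched latency -p          # per-pid stats instead of merging by command name
    perf sched latency -C 0        # restrict the report to CPU 0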
+ OPTIONS for 'perf sched map' ---------------------------- diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt index 6a8581012e16..13e37e9385ee 100644 --- a/tools/perf/Documentation/perf-script-python.txt +++ b/tools/perf/Documentation/perf-script-python.txt @@ -642,8 +642,8 @@ SUPPORTED FIELDS Currently supported fields: -ev_name, comm, pid, tid, cpu, ip, time, period, phys_addr, addr, -symbol, symoff, dso, time_enabled, time_running, values, callchain, +ev_name, comm, id, stream_id, pid, tid, cpu, ip, time, period, phys_addr, +addr, symbol, symoff, dso, time_enabled, time_running, values, callchain, brstack, brstacksym, datasrc, datasrc_decode, iregs, uregs, weight, transaction, raw_buf, attr, cpumode. diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index ff9a52e44688..ff086ef05a0c 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -132,9 +132,10 @@ OPTIONS Comma separated list of fields to print. Options are: comm, tid, pid, time, cpu, event, trace, ip, sym, dso, dsoff, addr, symoff, srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, - brstackinsn, brstackinsnlen, brstackoff, callindent, insn, insnlen, synth, - phys_addr, metric, misc, srccode, ipc, data_page_size, code_page_size, ins_lat, - machine_pid, vcpu, cgroup, retire_lat. + brstackinsn, brstackinsnlen, brstackdisasm, brstackoff, callindent, insn, disasm, + insnlen, synth, phys_addr, metric, misc, srccode, ipc, data_page_size, + code_page_size, ins_lat, machine_pid, vcpu, cgroup, retire_lat, + Field list can be prepended with the type, trace, sw or hw, to indicate to which event type the field list applies. e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace @@ -217,9 +218,9 @@ OPTIONS Instruction Trace decoding. For calls and returns, it will display the name of the symbol indented with spaces to reflect the stack depth. - When doing instruction trace decoding insn and insnlen give the - instruction bytes and the instruction length of the current - instruction. + When doing instruction trace decoding, insn, disasm and insnlen give the + instruction bytes, disassembled instructions (requires libcapstone support) + and the instruction length of the current instruction respectively. The synth field is used by synthesized events which may be created when Instruction Trace decoding. @@ -256,6 +257,9 @@ OPTIONS can’t know the next sequential instruction after an unconditional branch unless you calculate that based on its length. + brstackdisasm acts like brstackinsn, but will print disassembled instructions if + perf is built with the capstone library. + The brstackoff field will print an offset into a specific dso/binary. With the metric option perf script can compute metrics for @@ -441,9 +445,10 @@ include::itrace.txt[] will be printed. Each entry has function name and file/line. Enabled by default, disable with --no-inline. ---insn-trace:: - Show instruction stream for intel_pt traces. Combine with --xed to - show disassembly. +--insn-trace[=<raw|disasm>]:: + Show instruction stream in bytes (raw) or disassembled (disasm) + for intel_pt traces. The default is 'raw'. To use xed, combine + 'raw' with --xed to show disassembly done by xed. --xed:: Run xed disassembler on output. Requires installing the xed disassembler. 
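To make the raw/disasm/xed distinction above concrete, the three decodings can be requested as below (a sketch assuming an Intel PT capture; disasm needs perf built with libcapstone, and --xed needs the external xed tool):

    perf record -e intel_pt// -- ./workload   # illustrative workload
    perf script --insn-trace=raw              # instruction bytes only (the default)
    perf script --insn-trace=disasm           # disassembly via libcapstone
    perf script --insn-trace --xed            # bytes disassembled by the xed tool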
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 5af2e432b54f..29756a87ab6f 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -308,6 +308,14 @@ use --per-die in addition to -a. (system-wide). The output includes the die number and the number of online processors on that die. This is useful to gauge the amount of aggregation. +--per-cluster:: +Aggregate counts per processor cluster for system-wide mode measurement. This +is a useful mode to detect imbalance between clusters. To enable this mode, +use --per-cluster in addition to -a. (system-wide). The output includes the +cluster number and the number of online processors on that cluster. This is +useful to gauge the amount of aggregation. The information of cluster ID and +related CPUs can be gotten from /sys/devices/system/cpu/cpuX/topology/cluster_{id, cpus}. + --per-cache:: Aggregate counts per cache instance for system-wide mode measurements. By default, the aggregation happens for the cache level at the highest index @@ -396,6 +404,9 @@ Aggregate counts per processor socket for system-wide mode measurements. --per-die:: Aggregate counts per processor die for system-wide mode measurements. +--per-cluster:: +Aggregate counts perf processor cluster for system-wide mode measurements. + --per-cache:: Aggregate counts per cache instance for system-wide mode measurements. By default, the aggregation happens for the cache level at the highest index diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt index 951a2f262872..9acb8d1f6588 100644 --- a/tools/perf/Documentation/perf-test.txt +++ b/tools/perf/Documentation/perf-test.txt @@ -31,9 +31,20 @@ OPTIONS --verbose:: Be more verbose. +-S:: +--sequential:: + Run tests one after the other, this is the default mode. + +-p:: +--parallel:: + Run tests in parallel, speeds up the whole process but is not safe with + the current infrastructure, where some tests that compete for some resources, + for instance, 'perf probe' tests that add/remove probes or clean all probes, etc. + -F:: --dont-fork:: - Do not fork child for each test, run all tests within single process. + Do not fork child for each test, run all tests within single process, this + sets sequential mode. --dso:: Specify a DSO for the "Symbols" test. diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 3c202ec080ba..a754875fa5bb 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -261,8 +261,38 @@ Default is to monitor all CPUS. --raw-trace:: When displaying traceevent output, do not use print fmt or plugins. +-H:: --hierarchy:: - Enable hierarchy output. + Enable hierarchical output. In the hierarchy mode, each sort key groups + samples based on the criteria and then sub-divide it using the lower + level sort key. + + For example, in normal output: + + perf report -s dso,sym + # + # Overhead Shared Object Symbol + # ........ ................. ........... + 50.00% [kernel.kallsyms] [k] kfunc1 + 20.00% perf [.] foo + 15.00% [kernel.kallsyms] [k] kfunc2 + 10.00% perf [.] bar + 5.00% libc.so [.] libcall + + In hierarchy output: + + perf report -s dso,sym --hierarchy + # + # Overhead Shared Object / Symbol + # .......... ...................... + 65.00% [kernel.kallsyms] + 50.00% [k] kfunc1 + 15.00% [k] kfunc2 + 30.00% perf + 20.00% [.] foo + 10.00% [.] bar + 5.00% libc.so + 5.00% [.] 
libcall --overwrite:: Enable this to use just the most recent records, which helps in high core count diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt index a7cf7bc2f968..09f516f3fdfb 100644 --- a/tools/perf/Documentation/perf.txt +++ b/tools/perf/Documentation/perf.txt @@ -63,6 +63,8 @@ OPTIONS in browser mode perf-event-open - Print perf_event_open() arguments and return value + kmaps - Print kernel and module maps (perf script + and perf report without browser) --debug-file:: Write debug output to a specified file. diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt index 825745a645c1..67b326ba0040 100644 --- a/tools/perf/Documentation/tips.txt +++ b/tools/perf/Documentation/tips.txt @@ -2,6 +2,7 @@ For a higher level overview, try: perf report --sort comm,dso Sample related events with: perf record -e '{cycles,instructions}:S' Compare performance results with: perf diff [<old file> <new file>] Boolean options have negative forms, e.g.: perf report --no-children +To not accumulate CPU time of children symbols add --no-children Customize output of perf script with: perf script -F event,ip,sym Generate a script for your data: perf script -g <lang> Save output of perf stat using: perf stat record <target workload> @@ -12,32 +13,52 @@ List events using substring match: perf list <keyword> To see list of saved events and attributes: perf evlist -v Use --symfs <dir> if your symbol files are in non-standard locations To see callchains in a more compact form: perf report -g folded +To see call chains by final symbol taking CPU time (bottom up) use perf report -G Show individual samples with: perf script Limit to show entries above 5% only: perf report --percent-limit 5 Profiling branch (mis)predictions with: perf record -b / perf report -To show assembler sample contexts use perf record -b / perf script -F +brstackinsn --xed -Treat branches as callchains: perf report --branch-history -To count events in every 1000 msec: perf stat -I 1000 -Print event counts in CSV format with: perf stat -x, +To show assembler sample context control flow use perf record -b / perf report --samples 10 and then browse context +To adjust path to source files to local file system use perf report --prefix=... --prefix-strip=... +Treat branches as callchains: perf record -b ... ; perf report --branch-history +Show estimate cycles per function and IPC in annotate use perf record -b ... 
; perf report --total-cycles +To count events every 1000 msec: perf stat -I 1000 +Print event counts in machine readable CSV format with: perf stat -x\; If you have debuginfo enabled, try: perf report -s sym,srcline For memory address profiling, try: perf mem record / perf mem report For tracepoint events, try: perf report -s trace_fields To record callchains for each sample: perf record -g +If call chains don't work try perf record --call-graph dwarf or --call-graph lbr To record every process run by a user: perf record -u <user> +To show inline functions in call traces add --inline to perf report +To not record events from perf itself add --exclude-perf Skip collecting build-id when recording: perf record -B To change sampling frequency to 100 Hz: perf record -F 100 +To show information about system the samples were collected on use perf report --header +To only collect call graph on one event use perf record -e cpu/cpu-cycles,callgraph=1/,branches ; perf report --show-ref-call-graph +To set sampling period of individual events use perf record -e cpu/cpu-cycles,period=100001/,cpu/branches,period=10001/ ... +To group events which need to be collected together for accuracy use {}: perf record -e {cycles,branches}' ... +To compute metrics for samples use perf record -e '{cycles,instructions}' ... ; perf script -F +metric See assembly instructions with percentage: perf annotate <symbol> If you prefer Intel style assembly, try: perf annotate -M intel +When collecting LBR backtraces use --stitch-lbr to handle more than 32 deep entries: perf record --call-graph lbr ; perf report --stitch-lbr For hierarchical output, try: perf report --hierarchy Order by the overhead of source file name and line number: perf report -s srcline System-wide collection from all CPUs: perf record -a Show current config key-value pairs: perf config --list +To collect Processor Trace with samples use perf record -e '{intel_pt//,cycles}' ; perf script --call-trace or --insn-trace --xed -F +ipc (remove --xed if no xed) +To trace calls using Processor Trace use perf record -e intel_pt// ... ; perf script --call-trace. Then use perf script --time A-B --insn-trace to look at region of interest. +To measure approximate function latency with Processor Trace use perf record -e intel_pt// ... ; perf script --call-ret-trace +To trace only single function with Processor Trace use perf record --filter 'filter func @ program' -e intel_pt//u ./program ; perf script --insn-trace Show user configuration overrides: perf config --user --list To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node` -To report cacheline events from previous recording: perf c2c report +To analyze cache line scalability issues use perf c2c record ... ; perf c2c report To browse sample contexts use perf report --sample 10 and select in context menu To separate samples by time use perf report --sort time,overhead,sym +To filter subset of samples with report or script add --time X-Y or --cpu A,B,C or --socket-filter ... To set sample time separation other than 100ms with --sort time use --time-quantum Add -I to perf record to sample register values, which will be visible in perf report sample context. To show IPC for sampling periods use perf record -e '{cycles,instructions}:S' and then browse context To show context switches in perf report sample context add --switch-events to perf record. +To show time in nanoseconds in record/report add --ns +To compare hot regions in two workloads use perf record -b -o file ... 
; perf diff --stream file1 file2 +To compare scalability of two workload samples use perf diff -c ratio file1 file2 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index aa55850fbc21..7f1e016a9253 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -28,8 +28,6 @@ include $(srctree)/tools/scripts/Makefile.arch $(call detected_var,SRCARCH) -NO_PERF_REGS := 1 - ifneq ($(NO_SYSCALL_TABLE),1) NO_SYSCALL_TABLE := 1 @@ -50,7 +48,6 @@ endif # Additional ARCH settings for ppc ifeq ($(SRCARCH),powerpc) - NO_PERF_REGS := 0 CFLAGS += -I$(OUTPUT)arch/powerpc/include/generated LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 endif @@ -66,49 +63,31 @@ ifeq ($(SRCARCH),x86) else LIBUNWIND_LIBS = -lunwind-x86 -llzma -lunwind endif - NO_PERF_REGS := 0 endif ifeq ($(SRCARCH),arm) - NO_PERF_REGS := 0 LIBUNWIND_LIBS = -lunwind -lunwind-arm endif ifeq ($(SRCARCH),arm64) - NO_PERF_REGS := 0 CFLAGS += -I$(OUTPUT)arch/arm64/include/generated LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 endif ifeq ($(SRCARCH),loongarch) - NO_PERF_REGS := 0 CFLAGS += -I$(OUTPUT)arch/loongarch/include/generated LIBUNWIND_LIBS = -lunwind -lunwind-loongarch64 endif -ifeq ($(SRCARCH),riscv) - NO_PERF_REGS := 0 -endif - -ifeq ($(SRCARCH),csky) - NO_PERF_REGS := 0 -endif - ifeq ($(ARCH),s390) - NO_PERF_REGS := 0 CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated endif ifeq ($(ARCH),mips) - NO_PERF_REGS := 0 CFLAGS += -I$(OUTPUT)arch/mips/include/generated LIBUNWIND_LIBS = -lunwind -lunwind-mips endif -ifeq ($(NO_PERF_REGS),0) - $(call detected,CONFIG_PERF_REGS) -endif - # So far there's only x86 and arm libdw unwind support merged in perf. # Disable it on all other architectures in case libdw unwind # support is detected in system. Add supported architectures @@ -165,10 +144,6 @@ endif FEATURE_CHECK_CFLAGS-libopencsd := $(LIBOPENCSD_CFLAGS) FEATURE_CHECK_LDFLAGS-libopencsd := $(LIBOPENCSD_LDFLAGS) $(OPENCSDLIBS) -ifeq ($(NO_PERF_REGS),0) - CFLAGS += -DHAVE_PERF_REGS_SUPPORT -endif - # for linking with debug library, run like: # make DEBUG=1 LIBDW_DIR=/opt/libdw/ ifdef LIBDW_DIR @@ -191,6 +166,15 @@ endif FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf +# for linking with debug library, run like: +# make DEBUG=1 LIBCAPSTONE_DIR=/opt/capstone/ +ifdef LIBCAPSTONE_DIR + LIBCAPSTONE_CFLAGS := -I$(LIBCAPSTONE_DIR)/include + LIBCAPSTONE_LDFLAGS := -L$(LIBCAPSTONE_DIR)/ +endif +FEATURE_CHECK_CFLAGS-libcapstone := $(LIBCAPSTONE_CFLAGS) +FEATURE_CHECK_LDFLAGS-libcapstone := $(LIBCAPSTONE_LDFLAGS) -lcapstone + ifdef LIBZSTD_DIR LIBZSTD_CFLAGS := -I$(LIBZSTD_DIR)/lib LIBZSTD_LDFLAGS := -L$(LIBZSTD_DIR)/lib @@ -198,6 +182,16 @@ endif FEATURE_CHECK_CFLAGS-libzstd := $(LIBZSTD_CFLAGS) FEATURE_CHECK_LDFLAGS-libzstd := $(LIBZSTD_LDFLAGS) +# for linking with debug library, run like: +# make DEBUG=1 LIBTRACEEVENT_DIR=/opt/libtraceevent/ +TRACEEVENTLIBS := -ltraceevent +ifdef LIBTRACEEVENT_DIR + LIBTRACEEVENT_CFLAGS := -I$(LIBTRACEEVENT_DIR)/include + LIBTRACEEVENT_LDFLAGS := -L$(LIBTRACEEVENT_DIR)/lib +endif +FEATURE_CHECK_CFLAGS-libtraceevent := $(LIBTRACEEVENT_CFLAGS) +FEATURE_CHECK_LDFLAGS-libtraceevent := $(LIBTRACEEVENT_LDFLAGS) $(TRACEEVENTLIBS) + FEATURE_CHECK_CFLAGS-bpf = -I. 
-I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi # include ARCH specific config -include $(src-perf)/arch/$(SRCARCH)/Makefile @@ -209,11 +203,17 @@ endif include $(srctree)/tools/scripts/utilities.mak ifeq ($(call get-executable,$(FLEX)),) - dummy := $(error Error: $(FLEX) is missing on this system, please install it) + $(error Error: $(FLEX) is missing on this system, please install it) endif ifeq ($(call get-executable,$(BISON)),) - dummy := $(error Error: $(BISON) is missing on this system, please install it) + $(error Error: $(BISON) is missing on this system, please install it) +endif + +ifneq ($(NO_LIBTRACEEVENT),1) + ifeq ($(call get-executable,$(PKG_CONFIG)),) + dummy := $(error Error: $(PKG_CONFIG) needed by libtraceevent is missing on this system, please install it) + endif endif ifneq ($(OUTPUT),) @@ -438,46 +438,46 @@ else LIBC_SUPPORT := 1 endif ifeq ($(LIBC_SUPPORT),1) - msg := $(error ERROR: No libelf found. Disables 'probe' tool, jvmti and BPF support. Please install libelf-dev, libelf-devel, elfutils-libelf-devel or build with NO_LIBELF=1.) + $(error ERROR: No libelf found. Disables 'probe' tool, jvmti and BPF support. Please install libelf-dev, libelf-devel, elfutils-libelf-devel or build with NO_LIBELF=1.) else ifneq ($(filter s% -fsanitize=address%,$(EXTRA_CFLAGS),),) ifneq ($(shell ldconfig -p | grep libasan >/dev/null 2>&1; echo $$?), 0) - msg := $(error No libasan found, please install libasan); + $(error No libasan found, please install libasan) endif endif ifneq ($(filter s% -fsanitize=undefined%,$(EXTRA_CFLAGS),),) ifneq ($(shell ldconfig -p | grep libubsan >/dev/null 2>&1; echo $$?), 0) - msg := $(error No libubsan found, please install libubsan); + $(error No libubsan found, please install libubsan) endif endif ifneq ($(filter s% -static%,$(LDFLAGS),),) - msg := $(error No static glibc found, please install glibc-static); + $(error No static glibc found, please install glibc-static) else - msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]); + $(error No gnu/libc-version.h found, please install glibc-dev[el]) endif endif else ifndef NO_LIBDW_DWARF_UNWIND ifneq ($(feature-libdw-dwarf-unwind),1) NO_LIBDW_DWARF_UNWIND := 1 - msg := $(warning No libdw DWARF unwind found, Please install elfutils-devel/libdw-dev >= 0.158 and/or set LIBDW_DIR); + $(warning No libdw DWARF unwind found, Please install elfutils-devel/libdw-dev >= 0.158 and/or set LIBDW_DIR) endif endif ifneq ($(feature-dwarf), 1) ifndef NO_DWARF - msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev); + $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. 
Please install new elfutils-devel/libdw-dev) NO_DWARF := 1 endif else ifneq ($(feature-dwarf_getlocations), 1) - msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.157); + $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.157) else CFLAGS += -DHAVE_DWARF_GETLOCATIONS_SUPPORT endif # dwarf_getlocations ifneq ($(feature-dwarf_getcfi), 1) - msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.142); + $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.142) else CFLAGS += -DHAVE_DWARF_CFI_SUPPORT endif # dwarf_getcfi @@ -496,7 +496,10 @@ ifdef NO_DWARF endif ifeq ($(feature-scandirat), 1) - CFLAGS += -DHAVE_SCANDIRAT_SUPPORT + # Ignore having scandirat with memory sanitizer that lacks an interceptor. + ifeq ($(filter s% -fsanitize=memory%,$(EXTRA_CFLAGS),),) + CFLAGS += -DHAVE_SCANDIRAT_SUPPORT + endif endif ifeq ($(feature-sched_getcpu), 1) @@ -525,7 +528,7 @@ ifdef CORESIGHT endif endif else - dummy := $(error Error: No libopencsd library found or the version is not up-to-date. Please install recent libopencsd to build with CORESIGHT=1) + $(error Error: No libopencsd library found or the version is not up-to-date. Please install recent libopencsd to build with CORESIGHT=1) endif endif @@ -551,7 +554,7 @@ ifndef NO_LIBELF ifeq ($(feature-libelf-gelf_getnote), 1) CFLAGS += -DHAVE_GELF_GETNOTE_SUPPORT else - msg := $(warning gelf_getnote() not found on libelf, SDT support disabled); + $(warning gelf_getnote() not found on libelf, SDT support disabled) endif ifeq ($(feature-libelf-getshdrstrndx), 1) @@ -568,7 +571,7 @@ ifndef NO_LIBELF ifndef NO_DWARF ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) - msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled); + $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled) NO_DWARF := 1 else CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) @@ -590,11 +593,11 @@ ifndef NO_LIBELF $(call detected,CONFIG_LIBBPF) $(call detected,CONFIG_LIBBPF_DYNAMIC) else - dummy := $(error Error: No libbpf devel library found or older than v1.0, please install/update libbpf-devel); + $(error Error: No libbpf devel library found or older than v1.0, please install/update libbpf-devel) endif else ifeq ($(NO_ZLIB), 1) - dummy := $(warning Warning: Statically building libbpf not possible as zlib is missing) + $(warning Warning: Statically building libbpf not possible as zlib is missing) NO_LIBBPF := 1 else # Libbpf will be built as a static library from tools/lib/bpf. 
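The Makefile.config changes above add LIBCAPSTONE_DIR and LIBTRACEEVENT_DIR override knobs alongside the existing LIBDW_DIR/LIBZSTD_DIR ones, and replace the old "msg :=" / "dummy :=" indirection with plain $(error)/$(warning) calls. A usage sketch for the new knobs (paths are examples):

    # Point the feature checks and link step at locally built libraries
    make -C tools/perf DEBUG=1 LIBCAPSTONE_DIR=/opt/capstone/
    make -C tools/perf DEBUG=1 LIBTRACEEVENT_DIR=/opt/libtraceevent/
    # Or build without the optional dependencies entirely
    make -C tools/perf NO_CAPSTONE=1 NO_LIBTRACEEVENT=1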
@@ -609,7 +612,7 @@ endif # NO_LIBELF ifndef NO_SDT ifneq ($(feature-sdt), 1) - msg := $(warning No sys/sdt.h found, no SDT events are defined, please install systemtap-sdt-devel or systemtap-sdt-dev); + $(warning No sys/sdt.h found, no SDT events are defined, please install systemtap-sdt-devel or systemtap-sdt-dev) NO_SDT := 1; else CFLAGS += -DHAVE_SDT_EVENT @@ -651,13 +654,13 @@ ifndef NO_LIBUNWIND have_libunwind = 1 $(call feature_check,libunwind-debug-frame-aarch64) ifneq ($(feature-libunwind-debug-frame-aarch64), 1) - msg := $(warning No debug_frame support found in libunwind-aarch64); + $(warning No debug_frame support found in libunwind-aarch64) CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME_AARCH64 endif endif ifneq ($(feature-libunwind), 1) - msg := $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR); + $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR) NO_LOCAL_LIBUNWIND := 1 else have_libunwind := 1 @@ -673,7 +676,7 @@ endif ifndef NO_LIBBPF ifneq ($(feature-bpf), 1) - msg := $(warning BPF API too old. Please install recent kernel headers. BPF support in 'perf record' is disabled.) + $(warning BPF API too old. Please install recent kernel headers. BPF support in 'perf record' is disabled.) NO_LIBBPF := 1 endif endif @@ -686,28 +689,28 @@ endif ifeq ($(BUILD_BPF_SKEL),1) ifeq ($(filter -DHAVE_LIBELF_SUPPORT, $(CFLAGS)),) - dummy := $(warning Warning: Disabled BPF skeletons as libelf is required by bpftool) + $(warning Warning: Disabled BPF skeletons as libelf is required by bpftool) BUILD_BPF_SKEL := 0 else ifeq ($(filter -DHAVE_ZLIB_SUPPORT, $(CFLAGS)),) - dummy := $(warning Warning: Disabled BPF skeletons as zlib is required by bpftool) + $(warning Warning: Disabled BPF skeletons as zlib is required by bpftool) BUILD_BPF_SKEL := 0 else ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),) - dummy := $(warning Warning: Disabled BPF skeletons as libbpf is required) + $(warning Warning: Disabled BPF skeletons as libbpf is required) BUILD_BPF_SKEL := 0 else ifeq ($(call get-executable,$(CLANG)),) - dummy := $(warning Warning: Disabled BPF skeletons as clang ($(CLANG)) is missing) + $(warning Warning: Disabled BPF skeletons as clang ($(CLANG)) is missing) BUILD_BPF_SKEL := 0 else CLANG_VERSION := $(shell $(CLANG) --version | head -1 | sed 's/.*clang version \([[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+\).*/\1/g') ifeq ($(call version-lt3,$(CLANG_VERSION),12.0.1),1) - dummy := $(warning Warning: Disabled BPF skeletons as reliable BTF generation needs at least $(CLANG) version 12.0.1) + $(warning Warning: Disabled BPF skeletons as reliable BTF generation needs at least $(CLANG) version 12.0.1) BUILD_BPF_SKEL := 0 endif endif ifeq ($(BUILD_BPF_SKEL),1) $(call feature_check,clang-bpf-co-re) ifeq ($(feature-clang-bpf-co-re), 0) - dummy := $(warning Warning: Disabled BPF skeletons as clang is too old) + $(warning Warning: Disabled BPF skeletons as clang is too old) BUILD_BPF_SKEL := 0 endif endif @@ -727,7 +730,7 @@ dwarf-post-unwind-text := BUG # setup DWARF post unwinder ifdef NO_LIBUNWIND ifdef NO_LIBDW_DWARF_UNWIND - msg := $(warning Disabling post unwind, no support found.); + $(warning Disabling post unwind, no support found.) 
dwarf-post-unwind := 0 else dwarf-post-unwind-text := libdw @@ -753,7 +756,7 @@ ifndef NO_LOCAL_LIBUNWIND ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64)) $(call feature_check,libunwind-debug-frame) ifneq ($(feature-libunwind-debug-frame), 1) - msg := $(warning No debug_frame support found in libunwind); + $(warning No debug_frame support found in libunwind) CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME endif else @@ -782,7 +785,7 @@ ifneq ($(NO_LIBTRACEEVENT),1) ifndef NO_LIBAUDIT $(call feature_check,libaudit) ifneq ($(feature-libaudit), 1) - msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev); + $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev) NO_LIBAUDIT := 1 else CFLAGS += -DHAVE_LIBAUDIT_SUPPORT @@ -795,7 +798,7 @@ endif ifndef NO_LIBCRYPTO ifneq ($(feature-libcrypto), 1) - msg := $(warning No libcrypto.h found, disables jitted code injection, please install openssl-devel or libssl-dev); + $(warning No libcrypto.h found, disables jitted code injection, please install openssl-devel or libssl-dev) NO_LIBCRYPTO := 1 else CFLAGS += -DHAVE_LIBCRYPTO_SUPPORT @@ -807,7 +810,7 @@ endif ifndef NO_SLANG ifneq ($(feature-libslang), 1) ifneq ($(feature-libslang-include-subdir), 1) - msg := $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev); + $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev) NO_SLANG := 1 else CFLAGS += -DHAVE_SLANG_INCLUDE_SUBDIR @@ -825,7 +828,7 @@ ifdef GTK2 FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(call feature_check,gtk2) ifneq ($(feature-gtk2), 1) - msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev); + $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev) NO_GTK2 := 1 else $(call feature_check,gtk2-infobar) @@ -854,7 +857,7 @@ else ifneq ($(feature-libperl), 1) CFLAGS += -DNO_LIBPERL NO_LIBPERL := 1 - msg := $(warning Missing perl devel files. Disabling perl scripting support, please install perl-ExtUtils-Embed/libperl-dev); + $(warning Missing perl devel files. Disabling perl scripting support, please install perl-ExtUtils-Embed/libperl-dev) else LDFLAGS += $(PERL_EMBED_LDFLAGS) EXTLIBS += $(PERL_EMBED_LIBADD) @@ -869,7 +872,7 @@ endif ifeq ($(feature-timerfd), 1) CFLAGS += -DHAVE_TIMERFD_SUPPORT else - msg := $(warning No timerfd support. Disables 'perf kvm stat live'); + $(warning No timerfd support. 
Disables 'perf kvm stat live') endif disable-python = $(eval $(disable-python_code)) @@ -903,7 +906,7 @@ else PYTHON_EXTENSION_SUFFIX := $(shell $(PYTHON) -c 'from importlib import machinery; print(machinery.EXTENSION_SUFFIXES[0])') LANG_BINDINGS += $(obj-perf)python/perf$(PYTHON_EXTENSION_SUFFIX) else - msg := $(warning Missing python setuptools, the python binding won't be built, please install python3-setuptools or equivalent); + $(warning Missing python setuptools, the python binding won't be built, please install python3-setuptools or equivalent) endif CFLAGS += -DHAVE_LIBPYTHON_SUPPORT $(call detected,CONFIG_LIBPYTHON) @@ -962,7 +965,7 @@ ifdef BUILD_NONDISTRO ifeq ($(feature-libbfd-buildid), 1) CFLAGS += -DHAVE_LIBBFD_BUILDID_SUPPORT else - msg := $(warning Old version of libbfd/binutils things like PE executable profiling will not be available); + $(warning Old version of libbfd/binutils things like PE executable profiling will not be available) endif endif @@ -994,7 +997,7 @@ ifndef NO_LZMA EXTLIBS += -llzma $(call detected,CONFIG_LZMA) else - msg := $(warning No liblzma found, disables xz kernel module decompression, please install xz-devel/liblzma-dev); + $(warning No liblzma found, disables xz kernel module decompression, please install xz-devel/liblzma-dev) NO_LZMA := 1 endif endif @@ -1007,7 +1010,7 @@ ifndef NO_LIBZSTD EXTLIBS += -lzstd $(call detected,CONFIG_ZSTD) else - msg := $(warning No libzstd found, disables trace compression, please install libzstd-dev[el] and/or set LIBZSTD_DIR); + $(warning No libzstd found, disables trace compression, please install libzstd-dev[el] and/or set LIBZSTD_DIR) NO_LIBZSTD := 1 endif endif @@ -1018,7 +1021,7 @@ ifndef NO_LIBCAP EXTLIBS += -lcap $(call detected,CONFIG_LIBCAP) else - msg := $(warning No libcap found, disables capability support, please install libcap-devel/libcap-dev); + $(warning No libcap found, disables capability support, please install libcap-devel/libcap-dev) NO_LIBCAP := 1 endif endif @@ -1031,11 +1034,11 @@ endif ifndef NO_LIBNUMA ifeq ($(feature-libnuma), 0) - msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); + $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev) NO_LIBNUMA := 1 else ifeq ($(feature-numa_num_possible_cpus), 0) - msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8); + $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8) NO_LIBNUMA := 1 else CFLAGS += -DHAVE_LIBNUMA_SUPPORT @@ -1090,14 +1093,26 @@ ifndef NO_LIBBABELTRACE EXTLIBS += -lbabeltrace-ctf $(call detected,CONFIG_LIBBABELTRACE) else - msg := $(warning No libbabeltrace found, disables 'perf data' CTF format support, please install libbabeltrace-dev[el]/libbabeltrace-ctf-dev); + $(warning No libbabeltrace found, disables 'perf data' CTF format support, please install libbabeltrace-dev[el]/libbabeltrace-ctf-dev) + endif +endif + +ifndef NO_CAPSTONE + $(call feature_check,libcapstone) + ifeq ($(feature-libcapstone), 1) + CFLAGS += -DHAVE_LIBCAPSTONE_SUPPORT $(LIBCAPSTONE_CFLAGS) + LDFLAGS += $(LICAPSTONE_LDFLAGS) + EXTLIBS += -lcapstone + $(call detected,CONFIG_LIBCAPSTONE) + else + msg := $(warning No libcapstone found, disables disasm engine support for 'perf script', please install 
libcapstone-dev/capstone-devel); endif endif ifndef NO_AUXTRACE ifeq ($(SRCARCH),x86) ifeq ($(feature-get_cpuid), 0) - msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); + $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc) NO_AUXTRACE := 1 endif endif @@ -1155,7 +1170,7 @@ ifndef NO_LIBPFM4 ASCIIDOC_EXTRA = -aHAVE_LIBPFM=1 $(call detected,CONFIG_LIBPFM4) else - msg := $(warning libpfm4 not found, disables libpfm4 support. Please install libpfm4-dev); + $(warning libpfm4 not found, disables libpfm4 support. Please install libpfm4-dev) endif endif @@ -1163,9 +1178,10 @@ endif ifneq ($(NO_LIBTRACEEVENT),1) $(call feature_check,libtraceevent) ifeq ($(feature-libtraceevent), 1) - CFLAGS += -DHAVE_LIBTRACEEVENT - EXTLIBS += -ltraceevent - LIBTRACEEVENT_VERSION := $(shell $(PKG_CONFIG) --modversion libtraceevent) + CFLAGS += -DHAVE_LIBTRACEEVENT $(LIBTRACEEVENT_CFLAGS) + LDFLAGS += $(LIBTRACEEVENT_LDFLAGS) + EXTLIBS += ${TRACEEVENTLIBS} + LIBTRACEEVENT_VERSION := $(shell PKG_CONFIG_PATH=$(LIBTRACEEVENT_DIR) $(PKG_CONFIG) --modversion libtraceevent) LIBTRACEEVENT_VERSION_1 := $(word 1, $(subst ., ,$(LIBTRACEEVENT_VERSION))) LIBTRACEEVENT_VERSION_2 := $(word 2, $(subst ., ,$(LIBTRACEEVENT_VERSION))) LIBTRACEEVENT_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEEVENT_VERSION))) @@ -1173,7 +1189,7 @@ ifneq ($(NO_LIBTRACEEVENT),1) CFLAGS += -DLIBTRACEEVENT_VERSION=$(LIBTRACEEVENT_VERSION_CPP) $(call detected,CONFIG_LIBTRACEEVENT) else - dummy := $(error ERROR: libtraceevent is missing. Please install libtraceevent-dev/libtraceevent-devel or build with NO_LIBTRACEEVENT=1) + $(error ERROR: libtraceevent is missing. Please install libtraceevent-dev/libtraceevent-devel and/or set LIBTRACEEVENT_DIR or build with NO_LIBTRACEEVENT=1) endif $(call feature_check,libtracefs) @@ -1299,6 +1315,7 @@ ifeq ($(VF),1) $(call print_var,LIBUNWIND_DIR) $(call print_var,LIBDW_DIR) $(call print_var,JDIR) + $(call print_var,LIBTRACEEVENT_DIR) ifeq ($(dwarf-post-unwind),1) $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text)) $(info $(MSG)) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index f8774a9b1377..5c35c0d89306 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -84,6 +84,9 @@ include ../scripts/utilities.mak # Define NO_LIBBABELTRACE if you do not want libbabeltrace support # for CTF data format. # +# Define NO_CAPSTONE if you do not want libcapstone support +# for disasm engine. 
+# # Define NO_LZMA if you do not want to support compressed (xz) kernel modules # # Define NO_AUXTRACE if you do not want AUX area tracing support @@ -380,7 +383,7 @@ python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT ifeq ($(CONFIG_LIBTRACEEVENT),y) PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources) else - PYTHON_EXT_SRCS := $(shell grep -v ^\#\\\|util/trace-event.c util/python-ext-sources) + PYTHON_EXT_SRCS := $(shell grep -v ^\#\\\|util/trace-event.c\\\|util/trace-event-parse.c util/python-ext-sources) endif PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBAPI) @@ -455,35 +458,53 @@ SHELL = $(SHELL_PATH) arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools ifneq ($(OUTPUT),) - arm64_gen_sysreg_outdir := $(OUTPUT) + arm64_gen_sysreg_outdir := $(abspath $(OUTPUT)) else arm64_gen_sysreg_outdir := $(CURDIR) endif arm64-sysreg-defs: FORCE - $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) + $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) \ + prefix= subdir= arm64-sysreg-defs-clean: $(call QUIET_CLEAN,arm64-sysreg-defs) $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) \ - clean > /dev/null + prefix= subdir= clean > /dev/null beauty_linux_dir := $(srctree)/tools/perf/trace/beauty/include/linux/ +beauty_uapi_linux_dir := $(srctree)/tools/perf/trace/beauty/include/uapi/linux/ +beauty_uapi_sound_dir := $(srctree)/tools/perf/trace/beauty/include/uapi/sound/ +beauty_arch_asm_dir := $(srctree)/tools/perf/trace/beauty/arch/x86/include/asm/ +beauty_x86_arch_asm_uapi_dir := $(srctree)/tools/perf/trace/beauty/arch/x86/include/uapi/asm/ + linux_uapi_dir := $(srctree)/tools/include/uapi/linux asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/ -x86_arch_asm_uapi_dir := $(srctree)/tools/arch/x86/include/uapi/asm/ x86_arch_asm_dir := $(srctree)/tools/arch/x86/include/asm/ beauty_outdir := $(OUTPUT)trace/beauty/generated beauty_ioctl_outdir := $(beauty_outdir)/ioctl + +# Create output directory if not already present +$(shell [ -d '$(beauty_ioctl_outdir)' ] || mkdir -p '$(beauty_ioctl_outdir)') + +fs_at_flags_array := $(beauty_outdir)/fs_at_flags_array.c +fs_at_flags_tbl := $(srctree)/tools/perf/trace/beauty/fs_at_flags.sh + +$(fs_at_flags_array): $(beauty_uapi_linux_dir)/fcntl.h $(fs_at_flags_tbl) + $(Q)$(SHELL) '$(fs_at_flags_tbl)' $(beauty_uapi_linux_dir) > $@ + +clone_flags_array := $(beauty_outdir)/clone_flags_array.c +clone_flags_tbl := $(srctree)/tools/perf/trace/beauty/clone.sh + +$(clone_flags_array): $(beauty_uapi_linux_dir)/sched.h $(clone_flags_tbl) + $(Q)$(SHELL) '$(clone_flags_tbl)' $(beauty_uapi_linux_dir) > $@ + drm_ioctl_array := $(beauty_ioctl_outdir)/drm_ioctl_array.c drm_hdr_dir := $(srctree)/tools/include/uapi/drm drm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/drm_ioctl.sh -# Create output directory if not already present -_dummy := $(shell [ -d '$(beauty_ioctl_outdir)' ] || mkdir -p '$(beauty_ioctl_outdir)') - $(drm_ioctl_array): $(drm_hdr_dir)/drm.h $(drm_hdr_dir)/i915_drm.h $(drm_ioctl_tbl) $(Q)$(SHELL) '$(drm_ioctl_tbl)' $(drm_hdr_dir) > $@ @@ -496,20 +517,20 @@ $(fadvise_advice_array): $(linux_uapi_dir)/in.h $(fadvise_advice_tbl) fsmount_arrays := $(beauty_outdir)/fsmount_arrays.c fsmount_tbls := $(srctree)/tools/perf/trace/beauty/fsmount.sh -$(fsmount_arrays): $(linux_uapi_dir)/fs.h $(fsmount_tbls) - $(Q)$(SHELL) '$(fsmount_tbls)' $(linux_uapi_dir) > $@ +$(fsmount_arrays): 
$(beauty_uapi_linux_dir)/mount.h $(fsmount_tbls) + $(Q)$(SHELL) '$(fsmount_tbls)' $(beauty_uapi_linux_dir) > $@ fspick_arrays := $(beauty_outdir)/fspick_arrays.c fspick_tbls := $(srctree)/tools/perf/trace/beauty/fspick.sh -$(fspick_arrays): $(linux_uapi_dir)/fs.h $(fspick_tbls) - $(Q)$(SHELL) '$(fspick_tbls)' $(linux_uapi_dir) > $@ +$(fspick_arrays): $(beauty_uapi_linux_dir)/mount.h $(fspick_tbls) + $(Q)$(SHELL) '$(fspick_tbls)' $(beauty_uapi_linux_dir) > $@ fsconfig_arrays := $(beauty_outdir)/fsconfig_arrays.c fsconfig_tbls := $(srctree)/tools/perf/trace/beauty/fsconfig.sh -$(fsconfig_arrays): $(linux_uapi_dir)/fs.h $(fsconfig_tbls) - $(Q)$(SHELL) '$(fsconfig_tbls)' $(linux_uapi_dir) > $@ +$(fsconfig_arrays): $(beauty_uapi_linux_dir)/mount.h $(fsconfig_tbls) + $(Q)$(SHELL) '$(fsconfig_tbls)' $(beauty_uapi_linux_dir) > $@ pkey_alloc_access_rights_array := $(beauty_outdir)/pkey_alloc_access_rights_array.c asm_generic_hdr_dir := $(srctree)/tools/include/uapi/asm-generic/ @@ -522,15 +543,15 @@ sndrv_ctl_ioctl_array := $(beauty_ioctl_outdir)/sndrv_ctl_ioctl_array.c sndrv_ctl_hdr_dir := $(srctree)/tools/include/uapi/sound sndrv_ctl_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh -$(sndrv_ctl_ioctl_array): $(sndrv_ctl_hdr_dir)/asound.h $(sndrv_ctl_ioctl_tbl) - $(Q)$(SHELL) '$(sndrv_ctl_ioctl_tbl)' $(sndrv_ctl_hdr_dir) > $@ +$(sndrv_ctl_ioctl_array): $(beauty_uapi_sound_dir)/asound.h $(sndrv_ctl_ioctl_tbl) + $(Q)$(SHELL) '$(sndrv_ctl_ioctl_tbl)' $(beauty_uapi_sound_dir) > $@ sndrv_pcm_ioctl_array := $(beauty_ioctl_outdir)/sndrv_pcm_ioctl_array.c sndrv_pcm_hdr_dir := $(srctree)/tools/include/uapi/sound sndrv_pcm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh -$(sndrv_pcm_ioctl_array): $(sndrv_pcm_hdr_dir)/asound.h $(sndrv_pcm_ioctl_tbl) - $(Q)$(SHELL) '$(sndrv_pcm_ioctl_tbl)' $(sndrv_pcm_hdr_dir) > $@ +$(sndrv_pcm_ioctl_array): $(beauty_uapi_sound_dir)/asound.h $(sndrv_pcm_ioctl_tbl) + $(Q)$(SHELL) '$(sndrv_pcm_ioctl_tbl)' $(beauty_uapi_sound_dir) > $@ kcmp_type_array := $(beauty_outdir)/kcmp_type_array.c kcmp_hdr_dir := $(srctree)/tools/include/uapi/linux/ @@ -559,11 +580,10 @@ $(sockaddr_arrays): $(beauty_linux_dir)/socket.h $(sockaddr_tbl) $(Q)$(SHELL) '$(sockaddr_tbl)' $(beauty_linux_dir) > $@ vhost_virtio_ioctl_array := $(beauty_ioctl_outdir)/vhost_virtio_ioctl_array.c -vhost_virtio_hdr_dir := $(srctree)/tools/include/uapi/linux vhost_virtio_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/vhost_virtio_ioctl.sh -$(vhost_virtio_ioctl_array): $(vhost_virtio_hdr_dir)/vhost.h $(vhost_virtio_ioctl_tbl) - $(Q)$(SHELL) '$(vhost_virtio_ioctl_tbl)' $(vhost_virtio_hdr_dir) > $@ +$(vhost_virtio_ioctl_array): $(beauty_uapi_linux_dir)/vhost.h $(vhost_virtio_ioctl_tbl) + $(Q)$(SHELL) '$(vhost_virtio_ioctl_tbl)' $(beauty_uapi_linux_dir) > $@ perf_ioctl_array := $(beauty_ioctl_outdir)/perf_ioctl_array.c perf_hdr_dir := $(srctree)/tools/include/uapi/linux @@ -594,15 +614,14 @@ $(mremap_flags_array): $(linux_uapi_dir)/mman.h $(mremap_flags_tbl) mount_flags_array := $(beauty_outdir)/mount_flags_array.c mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh -$(mount_flags_array): $(linux_uapi_dir)/fs.h $(mount_flags_tbl) - $(Q)$(SHELL) '$(mount_flags_tbl)' $(linux_uapi_dir) > $@ +$(mount_flags_array): $(beauty_uapi_linux_dir)/mount.h $(mount_flags_tbl) + $(Q)$(SHELL) '$(mount_flags_tbl)' $(beauty_uapi_linux_dir) > $@ move_mount_flags_array := $(beauty_outdir)/move_mount_flags_array.c move_mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/move_mount_flags.sh 
-$(move_mount_flags_array): $(linux_uapi_dir)/fs.h $(move_mount_flags_tbl) - $(Q)$(SHELL) '$(move_mount_flags_tbl)' $(linux_uapi_dir) > $@ - +$(move_mount_flags_array): $(beauty_uapi_linux_dir)/mount.h $(move_mount_flags_tbl) + $(Q)$(SHELL) '$(move_mount_flags_tbl)' $(beauty_uapi_linux_dir) > $@ mmap_prot_array := $(beauty_outdir)/mmap_prot_array.c mmap_prot_tbl := $(srctree)/tools/perf/trace/beauty/mmap_prot.sh @@ -611,29 +630,28 @@ $(mmap_prot_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman- $(Q)$(SHELL) '$(mmap_prot_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@ prctl_option_array := $(beauty_outdir)/prctl_option_array.c -prctl_hdr_dir := $(srctree)/tools/include/uapi/linux/ prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh -$(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl) - $(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@ +$(prctl_option_array): $(beauty_uapi_linux_dir)/prctl.h $(prctl_option_tbl) + $(Q)$(SHELL) '$(prctl_option_tbl)' $(beauty_uapi_linux_dir) > $@ usbdevfs_ioctl_array := $(beauty_ioctl_outdir)/usbdevfs_ioctl_array.c usbdevfs_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/usbdevfs_ioctl.sh -$(usbdevfs_ioctl_array): $(linux_uapi_dir)/usbdevice_fs.h $(usbdevfs_ioctl_tbl) - $(Q)$(SHELL) '$(usbdevfs_ioctl_tbl)' $(linux_uapi_dir) > $@ +$(usbdevfs_ioctl_array): $(beauty_uapi_linux_dir)/usbdevice_fs.h $(usbdevfs_ioctl_tbl) + $(Q)$(SHELL) '$(usbdevfs_ioctl_tbl)' $(beauty_uapi_linux_dir) > $@ x86_arch_prctl_code_array := $(beauty_outdir)/x86_arch_prctl_code_array.c x86_arch_prctl_code_tbl := $(srctree)/tools/perf/trace/beauty/x86_arch_prctl.sh -$(x86_arch_prctl_code_array): $(x86_arch_asm_uapi_dir)/prctl.h $(x86_arch_prctl_code_tbl) - $(Q)$(SHELL) '$(x86_arch_prctl_code_tbl)' $(x86_arch_asm_uapi_dir) > $@ +$(x86_arch_prctl_code_array): $(beauty_x86_arch_asm_uapi_dir)/prctl.h $(x86_arch_prctl_code_tbl) + $(Q)$(SHELL) '$(x86_arch_prctl_code_tbl)' $(beauty_x86_arch_asm_uapi_dir) > $@ x86_arch_irq_vectors_array := $(beauty_outdir)/x86_arch_irq_vectors_array.c x86_arch_irq_vectors_tbl := $(srctree)/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh -$(x86_arch_irq_vectors_array): $(x86_arch_asm_dir)/irq_vectors.h $(x86_arch_irq_vectors_tbl) - $(Q)$(SHELL) '$(x86_arch_irq_vectors_tbl)' $(x86_arch_asm_dir) > $@ +$(x86_arch_irq_vectors_array): $(beauty_arch_asm_dir)/irq_vectors.h $(x86_arch_irq_vectors_tbl) + $(Q)$(SHELL) '$(x86_arch_irq_vectors_tbl)' $(beauty_arch_asm_dir) > $@ x86_arch_MSRs_array := $(beauty_outdir)/x86_arch_MSRs_array.c x86_arch_MSRs_tbl := $(srctree)/tools/perf/trace/beauty/tracepoints/x86_msr.sh @@ -644,8 +662,8 @@ $(x86_arch_MSRs_array): $(x86_arch_asm_dir)/msr-index.h $(x86_arch_MSRs_tbl) rename_flags_array := $(beauty_outdir)/rename_flags_array.c rename_flags_tbl := $(srctree)/tools/perf/trace/beauty/rename_flags.sh -$(rename_flags_array): $(linux_uapi_dir)/fs.h $(rename_flags_tbl) - $(Q)$(SHELL) '$(rename_flags_tbl)' $(linux_uapi_dir) > $@ +$(rename_flags_array): $(beauty_uapi_linux_dir)/fs.h $(rename_flags_tbl) + $(Q)$(SHELL) '$(rename_flags_tbl)' $(beauty_uapi_linux_dir) > $@ arch_errno_name_array := $(beauty_outdir)/arch_errno_name_array.c arch_errno_hdr_dir := $(srctree)/tools @@ -654,11 +672,17 @@ arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh $(arch_errno_name_array): $(arch_errno_tbl) $(Q)$(SHELL) '$(arch_errno_tbl)' '$(patsubst -%,,$(CC))' $(arch_errno_hdr_dir) > $@ +statx_mask_array := $(beauty_outdir)/statx_mask_array.c +statx_mask_tbl := 
$(srctree)/tools/perf/trace/beauty/statx_mask.sh + +$(statx_mask_array): $(beauty_uapi_linux_dir)/stat.h $(statx_mask_tbl) + $(Q)$(SHELL) '$(statx_mask_tbl)' $(beauty_uapi_linux_dir) > $@ + sync_file_range_arrays := $(beauty_outdir)/sync_file_range_arrays.c sync_file_range_tbls := $(srctree)/tools/perf/trace/beauty/sync_file_range.sh -$(sync_file_range_arrays): $(linux_uapi_dir)/fs.h $(sync_file_range_tbls) - $(Q)$(SHELL) '$(sync_file_range_tbls)' $(linux_uapi_dir) > $@ +$(sync_file_range_arrays): $(beauty_uapi_linux_dir)/fs.h $(sync_file_range_tbls) + $(Q)$(SHELL) '$(sync_file_range_tbls)' $(beauty_uapi_linux_dir) > $@ TESTS_CORESIGHT_DIR := $(srctree)/tools/perf/tests/shell/coresight @@ -672,7 +696,7 @@ tests-coresight-targets-clean: all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) tests-coresight-targets # Create python binding output directory if not already present -_dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python') +$(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python') $(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX): $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBPERF) $(LIBSUBCMD) $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \ @@ -759,6 +783,8 @@ build-dir = $(or $(__build-dir),.) prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders \ arm64-sysreg-defs \ + $(fs_at_flags_array) \ + $(clone_flags_array) \ $(drm_ioctl_array) \ $(fadvise_advice_array) \ $(fsconfig_arrays) \ @@ -786,6 +812,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders \ $(x86_arch_prctl_code_array) \ $(rename_flags_array) \ $(arch_errno_name_array) \ + $(statx_mask_array) \ $(sync_file_range_arrays) \ $(LIBAPI) \ $(LIBPERF) \ @@ -1048,6 +1075,11 @@ install-tests: all install-gtk $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \ $(INSTALL) tests/shell/lib/*.sh -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \ $(INSTALL) tests/shell/lib/*.py -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/common'; \ + $(INSTALL) tests/shell/common/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/common'; \ + $(INSTALL) tests/shell/common/*.pl '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/common'; \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \ + $(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' ; \ $(INSTALL) tests/shell/coresight/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' $(Q)$(MAKE) -C tests/shell/coresight install-tests @@ -1142,7 +1174,7 @@ ifeq ($(VMLINUX_H),) endif endif -$(SKEL_OUT)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) +$(SKEL_OUT)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) $(VMLINUX_H) ifeq ($(VMLINUX_H),) $(QUIET_GEN)$(BPFTOOL) btf dump file $< format c > $@ else @@ -1167,7 +1199,7 @@ bpf-skel: endif # CONFIG_PERF_BPF_SKEL bpf-skel-clean: - $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) + $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) $(SKEL_OUT)/vmlinux.h clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS) diff 
--git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index 77e6663c1703..da6231367993 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -66,18 +66,30 @@ static const char * const metadata_ete_ro[] = { [CS_ETE_TS_SOURCE] = "ts_source", }; -static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu); -static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu); +enum cs_etm_version { CS_NOT_PRESENT, CS_ETMV3, CS_ETMV4, CS_ETE }; -static int cs_etm_validate_context_id(struct auxtrace_record *itr, - struct evsel *evsel, int cpu) +static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu); +static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val); +static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path); + +static enum cs_etm_version cs_etm_get_version(struct perf_pmu *cs_etm_pmu, + struct perf_cpu cpu) +{ + if (cs_etm_is_ete(cs_etm_pmu, cpu)) + return CS_ETE; + else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0])) + return CS_ETMV4; + else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER])) + return CS_ETMV3; + + return CS_NOT_PRESENT; +} + +static int cs_etm_validate_context_id(struct perf_pmu *cs_etm_pmu, struct evsel *evsel, + struct perf_cpu cpu) { - struct cs_etm_recording *ptr = - container_of(itr, struct cs_etm_recording, itr); - struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; - char path[PATH_MAX]; int err; - u32 val; + __u64 val; u64 contextid = evsel->core.attr.config & (perf_pmu__format_bits(cs_etm_pmu, "contextid") | perf_pmu__format_bits(cs_etm_pmu, "contextid1") | @@ -87,23 +99,16 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr, return 0; /* Not supported in etmv3 */ - if (!cs_etm_is_etmv4(itr, cpu)) { + if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) { pr_err("%s: contextid not supported in ETMv3, disable with %s/contextid=0/\n", CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME); return -EINVAL; } /* Get a handle on TRCIDR2 */ - snprintf(path, PATH_MAX, "cpu%d/%s", - cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]); - err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val); - - /* There was a problem reading the file, bailing out */ - if (err != 1) { - pr_err("%s: can't read file %s\n", CORESIGHT_ETM_PMU_NAME, - path); + err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2], &val); + if (err) return err; - } if (contextid & perf_pmu__format_bits(cs_etm_pmu, "contextid1")) { @@ -140,37 +145,26 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr, return 0; } -static int cs_etm_validate_timestamp(struct auxtrace_record *itr, - struct evsel *evsel, int cpu) +static int cs_etm_validate_timestamp(struct perf_pmu *cs_etm_pmu, struct evsel *evsel, + struct perf_cpu cpu) { - struct cs_etm_recording *ptr = - container_of(itr, struct cs_etm_recording, itr); - struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; - char path[PATH_MAX]; int err; - u32 val; + __u64 val; if (!(evsel->core.attr.config & perf_pmu__format_bits(cs_etm_pmu, "timestamp"))) return 0; - if (!cs_etm_is_etmv4(itr, cpu)) { + if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) { pr_err("%s: timestamp not supported in ETMv3, disable with %s/timestamp=0/\n", CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME); return -EINVAL; } /* Get a handle on TRCIRD0 */ - snprintf(path, PATH_MAX, "cpu%d/%s", - cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]); - err = 
perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val); - - /* There was a problem reading the file, bailing out */ - if (err != 1) { - pr_err("%s: can't read file %s\n", - CORESIGHT_ETM_PMU_NAME, path); + err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0], &val); + if (err) return err; - } /* * TRCIDR0.TSSIZE, bit [28-24], indicates whether global timestamping @@ -187,6 +181,13 @@ static int cs_etm_validate_timestamp(struct auxtrace_record *itr, return 0; } +static struct perf_pmu *cs_etm_get_pmu(struct auxtrace_record *itr) +{ + struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr); + + return ptr->cs_etm_pmu; +} + /* * Check whether the requested timestamp and contextid options should be * available on all requested CPUs and if not, tell the user how to override. @@ -194,41 +195,45 @@ static int cs_etm_validate_timestamp(struct auxtrace_record *itr, * first is better. In theory the kernel could still disable the option for * some other reason so this is best effort only. */ -static int cs_etm_validate_config(struct auxtrace_record *itr, +static int cs_etm_validate_config(struct perf_pmu *cs_etm_pmu, struct evsel *evsel) { - int i, err = -EINVAL; + int idx, err = 0; struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus; - struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus(); + struct perf_cpu_map *intersect_cpus; + struct perf_cpu cpu; - /* Set option of each CPU we have */ - for (i = 0; i < cpu__max_cpu().cpu; i++) { - struct perf_cpu cpu = { .cpu = i, }; - - /* - * In per-cpu case, do the validation for CPUs to work with. - * In per-thread case, the CPU map is empty. Since the traced - * program can run on any CPUs in this case, thus don't skip - * validation. - */ - if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus) && - !perf_cpu_map__has(event_cpus, cpu)) - continue; + /* + * Set option of each CPU we have. In per-cpu case, do the validation + * for CPUs to work with. In per-thread case, the CPU map has the "any" + * CPU value. The traced program can run on any CPU in this case, so + * don't skip validation. + */ + if (!perf_cpu_map__has_any_cpu(event_cpus)) { + struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus(); - if (!perf_cpu_map__has(online_cpus, cpu)) - continue; + intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus); + perf_cpu_map__put(online_cpus); + } else { + intersect_cpus = perf_cpu_map__new_online_cpus(); + } - err = cs_etm_validate_context_id(itr, evsel, i); + perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) { + if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_NOT_PRESENT) { + pr_err("%s: Not found on CPU %d. Check hardware and firmware support and that all Coresight drivers are loaded\n", + CORESIGHT_ETM_PMU_NAME, cpu.cpu); + err = -EINVAL; + break; + } + err = cs_etm_validate_context_id(cs_etm_pmu, evsel, cpu); if (err) - goto out; - err = cs_etm_validate_timestamp(itr, evsel, i); + break; + + err = cs_etm_validate_timestamp(cs_etm_pmu, evsel, cpu); if (err) - goto out; + break; } - err = 0; -out: - perf_cpu_map__put(online_cpus); + perf_cpu_map__put(intersect_cpus); return err; } @@ -435,7 +440,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr, * Also the case of per-cpu mmaps, need the contextID in order to be notified * when a context switch happened. 
*/ - if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { + if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) { evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel, "timestamp", 1); evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel, @@ -461,10 +466,10 @@ static int cs_etm_recording_options(struct auxtrace_record *itr, evsel->core.attr.sample_period = 1; /* In per-cpu case, always need the time of mmap events etc */ - if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) + if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) evsel__set_sample_bit(evsel, TIME); - err = cs_etm_validate_config(itr, cs_etm_evsel); + err = cs_etm_validate_config(cs_etm_pmu, cs_etm_evsel); out: return err; } @@ -530,48 +535,35 @@ static u64 cs_etmv4_get_config(struct auxtrace_record *itr) } static size_t -cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, - struct evlist *evlist __maybe_unused) +cs_etm_info_priv_size(struct auxtrace_record *itr, + struct evlist *evlist) { - int i; + int idx; int etmv3 = 0, etmv4 = 0, ete = 0; struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus; - struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus(); - - /* cpu map is not empty, we have specific CPUs to work with */ - if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) { - for (i = 0; i < cpu__max_cpu().cpu; i++) { - struct perf_cpu cpu = { .cpu = i, }; + struct perf_cpu_map *intersect_cpus; + struct perf_cpu cpu; + struct perf_pmu *cs_etm_pmu = cs_etm_get_pmu(itr); - if (!perf_cpu_map__has(event_cpus, cpu) || - !perf_cpu_map__has(online_cpus, cpu)) - continue; + if (!perf_cpu_map__has_any_cpu(event_cpus)) { + /* cpu map is not "any" CPU, we have specific CPUs to work with */ + struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus(); - if (cs_etm_is_ete(itr, i)) - ete++; - else if (cs_etm_is_etmv4(itr, i)) - etmv4++; - else - etmv3++; - } + intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus); + perf_cpu_map__put(online_cpus); } else { - /* get configuration for all CPUs in the system */ - for (i = 0; i < cpu__max_cpu().cpu; i++) { - struct perf_cpu cpu = { .cpu = i, }; - - if (!perf_cpu_map__has(online_cpus, cpu)) - continue; - - if (cs_etm_is_ete(itr, i)) - ete++; - else if (cs_etm_is_etmv4(itr, i)) - etmv4++; - else - etmv3++; - } + /* Event can be "any" CPU so count all online CPUs. */ + intersect_cpus = perf_cpu_map__new_online_cpus(); } + /* Count number of each type of ETM. Don't count CPUs where the version is CS_NOT_PRESENT. 
*/ + perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) { + enum cs_etm_version v = cs_etm_get_version(cs_etm_pmu, cpu); - perf_cpu_map__put(online_cpus); + ete += v == CS_ETE; + etmv4 += v == CS_ETMV4; + etmv3 += v == CS_ETMV3; + } + perf_cpu_map__put(intersect_cpus); return (CS_ETM_HEADER_SIZE + (ete * CS_ETE_PRIV_SIZE) + @@ -579,66 +571,49 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, (etmv3 * CS_ETMV3_PRIV_SIZE)); } -static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu) -{ - bool ret = false; - char path[PATH_MAX]; - int scan; - unsigned int val; - struct cs_etm_recording *ptr = - container_of(itr, struct cs_etm_recording, itr); - struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; - - /* Take any of the RO files for ETMv4 and see if it present */ - snprintf(path, PATH_MAX, "cpu%d/%s", - cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]); - scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val); - - /* The file was read successfully, we have a winner */ - if (scan == 1) - ret = true; - - return ret; -} - -static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path) +static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val) { char pmu_path[PATH_MAX]; int scan; - unsigned int val = 0; /* Get RO metadata from sysfs */ - snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path); + snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path); - scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val); - if (scan != 1) + scan = perf_pmu__scan_file(pmu, pmu_path, "%llx", val); + if (scan != 1) { pr_err("%s: error reading: %s\n", __func__, pmu_path); + return -EINVAL; + } - return val; + return 0; } -static int cs_etm_get_ro_signed(struct perf_pmu *pmu, int cpu, const char *path) +static int cs_etm_get_ro_signed(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, + __u64 *out_val) { char pmu_path[PATH_MAX]; int scan; int val = 0; /* Get RO metadata from sysfs */ - snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path); + snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path); scan = perf_pmu__scan_file(pmu, pmu_path, "%d", &val); - if (scan != 1) + if (scan != 1) { pr_err("%s: error reading: %s\n", __func__, pmu_path); + return -EINVAL; + } - return val; + *out_val = (__u64) val; + return 0; } -static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, int cpu, const char *path) +static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path) { char pmu_path[PATH_MAX]; /* Get RO metadata from sysfs */ - snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path); + snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path); return perf_pmu__file_exists(pmu, pmu_path); } @@ -651,16 +626,14 @@ static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, int cpu, const char *pa #define TRCDEVARCH_ARCHVER_MASK GENMASK(15, 12) #define TRCDEVARCH_ARCHVER(x) (((x) & TRCDEVARCH_ARCHVER_MASK) >> TRCDEVARCH_ARCHVER_SHIFT) -static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu) +static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu) { - struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr); - struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; - int trcdevarch; + __u64 trcdevarch; if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH])) return false; - trcdevarch = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH], &trcdevarch); /* * ETE if ARCHVER is 5 
(ARCHVER is 4 for ETM) and ARCHPART is 0xA13. * See ETM_DEVARCH_ETE_ARCH in coresight-etm4x.h @@ -668,7 +641,12 @@ static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu) return TRCDEVARCH_ARCHVER(trcdevarch) == 5 && TRCDEVARCH_ARCHPART(trcdevarch) == 0xA13; } -static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, int cpu) +static __u64 cs_etm_get_legacy_trace_id(struct perf_cpu cpu) +{ + return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu.cpu); +} + +static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu) { struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr); struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; @@ -676,33 +654,32 @@ static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, /* Get trace configuration register */ data[CS_ETMV4_TRCCONFIGR] = cs_etmv4_get_config(itr); /* traceID set to legacy version, in case new perf running on older system */ - data[CS_ETMV4_TRCTRACEIDR] = - CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG; + data[CS_ETMV4_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) | + CORESIGHT_TRACE_ID_UNUSED_FLAG; /* Get read-only information from sysFS */ - data[CS_ETMV4_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_etmv4_ro[CS_ETMV4_TRCIDR0]); - data[CS_ETMV4_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_etmv4_ro[CS_ETMV4_TRCIDR1]); - data[CS_ETMV4_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_etmv4_ro[CS_ETMV4_TRCIDR2]); - data[CS_ETMV4_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_etmv4_ro[CS_ETMV4_TRCIDR8]); - data[CS_ETMV4_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0], + &data[CS_ETMV4_TRCIDR0]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR1], + &data[CS_ETMV4_TRCIDR1]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2], + &data[CS_ETMV4_TRCIDR2]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR8], + &data[CS_ETMV4_TRCIDR8]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS], + &data[CS_ETMV4_TRCAUTHSTATUS]); /* Kernels older than 5.19 may not expose ts_source */ - if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE])) - data[CS_ETMV4_TS_SOURCE] = (__u64) cs_etm_get_ro_signed(cs_etm_pmu, cpu, - metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]); - else { + if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]) || + cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE], + &data[CS_ETMV4_TS_SOURCE])) { pr_debug3("[%03d] pmu file 'ts_source' not found. 
Fallback to safe value (-1)\n", - cpu); + cpu.cpu); data[CS_ETMV4_TS_SOURCE] = (__u64) -1; } } -static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, int cpu) +static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu) { struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr); struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; @@ -710,83 +687,85 @@ static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, in /* Get trace configuration register */ data[CS_ETE_TRCCONFIGR] = cs_etmv4_get_config(itr); /* traceID set to legacy version, in case new perf running on older system */ - data[CS_ETE_TRCTRACEIDR] = - CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG; + data[CS_ETE_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG; /* Get read-only information from sysFS */ - data[CS_ETE_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_ete_ro[CS_ETE_TRCIDR0]); - data[CS_ETE_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_ete_ro[CS_ETE_TRCIDR1]); - data[CS_ETE_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_ete_ro[CS_ETE_TRCIDR2]); - data[CS_ETE_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_ete_ro[CS_ETE_TRCIDR8]); - data[CS_ETE_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_ete_ro[CS_ETE_TRCAUTHSTATUS]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR0], &data[CS_ETE_TRCIDR0]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR1], &data[CS_ETE_TRCIDR1]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR2], &data[CS_ETE_TRCIDR2]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR8], &data[CS_ETE_TRCIDR8]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCAUTHSTATUS], + &data[CS_ETE_TRCAUTHSTATUS]); /* ETE uses the same registers as ETMv4 plus TRCDEVARCH */ - data[CS_ETE_TRCDEVARCH] = cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_ete_ro[CS_ETE_TRCDEVARCH]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH], + &data[CS_ETE_TRCDEVARCH]); /* Kernels older than 5.19 may not expose ts_source */ - if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE])) - data[CS_ETE_TS_SOURCE] = (__u64) cs_etm_get_ro_signed(cs_etm_pmu, cpu, - metadata_ete_ro[CS_ETE_TS_SOURCE]); - else { + if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]) || + cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE], + &data[CS_ETE_TS_SOURCE])) { pr_debug3("[%03d] pmu file 'ts_source' not found. 
Fallback to safe value (-1)\n", - cpu); + cpu.cpu); data[CS_ETE_TS_SOURCE] = (__u64) -1; } } -static void cs_etm_get_metadata(int cpu, u32 *offset, +static void cs_etm_get_metadata(struct perf_cpu cpu, u32 *offset, struct auxtrace_record *itr, struct perf_record_auxtrace_info *info) { u32 increment, nr_trc_params; u64 magic; - struct cs_etm_recording *ptr = - container_of(itr, struct cs_etm_recording, itr); - struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; + struct perf_pmu *cs_etm_pmu = cs_etm_get_pmu(itr); /* first see what kind of tracer this cpu is affined to */ - if (cs_etm_is_ete(itr, cpu)) { + switch (cs_etm_get_version(cs_etm_pmu, cpu)) { + case CS_ETE: magic = __perf_cs_ete_magic; cs_etm_save_ete_header(&info->priv[*offset], itr, cpu); /* How much space was used */ increment = CS_ETE_PRIV_MAX; nr_trc_params = CS_ETE_PRIV_MAX - CS_ETM_COMMON_BLK_MAX_V1; - } else if (cs_etm_is_etmv4(itr, cpu)) { + break; + + case CS_ETMV4: magic = __perf_cs_etmv4_magic; cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu); /* How much space was used */ increment = CS_ETMV4_PRIV_MAX; nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR; - } else { + break; + + case CS_ETMV3: magic = __perf_cs_etmv3_magic; /* Get configuration register */ info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr); /* traceID set to legacy value in case new perf running on old system */ - info->priv[*offset + CS_ETM_ETMTRACEIDR] = - CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG; + info->priv[*offset + CS_ETM_ETMTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) | + CORESIGHT_TRACE_ID_UNUSED_FLAG; /* Get read-only information from sysFS */ - info->priv[*offset + CS_ETM_ETMCCER] = - cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_etmv3_ro[CS_ETM_ETMCCER]); - info->priv[*offset + CS_ETM_ETMIDR] = - cs_etm_get_ro(cs_etm_pmu, cpu, - metadata_etmv3_ro[CS_ETM_ETMIDR]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER], + &info->priv[*offset + CS_ETM_ETMCCER]); + cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMIDR], + &info->priv[*offset + CS_ETM_ETMIDR]); /* How much space was used */ increment = CS_ETM_PRIV_MAX; nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR; + break; + + default: + case CS_NOT_PRESENT: + /* Unreachable, CPUs already validated in cs_etm_validate_config() */ + assert(0); + return; } /* Build generic header portion */ info->priv[*offset + CS_ETM_MAGIC] = magic; - info->priv[*offset + CS_ETM_CPU] = cpu; + info->priv[*offset + CS_ETM_CPU] = cpu.cpu; info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params; /* Where the next CPU entry should start from */ *offset += increment; @@ -806,6 +785,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr); struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; + struct perf_cpu cpu; if (priv_size != cs_etm_info_priv_size(itr, session->evlist)) return -EINVAL; @@ -813,16 +793,13 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, if (!session->evlist->core.nr_mmaps) return -EINVAL; - /* If the cpu_map is empty all online CPUs are involved */ - if (perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) { + /* If the cpu_map has the "any" CPU, all online CPUs are involved */ + if (perf_cpu_map__has_any_cpu(event_cpus)) { cpu_map = online_cpus; } else { /* Make sure all specified CPUs are online */ - for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) { - struct perf_cpu cpu = { .cpu = i, }; - - if (perf_cpu_map__has(event_cpus, cpu) && - 
!perf_cpu_map__has(online_cpus, cpu)) + perf_cpu_map__for_each_cpu(cpu, i, event_cpus) { + if (!perf_cpu_map__has(online_cpus, cpu)) return -EINVAL; } @@ -842,11 +819,9 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, offset = CS_ETM_SNAPSHOT + 1; - for (i = 0; i < cpu__max_cpu().cpu && offset < priv_size; i++) { - struct perf_cpu cpu = { .cpu = i, }; - - if (perf_cpu_map__has(cpu_map, cpu)) - cs_etm_get_metadata(i, &offset, itr, info); + perf_cpu_map__for_each_cpu(cpu, i, cpu_map) { + assert(offset < priv_size); + cs_etm_get_metadata(cpu, &offset, itr, info); } perf_cpu_map__put(online_cpus); diff --git a/tools/perf/arch/arm/util/perf_regs.c b/tools/perf/arch/arm/util/perf_regs.c index 2c56e8b56ddf..f94a0210c7b7 100644 --- a/tools/perf/arch/arm/util/perf_regs.c +++ b/tools/perf/arch/arm/util/perf_regs.c @@ -2,7 +2,7 @@ #include "perf_regs.h" #include "../../../util/perf_regs.h" -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG_END }; @@ -15,3 +15,8 @@ uint64_t arch__user_reg_mask(void) { return PERF_REGS_MASK; } + +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c index 7f3af3b97f3b..8b7cb68ba1a8 100644 --- a/tools/perf/arch/arm/util/pmu.c +++ b/tools/perf/arch/arm/util/pmu.c @@ -13,6 +13,7 @@ #include "hisi-ptt.h" #include "../../../util/pmu.h" #include "../../../util/cs-etm.h" +#include "../../arm64/util/mem-events.h" void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused) { @@ -26,6 +27,8 @@ void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused) pmu->selectable = true; pmu->is_uncore = false; pmu->perf_event_attr_init_default = arm_spe_pmu_default_config; + if (!strcmp(pmu->name, "arm_spe_0")) + pmu->mem_events = perf_mem_events_arm; } else if (strstarts(pmu->name, HISI_PTT_PMU_NAME)) { pmu->selectable = true; #endif diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile index fab3095fb5d0..5735ed4479bb 100644 --- a/tools/perf/arch/arm64/Makefile +++ b/tools/perf/arch/arm64/Makefile @@ -18,7 +18,7 @@ sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/ systbl := $(sysprf)/mksyscalltbl # Create output directory if not already present -_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') +$(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header): $(sysdef) $(systbl) $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@ diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c index 51ccbfd3d246..0b52e67edb3b 100644 --- a/tools/perf/arch/arm64/util/arm-spe.c +++ b/tools/perf/arch/arm64/util/arm-spe.c @@ -232,7 +232,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr, * In the case of per-cpu mmaps, sample CPU for AUX event; * also enable the timestamp tracing for samples correlation. 
*/ - if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { + if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) { evsel__set_sample_bit(arm_spe_evsel, CPU); evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel, "ts_enable", 1); @@ -265,7 +265,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr, tracking_evsel->core.attr.sample_period = 1; /* In per-cpu case, always need the time of mmap events etc */ - if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { + if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) { evsel__set_sample_bit(tracking_evsel, TIME); evsel__set_sample_bit(tracking_evsel, CPU); diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c index 97037499152e..741df3614a09 100644 --- a/tools/perf/arch/arm64/util/header.c +++ b/tools/perf/arch/arm64/util/header.c @@ -4,8 +4,6 @@ #include <stdio.h> #include <stdlib.h> #include <perf/cpumap.h> -#include <util/cpumap.h> -#include <internal/cpumap.h> #include <api/fs/fs.h> #include <errno.h> #include "debug.h" @@ -19,20 +17,18 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus) { const char *sysfs = sysfs__mountpoint(); - int cpu; - int ret = EINVAL; + struct perf_cpu cpu; + int idx, ret = EINVAL; if (!sysfs || sz < MIDR_SIZE) return EINVAL; - cpus = perf_cpu_map__get(cpus); - - for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) { + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { char path[PATH_MAX]; FILE *file; scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d" MIDR, - sysfs, RC_CHK_ACCESS(cpus)->map[cpu].cpu); + sysfs, cpu.cpu); file = fopen(path, "r"); if (!file) { @@ -51,7 +47,6 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus) break; } - perf_cpu_map__put(cpus); return ret; } diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c index ba1144366e85..aab1cc2bc283 100644 --- a/tools/perf/arch/arm64/util/machine.c +++ b/tools/perf/arch/arm64/util/machine.c @@ -12,5 +12,7 @@ void arch__add_leaf_frame_record_opts(struct record_opts *opts) { + const struct sample_reg *sample_reg_masks = arch__sample_reg_masks(); + opts->sample_user_regs |= sample_reg_masks[PERF_REG_ARM64_LR].mask; } diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c index 3bcc5c7035c2..9f8da7937255 100644 --- a/tools/perf/arch/arm64/util/mem-events.c +++ b/tools/perf/arch/arm64/util/mem-events.c @@ -1,37 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -#include "map_symbol.h" +#include "util/map_symbol.h" +#include "util/mem-events.h" #include "mem-events.h" -#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s } +#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a } -static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { - E("spe-load", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=%u/", "arm_spe_0"), - E("spe-store", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=0,store_filter=1/", "arm_spe_0"), - E("spe-ldst", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=1,min_latency=%u/", "arm_spe_0"), +struct perf_mem_event perf_mem_events_arm[PERF_MEM_EVENTS__MAX] = { + E("spe-load", "%s/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=%u/", NULL, true, 0), + E("spe-store", "%s/ts_enable=1,pa_enable=1,load_filter=0,store_filter=1/", NULL, false, 0), + E("spe-ldst", "%s/ts_enable=1,pa_enable=1,load_filter=1,store_filter=1,min_latency=%u/", NULL, true, 0), }; - -static char mem_ev_name[100]; - 
-struct perf_mem_event *perf_mem_events__ptr(int i) -{ - if (i >= PERF_MEM_EVENTS__MAX) - return NULL; - - return &perf_mem_events[i]; -} - -const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused) -{ - struct perf_mem_event *e = perf_mem_events__ptr(i); - - if (i >= PERF_MEM_EVENTS__MAX) - return NULL; - - if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) - scnprintf(mem_ev_name, sizeof(mem_ev_name), - e->name, perf_mem_events__loads_ldlat); - else /* PERF_MEM_EVENTS__STORE */ - scnprintf(mem_ev_name, sizeof(mem_ev_name), e->name); - - return mem_ev_name; -} diff --git a/tools/perf/arch/arm64/util/mem-events.h b/tools/perf/arch/arm64/util/mem-events.h new file mode 100644 index 000000000000..5fc50be4be38 --- /dev/null +++ b/tools/perf/arch/arm64/util/mem-events.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ARM64_MEM_EVENTS_H +#define _ARM64_MEM_EVENTS_H + +extern struct perf_mem_event perf_mem_events_arm[PERF_MEM_EVENTS__MAX]; + +#endif /* _ARM64_MEM_EVENTS_H */ diff --git a/tools/perf/arch/arm64/util/perf_regs.c b/tools/perf/arch/arm64/util/perf_regs.c index 1b79d8eab22f..09308665e28a 100644 --- a/tools/perf/arch/arm64/util/perf_regs.c +++ b/tools/perf/arch/arm64/util/perf_regs.c @@ -16,7 +16,7 @@ #define HWCAP_SVE (1 << 22) #endif -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG(x0, PERF_REG_ARM64_X0), SMPL_REG(x1, PERF_REG_ARM64_X1), SMPL_REG(x2, PERF_REG_ARM64_X2), @@ -175,3 +175,8 @@ uint64_t arch__user_reg_mask(void) } return PERF_REGS_MASK; } + +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} diff --git a/tools/perf/arch/csky/util/perf_regs.c b/tools/perf/arch/csky/util/perf_regs.c index c0877c264d49..6b1665f41180 100644 --- a/tools/perf/arch/csky/util/perf_regs.c +++ b/tools/perf/arch/csky/util/perf_regs.c @@ -2,7 +2,7 @@ #include "perf_regs.h" #include "../../util/perf_regs.h" -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG_END }; @@ -15,3 +15,8 @@ uint64_t arch__user_reg_mask(void) { return PERF_REGS_MASK; } + +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} diff --git a/tools/perf/arch/loongarch/Makefile b/tools/perf/arch/loongarch/Makefile index c392e7af4743..3992a67a87d9 100644 --- a/tools/perf/arch/loongarch/Makefile +++ b/tools/perf/arch/loongarch/Makefile @@ -17,7 +17,7 @@ sysprf := $(srctree)/tools/perf/arch/loongarch/entry/syscalls/ systbl := $(sysprf)/mksyscalltbl # Create output directory if not already present -_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') +$(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header): $(sysdef) $(systbl) $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@ diff --git a/tools/perf/arch/loongarch/util/perf_regs.c b/tools/perf/arch/loongarch/util/perf_regs.c index 2c56e8b56ddf..f94a0210c7b7 100644 --- a/tools/perf/arch/loongarch/util/perf_regs.c +++ b/tools/perf/arch/loongarch/util/perf_regs.c @@ -2,7 +2,7 @@ #include "perf_regs.h" #include "../../../util/perf_regs.h" -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG_END }; @@ -15,3 +15,8 @@ uint64_t arch__user_reg_mask(void) { return PERF_REGS_MASK; } + +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} diff --git a/tools/perf/arch/mips/Makefile b/tools/perf/arch/mips/Makefile index 
8bc09072e3d6..cd0b011b3be5 100644 --- a/tools/perf/arch/mips/Makefile +++ b/tools/perf/arch/mips/Makefile @@ -11,7 +11,7 @@ sysdef := $(sysprf)/syscall_n64.tbl systbl := $(sysprf)/mksyscalltbl # Create output directory if not already present -_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') +$(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header): $(sysdef) $(systbl) $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@ diff --git a/tools/perf/arch/mips/util/perf_regs.c b/tools/perf/arch/mips/util/perf_regs.c index c0877c264d49..6b1665f41180 100644 --- a/tools/perf/arch/mips/util/perf_regs.c +++ b/tools/perf/arch/mips/util/perf_regs.c @@ -2,7 +2,7 @@ #include "perf_regs.h" #include "../../util/perf_regs.h" -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG_END }; @@ -15,3 +15,8 @@ uint64_t arch__user_reg_mask(void) { return PERF_REGS_MASK; } + +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile index 840ea0e59287..bf6d323574f6 100644 --- a/tools/perf/arch/powerpc/Makefile +++ b/tools/perf/arch/powerpc/Makefile @@ -19,7 +19,7 @@ sysdef := $(sysprf)/syscall.tbl systbl := $(sysprf)/mksyscalltbl # Create output directory if not already present -_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') +$(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header64): $(sysdef) $(systbl) $(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@ diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build index 9889245c555c..1d323f3a3322 100644 --- a/tools/perf/arch/powerpc/util/Build +++ b/tools/perf/arch/powerpc/util/Build @@ -2,6 +2,7 @@ perf-y += header.o perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o perf-y += perf_regs.o perf-y += mem-events.o +perf-y += pmu.o perf-y += sym-handling.o perf-y += evsel.o perf-y += event.o diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c index d9a0ac1cdf30..c8357b571ccf 100644 --- a/tools/perf/arch/powerpc/util/kvm-stat.c +++ b/tools/perf/arch/powerpc/util/kvm-stat.c @@ -114,7 +114,7 @@ static int is_tracepoint_available(const char *str, struct evlist *evlist) parse_events_error__init(&err); ret = parse_events(evlist, str, &err); - if (err.str) + if (ret) parse_events_error__print(&err, "tracepoint"); parse_events_error__exit(&err); return ret; diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c index 78b986e5268d..765d4a054b0a 100644 --- a/tools/perf/arch/powerpc/util/mem-events.c +++ b/tools/perf/arch/powerpc/util/mem-events.c @@ -1,12 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -#include "map_symbol.h" +#include "util/map_symbol.h" +#include "util/mem-events.h" #include "mem-events.h" -/* PowerPC does not support 'ldlat' parameter. 
*/ -const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused) -{ - if (i == PERF_MEM_EVENTS__LOAD) - return "cpu/mem-loads/"; +#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a } - return "cpu/mem-stores/"; -} +struct perf_mem_event perf_mem_events_power[PERF_MEM_EVENTS__MAX] = { + E("ldlat-loads", "%s/mem-loads/", "mem-loads", false, 0), + E("ldlat-stores", "%s/mem-stores/", "mem-stores", false, 0), + E(NULL, NULL, NULL, false, 0), +}; diff --git a/tools/perf/arch/powerpc/util/mem-events.h b/tools/perf/arch/powerpc/util/mem-events.h new file mode 100644 index 000000000000..6acc3d1b6873 --- /dev/null +++ b/tools/perf/arch/powerpc/util/mem-events.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _POWER_MEM_EVENTS_H +#define _POWER_MEM_EVENTS_H + +extern struct perf_mem_event perf_mem_events_power[PERF_MEM_EVENTS__MAX]; + +#endif /* _POWER_MEM_EVENTS_H */ diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c index b38aa056eea0..e8e6e6fc6f17 100644 --- a/tools/perf/arch/powerpc/util/perf_regs.c +++ b/tools/perf/arch/powerpc/util/perf_regs.c @@ -17,7 +17,7 @@ #define PVR_POWER9 0x004E #define PVR_POWER10 0x0080 -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG(r0, PERF_REG_POWERPC_R0), SMPL_REG(r1, PERF_REG_POWERPC_R1), SMPL_REG(r2, PERF_REG_POWERPC_R2), @@ -232,3 +232,8 @@ uint64_t arch__user_reg_mask(void) { return PERF_REGS_MASK; } + +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} diff --git a/tools/perf/arch/powerpc/util/pmu.c b/tools/perf/arch/powerpc/util/pmu.c new file mode 100644 index 000000000000..554675deef7b --- /dev/null +++ b/tools/perf/arch/powerpc/util/pmu.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <string.h> + +#include "../../../util/pmu.h" +#include "mem-events.h" + +void perf_pmu__arch_init(struct perf_pmu *pmu) +{ + if (pmu->is_core) + pmu->mem_events = perf_mem_events_power; +} diff --git a/tools/perf/arch/riscv/util/header.c b/tools/perf/arch/riscv/util/header.c index 4a41856938a8..1b29030021ee 100644 --- a/tools/perf/arch/riscv/util/header.c +++ b/tools/perf/arch/riscv/util/header.c @@ -41,7 +41,7 @@ static char *_get_cpuid(void) char *mimpid = NULL; char *cpuid = NULL; int read; - unsigned long line_sz; + size_t line_sz; FILE *cpuinfo; cpuinfo = fopen(CPUINFO, "r"); diff --git a/tools/perf/arch/riscv/util/perf_regs.c b/tools/perf/arch/riscv/util/perf_regs.c index c0877c264d49..6b1665f41180 100644 --- a/tools/perf/arch/riscv/util/perf_regs.c +++ b/tools/perf/arch/riscv/util/perf_regs.c @@ -2,7 +2,7 @@ #include "perf_regs.h" #include "../../util/perf_regs.h" -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG_END }; @@ -15,3 +15,8 @@ uint64_t arch__user_reg_mask(void) { return PERF_REGS_MASK; } + +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile index 74bffbea03e2..56994e63b43a 100644 --- a/tools/perf/arch/s390/Makefile +++ b/tools/perf/arch/s390/Makefile @@ -17,7 +17,7 @@ sysdef := $(sysprf)/syscall.tbl systbl := $(sysprf)/mksyscalltbl # Create output directory if not already present -_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') +$(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header): $(sysdef) $(systbl) $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@ 
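The perf_regs.c hunks repeated across arm, arm64, csky, loongarch, mips, powerpc, riscv and s390 in this patch all apply the same refactor: each arch's sample_reg_masks table loses external linkage, and generic code reaches it through the new arch__sample_reg_masks() accessor instead of an extern array that every arch had to define. What follows is a minimal standalone sketch of that pattern, using toy register names and a stand-in struct rather than the real util/perf_regs.h definitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for perf's struct sample_reg and SMPL_REG macros. */
struct sample_reg {
	const char *name;
	uint64_t mask;
};
#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) }
#define SMPL_REG_END { .name = NULL, .mask = 0 }

/* After the refactor the table has internal linkage only... */
static const struct sample_reg sample_reg_masks[] = {
	SMPL_REG(x0, 0),   /* toy entries; the real tables list every arch register */
	SMPL_REG(lr, 30),
	SMPL_REG_END
};

/* ...and each arch exports this accessor instead of the array itself. */
const struct sample_reg *arch__sample_reg_masks(void)
{
	return sample_reg_masks;
}

int main(void)
{
	/* Generic code walks the table through the accessor. */
	for (const struct sample_reg *r = arch__sample_reg_masks(); r->name; r++)
		printf("%-4s mask 0x%" PRIx64 "\n", r->name, r->mask);
	return 0;
}

Callers can also index the returned table directly, as the arm64 machine.c hunk above does with sample_reg_masks[PERF_REG_ARM64_LR].mask, which relies on each table keeping its SMPL_REG entries in register-enum order.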
diff --git a/tools/perf/arch/s390/util/perf_regs.c b/tools/perf/arch/s390/util/perf_regs.c index c0877c264d49..6b1665f41180 100644 --- a/tools/perf/arch/s390/util/perf_regs.c +++ b/tools/perf/arch/s390/util/perf_regs.c @@ -2,7 +2,7 @@ #include "perf_regs.h" #include "../../util/perf_regs.h" -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG_END }; @@ -15,3 +15,8 @@ uint64_t arch__user_reg_mask(void) { return PERF_REGS_MASK; } + +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build index a7dd46a5b678..ed37013b4289 100644 --- a/tools/perf/arch/x86/Build +++ b/tools/perf/arch/x86/Build @@ -1,2 +1,16 @@ perf-y += util/ perf-y += tests/ + +ifdef SHELLCHECK + SHELL_TESTS := entry/syscalls/syscalltbl.sh + TEST_LOGS := $(SHELL_TESTS:%=%.shellcheck_log) +else + SHELL_TESTS := + TEST_LOGS := +endif + +$(OUTPUT)%.shellcheck_log: % + $(call rule_mkdir) + $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false) + +perf-y += $(TEST_LOGS) diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile index 5a9f9a7bf07d..8952e00f9b60 100644 --- a/tools/perf/arch/x86/Makefile +++ b/tools/perf/arch/x86/Makefile @@ -17,7 +17,7 @@ sys := $(srctree)/tools/perf/arch/x86/entry/syscalls systbl := $(sys)/syscalltbl.sh # Create output directory if not already present -_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') +$(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header): $(sys)/syscall_64.tbl $(systbl) $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@ diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build index b87f46e5feea..c1e3b7d39554 100644 --- a/tools/perf/arch/x86/tests/Build +++ b/tools/perf/arch/x86/tests/Build @@ -10,3 +10,17 @@ perf-$(CONFIG_AUXTRACE) += insn-x86.o endif perf-$(CONFIG_X86_64) += bp-modify.o perf-y += amd-ibs-via-core-pmu.o + +ifdef SHELLCHECK + SHELL_TESTS := gen-insn-x86-dat.sh + TEST_LOGS := $(SHELL_TESTS:%=%.shellcheck_log) +else + SHELL_TESTS := + TEST_LOGS := +endif + +$(OUTPUT)%.shellcheck_log: % + $(call rule_mkdir) + $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false) + +perf-y += $(TEST_LOGS) diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c index 5bfec3345d59..c05c0a85dad4 100644 --- a/tools/perf/arch/x86/tests/dwarf-unwind.c +++ b/tools/perf/arch/x86/tests/dwarf-unwind.c @@ -34,6 +34,7 @@ static int sample_ustack(struct perf_sample *sample, } stack_size = map__end(map) - sp; + map__put(map); stack_size = stack_size > STACK_SIZE ? 
STACK_SIZE : stack_size; memcpy(buf, (void *) sp, stack_size); diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh index 0d0a003a9c5e..89c46532cd5c 100755 --- a/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh +++ b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh @@ -11,7 +11,7 @@ if [ "$(uname -m)" != "x86_64" ]; then exit 1 fi -cd $(dirname $0) +cd "$(dirname $0)" trap 'echo "Might need a more recent version of binutils"' EXIT diff --git a/tools/perf/arch/x86/tests/hybrid.c b/tools/perf/arch/x86/tests/hybrid.c index 40f5d17fedab..e221ea104174 100644 --- a/tools/perf/arch/x86/tests/hybrid.c +++ b/tools/perf/arch/x86/tests/hybrid.c @@ -259,11 +259,10 @@ static int test_event(const struct evlist_test *e) parse_events_error__init(&err); ret = parse_events(evlist, e->name, &err); if (ret) { - pr_debug("failed to parse event '%s', err %d, str '%s'\n", - e->name, ret, err.str); + pr_debug("failed to parse event '%s', err %d\n", e->name, ret); parse_events_error__print(&err, e->name); ret = TEST_FAIL; - if (strstr(err.str, "can't access trace events")) + if (parse_events_error__contains(&err, "can't access trace events")) ret = TEST_SKIP; } else { ret = e->check(evlist); diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c index af8ae4647585..34696f3d3d5d 100644 --- a/tools/perf/arch/x86/util/intel-bts.c +++ b/tools/perf/arch/x86/util/intel-bts.c @@ -143,7 +143,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr, if (!opts->full_auxtrace) return 0; - if (opts->full_auxtrace && !perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { + if (opts->full_auxtrace && !perf_cpu_map__is_any_cpu_or_is_empty(cpus)) { pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n"); return -EINVAL; } @@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr, * In the case of per-cpu mmaps, we need the CPU on the * AUX event. */ - if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) + if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) evsel__set_sample_bit(intel_bts_evsel, CPU); } diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index d199619df3ab..6de7e2d21075 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -369,7 +369,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr, ui__warning("Intel Processor Trace: TSC not available\n"); } - per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus); + per_cpu_mmaps = !perf_cpu_map__is_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus); auxtrace_info->type = PERF_AUXTRACE_INTEL_PT; auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type; @@ -774,7 +774,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, * Per-cpu recording needs sched_switch events to distinguish different * threads. */ - if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) && + if (have_timing_info && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) && !record_opts__no_switch_events(opts)) { if (perf_can_record_switch_events()) { bool cpu_wide = !target__none(&opts->target) && @@ -832,7 +832,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, * In the case of per-cpu mmaps, we need the CPU on the * AUX event. 
*/ - if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) + if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) evsel__set_sample_bit(intel_pt_evsel, CPU); } @@ -858,7 +858,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, tracking_evsel->immediate = true; /* In per-cpu case, always need the time of mmap events etc */ - if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { + if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) { evsel__set_sample_bit(tracking_evsel, TIME); /* And the CPU for switch events */ evsel__set_sample_bit(tracking_evsel, CPU); @@ -870,7 +870,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, * Warn the user when we do not have enough information to decode i.e. * per-cpu with no sched_switch (except workload-only). */ - if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) && + if (!ptr->have_sched_switch && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) && !target__none(&opts->target) && !intel_pt_evsel->core.attr.exclude_user) ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n"); diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c index 191b372f9a2d..62df03e91c7e 100644 --- a/tools/perf/arch/x86/util/mem-events.c +++ b/tools/perf/arch/x86/util/mem-events.c @@ -1,93 +1,28 @@ // SPDX-License-Identifier: GPL-2.0 -#include "util/pmu.h" -#include "util/pmus.h" -#include "util/env.h" -#include "map_symbol.h" -#include "mem-events.h" #include "linux/string.h" -#include "env.h" +#include "util/map_symbol.h" +#include "util/mem-events.h" +#include "mem-events.h" -static char mem_loads_name[100]; -static bool mem_loads_name__init; -static char mem_stores_name[100]; #define MEM_LOADS_AUX 0x8203 -#define MEM_LOADS_AUX_NAME "{%s/mem-loads-aux/,%s/mem-loads,ldlat=%u/}:P" -#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s } +#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a } -static struct perf_mem_event perf_mem_events_intel[PERF_MEM_EVENTS__MAX] = { - E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "%s/events/mem-loads"), - E("ldlat-stores", "%s/mem-stores/P", "%s/events/mem-stores"), - E(NULL, NULL, NULL), +struct perf_mem_event perf_mem_events_intel[PERF_MEM_EVENTS__MAX] = { + E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "mem-loads", true, 0), + E("ldlat-stores", "%s/mem-stores/P", "mem-stores", false, 0), + E(NULL, NULL, NULL, false, 0), }; -static struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = { - E(NULL, NULL, NULL), - E(NULL, NULL, NULL), - E("mem-ldst", "ibs_op//", "ibs_op"), +struct perf_mem_event perf_mem_events_intel_aux[PERF_MEM_EVENTS__MAX] = { + E("ldlat-loads", "{%s/mem-loads-aux/,%s/mem-loads,ldlat=%u/}:P", "mem-loads", true, MEM_LOADS_AUX), + E("ldlat-stores", "%s/mem-stores/P", "mem-stores", false, 0), + E(NULL, NULL, NULL, false, 0), }; -struct perf_mem_event *perf_mem_events__ptr(int i) -{ - if (i >= PERF_MEM_EVENTS__MAX) - return NULL; - - if (x86__is_amd_cpu()) - return &perf_mem_events_amd[i]; - - return &perf_mem_events_intel[i]; -} - -bool is_mem_loads_aux_event(struct evsel *leader) -{ - struct perf_pmu *pmu = perf_pmus__find("cpu"); - - if (!pmu) - pmu = perf_pmus__find("cpu_core"); - - if (pmu && !perf_pmu__have_event(pmu, "mem-loads-aux")) - return false; - - return leader->core.attr.config == MEM_LOADS_AUX; -} - -const char *perf_mem_events__name(int i, const char *pmu_name) -{ - struct perf_mem_event *e = perf_mem_events__ptr(i); - - if (!e) - return 
NULL; - - if (i == PERF_MEM_EVENTS__LOAD) { - if (mem_loads_name__init && !pmu_name) - return mem_loads_name; - - if (!pmu_name) { - mem_loads_name__init = true; - pmu_name = "cpu"; - } - - if (perf_pmus__have_event(pmu_name, "mem-loads-aux")) { - scnprintf(mem_loads_name, sizeof(mem_loads_name), - MEM_LOADS_AUX_NAME, pmu_name, pmu_name, - perf_mem_events__loads_ldlat); - } else { - scnprintf(mem_loads_name, sizeof(mem_loads_name), - e->name, pmu_name, - perf_mem_events__loads_ldlat); - } - return mem_loads_name; - } - - if (i == PERF_MEM_EVENTS__STORE) { - if (!pmu_name) - pmu_name = "cpu"; - - scnprintf(mem_stores_name, sizeof(mem_stores_name), - e->name, pmu_name); - return mem_stores_name; - } - - return e->name; -} +struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = { + E(NULL, NULL, NULL, false, 0), + E(NULL, NULL, NULL, false, 0), + E("mem-ldst", "%s//", NULL, false, 0), +}; diff --git a/tools/perf/arch/x86/util/mem-events.h b/tools/perf/arch/x86/util/mem-events.h new file mode 100644 index 000000000000..f55c8d3b7d59 --- /dev/null +++ b/tools/perf/arch/x86/util/mem-events.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _X86_MEM_EVENTS_H +#define _X86_MEM_EVENTS_H + +extern struct perf_mem_event perf_mem_events_intel[PERF_MEM_EVENTS__MAX]; +extern struct perf_mem_event perf_mem_events_intel_aux[PERF_MEM_EVENTS__MAX]; + +extern struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX]; + +#endif /* _X86_MEM_EVENTS_H */ diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c index b813502a2727..12fd93f04802 100644 --- a/tools/perf/arch/x86/util/perf_regs.c +++ b/tools/perf/arch/x86/util/perf_regs.c @@ -13,7 +13,7 @@ #include "../../../util/pmu.h" #include "../../../util/pmus.h" -const struct sample_reg sample_reg_masks[] = { +static const struct sample_reg sample_reg_masks[] = { SMPL_REG(AX, PERF_REG_X86_AX), SMPL_REG(BX, PERF_REG_X86_BX), SMPL_REG(CX, PERF_REG_X86_CX), @@ -276,6 +276,11 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op) return SDT_ARG_VALID; } +const struct sample_reg *arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} + uint64_t arch__intr_reg_mask(void) { struct perf_event_attr attr = { diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c index 469555ae9b3c..c3d89d6ba1bf 100644 --- a/tools/perf/arch/x86/util/pmu.c +++ b/tools/perf/arch/x86/util/pmu.c @@ -15,6 +15,7 @@ #include "../../../util/pmu.h" #include "../../../util/fncache.h" #include "../../../util/pmus.h" +#include "mem-events.h" #include "env.h" void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused) @@ -30,14 +31,14 @@ void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused) pmu->selectable = true; } #endif -} - -int perf_pmus__num_mem_pmus(void) -{ - /* AMD uses IBS OP pmu and not a core PMU for perf mem/c2c */ - if (x86__is_amd_cpu()) - return 1; - /* Intel uses core pmus for perf mem/c2c */ - return perf_pmus__num_core_pmus(); + if (x86__is_amd_cpu()) { + if (!strcmp(pmu->name, "ibs_op")) + pmu->mem_events = perf_mem_events_amd; + } else if (pmu->is_core) { + if (perf_pmu__have_event(pmu, "mem-loads-aux")) + pmu->mem_events = perf_mem_events_intel_aux; + else + pmu->mem_events = perf_mem_events_intel; + } } diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c index 9b99f48b923c..e2d6cfe21057 100644 --- a/tools/perf/arch/x86/util/tsc.c +++ b/tools/perf/arch/x86/util/tsc.c @@ -33,7 +33,7 @@ static double cpuinfo_tsc_freq(void) cpuinfo = 
fopen("/proc/cpuinfo", "r"); if (!cpuinfo) { - pr_err("Failed to read /proc/cpuinfo for TSC frequency"); + pr_err("Failed to read /proc/cpuinfo for TSC frequency\n"); return NAN; } while (getline(&line, &len, cpuinfo) > 0) { @@ -48,7 +48,7 @@ static double cpuinfo_tsc_freq(void) } out: if (fpclassify(result) == FP_ZERO) - pr_err("Failed to find TSC frequency in /proc/cpuinfo"); + pr_err("Failed to find TSC frequency in /proc/cpuinfo\n"); free(line); fclose(cpuinfo); diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index faa18e6d2467..9f736423af53 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -46,6 +46,8 @@ int bench_breakpoint_enable(int argc, const char **argv); int bench_uprobe_baseline(int argc, const char **argv); int bench_uprobe_empty(int argc, const char **argv); int bench_uprobe_trace_printk(int argc, const char **argv); +int bench_uprobe_empty_ret(int argc, const char **argv); +int bench_uprobe_trace_printk_ret(int argc, const char **argv); int bench_pmu_scan(int argc, const char **argv); #define BENCH_FORMAT_DEFAULT_STR "default" diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c index 49331743c743..a759eb2328be 100644 --- a/tools/perf/bench/inject-buildid.c +++ b/tools/perf/bench/inject-buildid.c @@ -362,7 +362,7 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss) return -1; for (i = 0; i < nr_mmaps; i++) { - int idx = rand() % (nr_dsos - 1); + int idx = rand() % nr_dsos; struct bench_dso *dso = &dsos[idx]; u64 timestamp = rand() % 1000000; diff --git a/tools/perf/bench/uprobe.c b/tools/perf/bench/uprobe.c index 5c71fdc419dd..0b90275862e1 100644 --- a/tools/perf/bench/uprobe.c +++ b/tools/perf/bench/uprobe.c @@ -26,9 +26,11 @@ static int loops = LOOPS_DEFAULT; enum bench_uprobe { - BENCH_UPROBE__BASELINE, - BENCH_UPROBE__EMPTY, - BENCH_UPROBE__TRACE_PRINTK, + BENCH_UPROBE__BASELINE, + BENCH_UPROBE__EMPTY, + BENCH_UPROBE__TRACE_PRINTK, + BENCH_UPROBE__EMPTY_RET, + BENCH_UPROBE__TRACE_PRINTK_RET, }; static const struct option options[] = { @@ -47,7 +49,7 @@ static const char * const bench_uprobe_usage[] = { #define bench_uprobe__attach_uprobe(prog) \ skel->links.prog = bpf_program__attach_uprobe_opts(/*prog=*/skel->progs.prog, \ /*pid=*/-1, \ - /*binary_path=*/"/lib64/libc.so.6", \ + /*binary_path=*/"libc.so.6", \ /*func_offset=*/0, \ /*opts=*/&uprobe_opts); \ if (!skel->links.prog) { \ @@ -81,6 +83,8 @@ static int bench_uprobe__setup_bpf_skel(enum bench_uprobe bench) case BENCH_UPROBE__BASELINE: break; case BENCH_UPROBE__EMPTY: bench_uprobe__attach_uprobe(empty); break; case BENCH_UPROBE__TRACE_PRINTK: bench_uprobe__attach_uprobe(trace_printk); break; + case BENCH_UPROBE__EMPTY_RET: bench_uprobe__attach_uprobe(empty_ret); break; + case BENCH_UPROBE__TRACE_PRINTK_RET: bench_uprobe__attach_uprobe(trace_printk_ret); break; default: fprintf(stderr, "Invalid bench: %d\n", bench); goto cleanup; @@ -197,3 +201,13 @@ int bench_uprobe_trace_printk(int argc, const char **argv) { return bench_uprobe(argc, argv, BENCH_UPROBE__TRACE_PRINTK); } + +int bench_uprobe_empty_ret(int argc, const char **argv) +{ + return bench_uprobe(argc, argv, BENCH_UPROBE__EMPTY_RET); +} + +int bench_uprobe_trace_printk_ret(int argc, const char **argv) +{ + return bench_uprobe(argc, argv, BENCH_UPROBE__TRACE_PRINTK_RET); +} diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 6c1cc797692d..50d2fb222d48 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -37,11 +37,13 
@@ #include "util/map_symbol.h" #include "util/branch.h" #include "util/util.h" +#include "ui/progress.h" #include <dlfcn.h> #include <errno.h> #include <linux/bitmap.h> #include <linux/err.h> +#include <inttypes.h> struct perf_annotate { struct perf_tool tool; @@ -217,7 +219,7 @@ static int process_branch_callback(struct evsel *evsel, } if (a.map != NULL) - map__dso(a.map)->hit = 1; + dso__set_hit(map__dso(a.map)); hist__account_cycles(sample->branch_stack, al, sample, false, NULL); @@ -252,7 +254,7 @@ static int evsel__add_sample(struct evsel *evsel, struct perf_sample *sample, if (al->sym != NULL) { struct dso *dso = map__dso(al->map); - rb_erase_cached(&al->sym->rb_node, &dso->symbols); + rb_erase_cached(&al->sym->rb_node, dso__symbols(dso)); symbol__delete(al->sym); dso__reset_find_symbol_cache(dso); } @@ -327,77 +329,6 @@ static int hist_entry__tty_annotate(struct hist_entry *he, return symbol__tty_annotate2(&he->ms, evsel); } -static void print_annotated_data_header(struct hist_entry *he, struct evsel *evsel) -{ - struct dso *dso = map__dso(he->ms.map); - int nr_members = 1; - int nr_samples = he->stat.nr_events; - - if (evsel__is_group_event(evsel)) { - struct hist_entry *pair; - - list_for_each_entry(pair, &he->pairs.head, pairs.node) - nr_samples += pair->stat.nr_events; - } - - printf("Annotate type: '%s' in %s (%d samples):\n", - he->mem_type->self.type_name, dso->name, nr_samples); - - if (evsel__is_group_event(evsel)) { - struct evsel *pos; - int i = 0; - - for_each_group_evsel(pos, evsel) - printf(" event[%d] = %s\n", i++, pos->name); - - nr_members = evsel->core.nr_members; - } - - printf("============================================================================\n"); - printf("%*s %10s %10s %s\n", 11 * nr_members, "samples", "offset", "size", "field"); -} - -static void print_annotated_data_type(struct annotated_data_type *mem_type, - struct annotated_member *member, - struct evsel *evsel, int indent) -{ - struct annotated_member *child; - struct type_hist *h = mem_type->histograms[evsel->core.idx]; - int i, nr_events = 1, samples = 0; - - for (i = 0; i < member->size; i++) - samples += h->addr[member->offset + i].nr_samples; - printf(" %10d", samples); - - if (evsel__is_group_event(evsel)) { - struct evsel *pos; - - for_each_group_member(pos, evsel) { - h = mem_type->histograms[pos->core.idx]; - - samples = 0; - for (i = 0; i < member->size; i++) - samples += h->addr[member->offset + i].nr_samples; - printf(" %10d", samples); - } - nr_events = evsel->core.nr_members; - } - - printf(" %10d %10d %*s%s\t%s", - member->offset, member->size, indent, "", member->type_name, - member->var_name ?: ""); - - if (!list_empty(&member->children)) - printf(" {\n"); - - list_for_each_entry(child, &member->children, node) - print_annotated_data_type(mem_type, child, evsel, indent + 4); - - if (!list_empty(&member->children)) - printf("%*s}", 11 * nr_events + 24 + indent, ""); - printf(";\n"); -} - static void print_annotate_data_stat(struct annotated_data_stat *s) { #define PRINT_STAT(fld) if (s->fld) printf("%10d : %s\n", s->fld, #fld) @@ -430,6 +361,7 @@ static void print_annotate_data_stat(struct annotated_data_stat *s) PRINT_STAT(no_typeinfo); PRINT_STAT(invalid_size); PRINT_STAT(bad_offset); + PRINT_STAT(insn_track); printf("\n"); #undef PRINT_STAT @@ -487,7 +419,7 @@ static void hists__find_annotations(struct hists *hists, struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); struct annotation *notes; - if (he->ms.sym == NULL || map__dso(he->ms.map)->annotate_warned) + 
+		if (he->ms.sym == NULL || dso__annotate_warned(map__dso(he->ms.map)))
 			goto find_next;
 
 		if (ann->sym_hist_filter &&
@@ -537,10 +469,32 @@ find_next:
 			goto find_next;
 		}
 
-		print_annotated_data_header(he, evsel);
-		print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0);
-		printf("\n");
-		goto find_next;
+		if (use_browser == 1)
+			key = hist_entry__annotate_data_tui(he, evsel, NULL);
+		else
+			key = hist_entry__annotate_data_tty(he, evsel);
+
+		switch (key) {
+		case -1:
+			if (!ann->skip_missing)
+				return;
+			/* fall through */
+		case K_RIGHT:
+		case '>':
+			next = rb_next(nd);
+			break;
+		case K_LEFT:
+		case '<':
+			next = rb_prev(nd);
+			break;
+		default:
+			return;
+		}
+
+		if (use_browser == 0 || next != NULL)
+			nd = next;
+
+		continue;
 	}
 
 	if (use_browser == 2) {
@@ -632,13 +586,23 @@ static int __cmd_annotate(struct perf_annotate *ann)
 	evlist__for_each_entry(session->evlist, pos) {
 		struct hists *hists = evsel__hists(pos);
 		u32 nr_samples = hists->stats.nr_samples;
+		struct ui_progress prog;
 
 		if (nr_samples > 0) {
 			total_nr_samples += nr_samples;
-			hists__collapse_resort(hists, NULL);
+
+			ui_progress__init(&prog, nr_samples,
+					  "Merging related events...");
+			hists__collapse_resort(hists, &prog);
+			ui_progress__finish();
+
 			/* Don't sort callchain */
 			evsel__reset_sample_bit(pos, CALLCHAIN);
-			evsel__output_resort(pos, NULL);
+
+			ui_progress__init(&prog, nr_samples,
+					  "Sorting events for output...");
+			evsel__output_resort(pos, &prog);
+			ui_progress__finish();
 
 			/*
 			 * An event group needs to display other events too.
@@ -809,8 +773,6 @@ int cmd_annotate(int argc, const char **argv)
 		    "Enable symbol demangling"),
 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
 		    "Enable kernel symbol demangling"),
-	OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
-		    "Show event group information together"),
 	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
 		    "Show a column with the sum of periods"),
 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
@@ -935,9 +897,7 @@ int cmd_annotate(int argc, const char **argv)
 		use_browser = 2;
 #endif
 
-	/* FIXME: only support stdio for now */
 	if (annotate.data_type) {
-		use_browser = 0;
 		annotate_opts.annotate_src = false;
 		symbol_conf.annotate_data_member = true;
 		symbol_conf.annotate_data_sample = true;
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 1a8898d5b560..2c1a9f3d847a 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -109,6 +109,8 @@ static struct bench uprobe_benchmarks[] = {
 	{ "baseline",	"Baseline libc usleep(1000) call",				bench_uprobe_baseline,	},
 	{ "empty",	"Attach empty BPF prog to uprobe on usleep, system wide",	bench_uprobe_empty,	},
 	{ "trace_printk", "Attach trace_printk BPF prog to uprobe on usleep syswide",	bench_uprobe_trace_printk,	},
+	{ "empty_ret",	"Attach empty BPF prog to uretprobe on usleep, system wide",	bench_uprobe_empty_ret,	},
+	{ "trace_printk_ret", "Attach trace_printk BPF prog to uretprobe on usleep syswide", bench_uprobe_trace_printk_ret,},
 	{ NULL,		NULL,								NULL			},
 };
 
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index e2a40f1d9225..b0511d16aeb6 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -286,7 +286,7 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
 		pr_warning("Problems with %s file, consider removing it from the cache\n",
 			   filename);
-	} else if (memcmp(dso->bid.data, bid.data, bid.size)) {
+	} else if (memcmp(dso__bid(dso)->data, bid.data, bid.size)) {
 		pr_warning("Problems with %s file, consider removing it from the cache\n",
 			   filename);
 	}
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index c9037477865a..383d5de36ce4 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -26,16 +26,18 @@ static int buildid__map_cb(struct map *map, void *arg __maybe_unused)
 {
 	const struct dso *dso = map__dso(map);
 	char bid_buf[SBUILD_ID_SIZE];
+	const char *dso_long_name = dso__long_name(dso);
+	const char *dso_short_name = dso__short_name(dso);
 
 	memset(bid_buf, 0, sizeof(bid_buf));
-	if (dso->has_build_id)
-		build_id__sprintf(&dso->bid, bid_buf);
+	if (dso__has_build_id(dso))
+		build_id__sprintf(dso__bid_const(dso), bid_buf);
 	printf("%s %16" PRIx64 " %16" PRIx64, bid_buf, map__start(map), map__end(map));
-	if (dso->long_name != NULL) {
-		printf(" %s", dso->long_name);
-	} else if (dso->short_name != NULL) {
-		printf(" %s", dso->short_name);
-	}
+	if (dso_long_name != NULL)
+		printf(" %s", dso_long_name);
+	else if (dso_short_name != NULL)
+		printf(" %s", dso_short_name);
+
 	printf("\n");
 
 	return 0;
@@ -76,7 +78,7 @@ static int filename__fprintf_build_id(const char *name, FILE *fp)
 
 static bool dso__skip_buildid(struct dso *dso, int with_hits)
 {
-	return with_hits && !dso->hit;
+	return with_hits && !dso__hit(dso);
 }
 
 static int perf_session__list_build_ids(bool force, bool with_hits)
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index f78eea9e2153..c157bd31f2e5 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -38,6 +38,7 @@
 #include "ui/browsers/hists.h"
 #include "thread.h"
 #include "mem2node.h"
+#include "mem-info.h"
 #include "symbol.h"
 #include "ui/ui.h"
 #include "ui/progress.h"
@@ -529,7 +530,7 @@ static int dcacheline_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
 	char buf[20];
 
 	if (he->mem_info)
-		addr = cl_address(he->mem_info->daddr.addr, chk_double_cl);
+		addr = cl_address(mem_info__daddr(he->mem_info)->addr, chk_double_cl);
 
 	return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
 }
@@ -567,7 +568,7 @@ static int offset_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
 	char buf[20];
 
 	if (he->mem_info)
-		addr = cl_offset(he->mem_info->daddr.al_addr, chk_double_cl);
+		addr = cl_offset(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
 
 	return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
 }
@@ -579,10 +580,10 @@ offset_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
 	uint64_t l = 0, r = 0;
 
 	if (left->mem_info)
-		l = cl_offset(left->mem_info->daddr.addr, chk_double_cl);
+		l = cl_offset(mem_info__daddr(left->mem_info)->addr, chk_double_cl);
 
 	if (right->mem_info)
-		r = cl_offset(right->mem_info->daddr.addr, chk_double_cl);
+		r = cl_offset(mem_info__daddr(right->mem_info)->addr, chk_double_cl);
 
 	return (int64_t)(r - l);
 }
@@ -596,7 +597,7 @@ iaddr_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
 	char buf[20];
 
 	if (he->mem_info)
-		addr = he->mem_info->iaddr.addr;
+		addr = mem_info__iaddr(he->mem_info)->addr;
 
 	return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
 }
@@ -2050,7 +2051,7 @@ static int hpp_list__parse(struct perf_hpp_list *hpp_list,
 	perf_hpp__setup_output_field(hpp_list);
 
 	/*
-	 * We dont need other sorting keys other than those
+	 * We don't need other sorting keys other than those
 	 * we already specified. It also really slows down
 	 * the processing a lot with big number of output
 	 * fields, so switching this off for c2c.
 	 */
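The cl_address()/cl_offset() helpers converted in the hunks above reduce a sampled data address to its cacheline base and intra-line offset. A minimal standalone sketch of that arithmetic, assuming 64-byte lines (perf uses 128 when double-cacheline checking is enabled; cl_base/cl_off are illustrative stand-ins, not perf's API):

	#include <inttypes.h>
	#include <stdio.h>

	/* mask off the low bits to get the line base ... */
	static uint64_t cl_base(uint64_t addr, uint64_t line)
	{
		return addr & ~(line - 1);
	}

	/* ... and keep only the low bits for the offset within the line */
	static uint64_t cl_off(uint64_t addr, uint64_t line)
	{
		return addr & (line - 1);
	}

	int main(void)
	{
		uint64_t daddr = 0x7f2a3c41ULL;

		printf("line 0x%" PRIx64 ", offset %" PRIu64 "\n",
		       cl_base(daddr, 64), cl_off(daddr, 64));
		return 0;
	}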
@@ -2319,11 +2320,7 @@ static int setup_nodes(struct perf_session *session)
 
 		nodes[node] = set;
 
-		/* empty node, skip */
-		if (perf_cpu_map__has_any_cpu_or_is_empty(map))
-			continue;
-
-		perf_cpu_map__for_each_cpu(cpu, idx, map) {
+		perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
 			__set_bit(cpu.cpu, set);
 
 			if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
@@ -2596,7 +2593,7 @@ perf_c2c_cacheline_browser__title(struct hist_browser *browser,
 	he = cl_browser->he;
 
 	if (he->mem_info)
-		addr = cl_address(he->mem_info->daddr.addr, chk_double_cl);
+		addr = cl_address(mem_info__daddr(he->mem_info)->addr, chk_double_cl);
 
 	scnprintf(bf, size, "Cacheline 0x%lx", addr);
 	return 0;
@@ -3215,12 +3212,19 @@ static int parse_record_events(const struct option *opt,
 			       const char *str, int unset __maybe_unused)
 {
 	bool *event_set = (bool *) opt->value;
+	struct perf_pmu *pmu;
+
+	pmu = perf_mem_events_find_pmu();
+	if (!pmu) {
+		pr_err("failed: there is no PMU that supports perf c2c\n");
+		exit(-1);
+	}
 
 	if (!strcmp(str, "list")) {
-		perf_mem_events__list();
+		perf_pmu__mem_events_list(pmu);
 		exit(0);
 	}
-	if (perf_mem_events__parse(str))
+	if (perf_pmu__mem_events_parse(pmu, str))
 		exit(-1);
 
 	*event_set = true;
@@ -3238,13 +3242,13 @@ static const char * const *record_mem_usage = __usage_record;
 
 static int perf_c2c__record(int argc, const char **argv)
 {
-	int rec_argc, i = 0, j, rec_tmp_nr = 0;
+	int rec_argc, i = 0, j;
 	const char **rec_argv;
-	char **rec_tmp;
 	int ret;
 	bool all_user = false, all_kernel = false;
 	bool event_set = false;
 	struct perf_mem_event *e;
+	struct perf_pmu *pmu;
 	struct option options[] = {
 	OPT_CALLBACK('e', "event", &event_set, "event",
 		     "event selector. Use 'perf c2c record -e list' to list available events",
@@ -3256,7 +3260,13 @@ static int perf_c2c__record(int argc, const char **argv)
 	OPT_END()
 	};
 
-	if (perf_mem_events__init()) {
+	pmu = perf_mem_events_find_pmu();
+	if (!pmu) {
+		pr_err("failed: no PMU supports the memory events\n");
+		return -1;
+	}
+
+	if (perf_pmu__mem_events_init(pmu)) {
 		pr_err("failed: memory events not supported\n");
 		return -1;
 	}
@@ -3265,22 +3275,16 @@ static int perf_c2c__record(int argc, const char **argv)
 			     PARSE_OPT_KEEP_UNKNOWN);
 
 	/* Max number of arguments multiplied by number of PMUs that can support them. */
-	rec_argc = argc + 11 * perf_pmus__num_mem_pmus();
+	rec_argc = argc + 11 * (perf_pmu__mem_events_num_mem_pmus(pmu) + 1);
 
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 	if (!rec_argv)
 		return -1;
 
-	rec_tmp = calloc(rec_argc + 1, sizeof(char *));
-	if (!rec_tmp) {
-		free(rec_argv);
-		return -1;
-	}
-
 	rec_argv[i++] = "record";
 
 	if (!event_set) {
-		e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
+		e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);
 		/*
 		 * The load and store operations are required, use the event
 		 * PERF_MEM_EVENTS__LOAD_STORE if it is supported.
@@ -3289,15 +3293,15 @@ static int perf_c2c__record(int argc, const char **argv)
 			e->record = true;
 			rec_argv[i++] = "-W";
 		} else {
-			e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+			e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
 			e->record = true;
 
-			e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
+			e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE);
 			e->record = true;
 		}
 	}
 
-	e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+	e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
 	if (e->record)
 		rec_argv[i++] = "-W";
 
@@ -3305,7 +3309,7 @@ static int perf_c2c__record(int argc, const char **argv)
 	rec_argv[i++] = "--phys-data";
 	rec_argv[i++] = "--sample-cpu";
 
-	ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &rec_tmp_nr);
+	ret = perf_mem_events__record_args(rec_argv, &i);
 	if (ret)
 		goto out;
 
@@ -3332,10 +3336,6 @@ static int perf_c2c__record(int argc, const char **argv)
 	ret = cmd_record(i, rec_argv);
 out:
-	for (i = 0; i < rec_tmp_nr; i++)
-		free(rec_tmp[i]);
-
-	free(rec_tmp);
 	free(rec_argv);
 	return ret;
 }
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
index 83954af36753..de76bbc50bfb 100644
--- a/tools/perf/builtin-daemon.c
+++ b/tools/perf/builtin-daemon.c
@@ -523,7 +523,7 @@ static int daemon_session__control(struct daemon_session *session,
 		 session->base, SESSION_CONTROL);
 
 	control = open(control_path, O_WRONLY|O_NONBLOCK);
-	if (!control)
+	if (control < 0)
 		return -1;
 
 	if (do_ack) {
@@ -532,7 +532,7 @@ static int daemon_session__control(struct daemon_session *session,
 			 session->base, SESSION_ACK);
 
 		ack = open(ack_path, O_RDONLY, O_NONBLOCK);
-		if (!ack) {
+		if (ack < 0) {
 			close(control);
 			return -1;
 		}
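The builtin-daemon.c hunks above fix a classic error check: open(2) returns -1 on failure, and 0 is a perfectly valid file descriptor, so `if (!control)` both misses real failures and misfires if fd 0 happens to be free. A minimal sketch of the corrected pattern (the path is made up for illustration):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical control-fifo path, for illustration only */
		int fd = open("/tmp/perf-session/control", O_WRONLY | O_NONBLOCK);

		if (fd < 0) {		/* not "if (!fd)": 0 is a valid fd */
			perror("open");
			return 1;
		}
		close(fd);
		return 0;
	}

Note, in passing, that the unchanged context line opens the ack fifo with O_NONBLOCK as the third (mode) argument of open() rather than OR-ing it into the flags; this merge only corrects the return-value checks.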
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index eb3ef5c24b66..a212678d47be 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -445,10 +445,9 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename,
 	}
 
 	if (dso) {
-		mutex_lock(&dso->lock);
-		nsinfo__put(dso->nsinfo);
-		dso->nsinfo = nsi;
-		mutex_unlock(&dso->lock);
+		mutex_lock(dso__lock(dso));
+		dso__set_nsinfo(dso, nsi);
+		mutex_unlock(dso__lock(dso));
 	} else
 		nsinfo__put(nsi);
 
@@ -466,8 +465,8 @@ static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
 	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
 			  event->mmap.filename, NULL, machine);
 
-	if (dso && !dso->hit) {
-		dso->hit = 1;
+	if (dso && !dso__hit(dso)) {
+		dso__set_hit(dso);
 		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
 	}
 	dso__put(dso);
@@ -492,7 +491,7 @@ static int perf_event__repipe_mmap2(struct perf_tool *tool,
 				  event->mmap2.filename, NULL, machine);
 		if (dso) {
 			/* mark it not to inject build-id */
-			dso->hit = 1;
+			dso__set_hit(dso);
 		}
 		dso__put(dso);
 	}
@@ -544,7 +543,7 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
 				  event->mmap2.filename, NULL, machine);
 		if (dso) {
 			/* mark it not to inject build-id */
-			dso->hit = 1;
+			dso__set_hit(dso);
 		}
 		dso__put(dso);
 		perf_event__repipe(tool, event, sample, machine);
@@ -554,8 +553,8 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
 	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
 			  event->mmap2.filename, &dso_id, machine);
 
-	if (dso && !dso->hit) {
-		dso->hit = 1;
+	if (dso && !dso__hit(dso)) {
+		dso__set_hit(dso);
 		dso__inject_build_id(dso, tool, machine, sample->cpumode,
 				     event->mmap2.flags);
 	}
@@ -631,24 +630,24 @@ static int dso__read_build_id(struct dso *dso)
 {
 	struct nscookie nsc;
 
-	if (dso->has_build_id)
+	if (dso__has_build_id(dso))
 		return 0;
 
-	mutex_lock(&dso->lock);
-	nsinfo__mountns_enter(dso->nsinfo, &nsc);
-	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
-		dso->has_build_id = true;
-	else if (dso->nsinfo) {
-		char *new_name = dso__filename_with_chroot(dso, dso->long_name);
+	mutex_lock(dso__lock(dso));
+	nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
+	if (filename__read_build_id(dso__long_name(dso), dso__bid(dso)) > 0)
+		dso__set_has_build_id(dso);
+	else if (dso__nsinfo(dso)) {
+		char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso));
 
-		if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
-			dso->has_build_id = true;
+		if (new_name && filename__read_build_id(new_name, dso__bid(dso)) > 0)
+			dso__set_has_build_id(dso);
 		free(new_name);
 	}
 	nsinfo__mountns_exit(&nsc);
-	mutex_unlock(&dso->lock);
+	mutex_unlock(dso__lock(dso));
 
-	return dso->has_build_id ? 0 : -1;
+	return dso__has_build_id(dso) ? 0 : -1;
 }
 
 static struct strlist *perf_inject__parse_known_build_ids(
@@ -700,14 +699,14 @@ static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
 		dso_name = strchr(build_id, ' ');
 		bid_len = dso_name - pos->s;
 		dso_name = skip_spaces(dso_name);
-		if (strcmp(dso->long_name, dso_name))
+		if (strcmp(dso__long_name(dso), dso_name))
 			continue;
 		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
-			dso->bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
-					     hex(build_id[2 * ix + 1]));
+			dso__bid(dso)->data[ix] = (hex(build_id[2 * ix]) << 4 |
+						   hex(build_id[2 * ix + 1]));
 		}
-		dso->bid.size = bid_len / 2;
-		dso->has_build_id = 1;
+		dso__bid(dso)->size = bid_len / 2;
+		dso__set_has_build_id(dso);
 		return true;
 	}
 	return false;
@@ -720,9 +719,9 @@ static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
 						  tool);
 	int err;
 
-	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
+	if (is_anon_memory(dso__long_name(dso)) || flags & MAP_HUGETLB)
 		return 0;
-	if (is_no_dso_memory(dso->long_name))
+	if (is_no_dso_memory(dso__long_name(dso)))
 		return 0;
 
 	if (inject->known_build_ids != NULL &&
@@ -730,14 +729,14 @@ static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
 		return 1;
 
 	if (dso__read_build_id(dso) < 0) {
-		pr_debug("no build_id found for %s\n", dso->long_name);
+		pr_debug("no build_id found for %s\n", dso__long_name(dso));
 		return -1;
 	}
 
 	err = perf_event__synthesize_build_id(tool, dso, cpumode,
 					      perf_event__repipe, machine);
 	if (err) {
-		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
+		pr_err("Can't synthesize build_id event for %s\n", dso__long_name(dso));
 		return -1;
 	}
 
@@ -763,8 +762,8 @@ int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
 	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
 		struct dso *dso = map__dso(al.map);
 
-		if (!dso->hit) {
-			dso->hit = 1;
+		if (!dso__hit(dso)) {
+			dso__set_hit(dso);
 			dso__inject_build_id(dso, tool, machine, sample->cpumode,
 					     map__flags(al.map));
 		}
@@ -1146,8 +1145,8 @@ static bool dso__is_in_kernel_space(struct dso *dso)
 		return false;
 
 	return dso__is_kcore(dso) ||
-	       dso->kernel ||
-	       is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
+	       dso__kernel(dso) ||
+	       is_kernel_module(dso__long_name(dso), PERF_RECORD_MISC_CPUMODE_UNKNOWN);
 }
 
 static u64 evlist__first_id(struct evlist *evlist)
@@ -1181,29 +1180,34 @@ static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_
 	if (!machine)
 		return -ENOMEM;
 
-	dso->hit = 1;
+	dso__set_hit(dso);
 
 	return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
 					       process_build_id, machine);
 }
 
+static int guest_session__add_build_ids_cb(struct dso *dso, void *data)
+{
+	struct guest_session *gs = data;
+	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+
+	if (!dso__has_build_id(dso))
+		return 0;
+
+	return synthesize_build_id(inject, dso, gs->machine_pid);
+
+}
+
 static int guest_session__add_build_ids(struct guest_session *gs)
 {
 	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
-	struct machine *machine = &gs->session->machines.host;
-	struct dso *dso;
-	int ret;
 
 	/* Build IDs will be put in the Build ID feature section */
 	perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);
 
-	dsos__for_each_with_build_id(dso, &machine->dsos.head) {
-		ret = synthesize_build_id(inject, dso, gs->machine_pid);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
+	return dsos__for_each_dso(&gs->session->machines.host.dsos,
+				  guest_session__add_build_ids_cb,
+				  gs);
 }
 
 static int guest_session__ksymbol_event(struct perf_tool *tool,
@@ -2122,7 +2126,7 @@ static int __cmd_inject(struct perf_inject *inject)
 		 */
 		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
 		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
-			dsos__hit_all(session);
+			perf_session__dsos_hit_all(session);
 		/*
 		 * The AUX areas have been removed and replaced with
 		 * synthesized hardware events, so clear the feature flag.
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index 7f75c5b73f26..a3c2ffdc1af8 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -38,7 +38,7 @@ static int __cmd_kallsyms(int argc, const char **argv)
 
 		dso = map__dso(map);
 		printf("%s: %s %s %#" PRIx64 "-%#" PRIx64 " (%#" PRIx64 "-%#" PRIx64")\n",
-			symbol->name, dso->short_name, dso->long_name,
+			symbol->name, dso__short_name(dso), dso__long_name(dso),
 			map__unmap_ip(map, symbol->start), map__unmap_ip(map, symbol->end),
 			symbol->start, symbol->end);
 	}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 9714327fd0ea..6fd95be5032b 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -1408,7 +1408,7 @@ static int __cmd_kmem(struct perf_session *session)
 	}
 
 	evlist__for_each_entry(session->evlist, evsel) {
-		if (!strcmp(evsel__name(evsel), "kmem:mm_page_alloc") &&
+		if (evsel__name_is(evsel, "kmem:mm_page_alloc") &&
 		    evsel__field(evsel, "pfn")) {
 			use_pfn = true;
 			break;
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index 0092b9b39611..56e3f3a5e03a 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -2230,7 +2230,7 @@ static int perf_kwork__top(struct perf_kwork *kwork)
 	perf_kwork__top_report(kwork);
 
 out:
-	free(kwork->top_stat.cpus_runtime);
+	zfree(&kwork->top_stat.cpus_runtime);
 	return ret;
 }
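Several hunks in this series (builtin-kwork.c above, builtin-probe.c further down) convert a bare free() on a struct member to zfree(), which takes the address of the pointer and NULLs the slot after freeing, so a later double-free or stale read becomes a harmless no-op. The idiom, reduced to a self-contained sketch (the zfree macro here is a local stand-in for the helper in tools/lib/):

	#include <stdlib.h>

	/* stand-in for perf's zfree(): free the pointee, then clear the slot */
	#define zfree(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

	int main(void)
	{
		char *buf = malloc(16);

		zfree(&buf);	/* buf is NULL afterwards */
		zfree(&buf);	/* safe: free(NULL) is a no-op */
		return buf == NULL ? 0 : 1;
	}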
\t\n"); + int wlen = strcspn(s, " ,\t\n"); + const char *sep = comma ? "," : " "; if ((column + wlen >= max && column > start) || saw_newline) { - fprintf(fp, "\n%*s", start, ""); + fprintf(fp, comma ? ",\n%*s" : "\n%*s", start, ""); column = start + corr; } - n = fprintf(fp, "%s%.*s", column > start ? " " : "", wlen, s); + if (column <= start) + sep = ""; + n = fprintf(fp, "%s%.*s", sep, wlen, s); if (n <= 0) break; saw_newline = s[wlen] == '\n'; s += wlen; + comma = s[0] == ','; column += n; - s = skip_spaces(s); + s = skip_spaces_or_commas(s); } } @@ -208,17 +221,24 @@ static void default_print_metric(void *ps, if (!print_state->last_metricgroups || strcmp(print_state->last_metricgroups, group ?: "")) { if (group && print_state->metricgroups) { - if (print_state->name_only) + if (print_state->name_only) { fprintf(fp, "%s ", group); - else if (print_state->metrics) { - const char *gdesc = describe_metricgroup(group); + } else { + const char *gdesc = print_state->desc + ? describe_metricgroup(group) + : NULL; + const char *print_colon = ""; + + if (print_state->metrics) { + print_colon = ":"; + fputc('\n', fp); + } if (gdesc) - fprintf(fp, "\n%s: [%s]\n", group, gdesc); + fprintf(fp, "%s%s [%s]\n", group, print_colon, gdesc); else - fprintf(fp, "\n%s:\n", group); - } else - fprintf(fp, "%s\n", group); + fprintf(fp, "%s%s\n", group, print_colon); + } } zfree(&print_state->last_metricgroups); print_state->last_metricgroups = strdup(group ?: ""); @@ -306,6 +326,9 @@ static void fix_escape_fprintf(FILE *fp, struct strbuf *buf, const char *fmt, .. case '\n': strbuf_addstr(buf, "\\n"); break; + case '\r': + strbuf_addstr(buf, "\\r"); + break; case '\\': fallthrough; case '\"': diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 230461280e45..7007d26fe654 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -2275,23 +2275,13 @@ setup_args: return -ENOMEM; for (i = 0; i < ARRAY_SIZE(record_args); i++) - rec_argv[i] = strdup(record_args[i]); + rec_argv[i] = record_args[i]; for (j = 0; j < nr_tracepoints; j++) { - const char *ev_name; - - if (has_lock_stat) - ev_name = strdup(lock_tracepoints[j].name); - else - ev_name = strdup(contention_tracepoints[j].name); - - if (!ev_name) { - free(rec_argv); - return -ENOMEM; - } - rec_argv[i++] = "-e"; - rec_argv[i++] = ev_name; + rec_argv[i++] = has_lock_stat + ? 
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 230461280e45..7007d26fe654 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -2275,23 +2275,13 @@ setup_args:
 		return -ENOMEM;
 
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
-		rec_argv[i] = strdup(record_args[i]);
+		rec_argv[i] = record_args[i];
 
 	for (j = 0; j < nr_tracepoints; j++) {
-		const char *ev_name;
-
-		if (has_lock_stat)
-			ev_name = strdup(lock_tracepoints[j].name);
-		else
-			ev_name = strdup(contention_tracepoints[j].name);
-
-		if (!ev_name) {
-			free(rec_argv);
-			return -ENOMEM;
-		}
-
 		rec_argv[i++] = "-e";
-		rec_argv[i++] = ev_name;
+		rec_argv[i++] = has_lock_stat
+				? lock_tracepoints[j].name
+				: contention_tracepoints[j].name;
 	}
 
 	for (j = 0; j < nr_callgraph_args; j++, i++)
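The builtin-lock.c hunk above drops the strdup() round-trip entirely: the tracepoint names are string literals with static storage duration, so the argv array can point at them directly and only the array itself needs freeing, which also removes an allocation-failure path. A reduced sketch of that ownership rule (record_args and the event name are illustrative):

	#include <stdlib.h>

	static const char *record_args[] = { "record", "-R", "-m", "1024" };

	int main(void)
	{
		int n = 4 + 2; /* base args plus room for "-e <event>" */
		const char **argv = calloc(n + 1, sizeof(char *));
		int i;

		if (!argv)
			return 1;
		for (i = 0; i < 4; i++)
			argv[i] = record_args[i]; /* no strdup: literals outlive argv */
		argv[i++] = "-e";
		argv[i++] = "lock:contention_begin";
		/* ... hand argv to the sub-command ... */
		free(argv); /* the elements are not owned, only the array is */
		return 0;
	}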
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 51499c20da01..863fcd735dae 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -43,12 +43,19 @@ static int parse_record_events(const struct option *opt,
 			       const char *str, int unset __maybe_unused)
 {
 	struct perf_mem *mem = *(struct perf_mem **)opt->value;
+	struct perf_pmu *pmu;
+
+	pmu = perf_mem_events_find_pmu();
+	if (!pmu) {
+		pr_err("failed: there is no PMU that supports perf mem\n");
+		exit(-1);
+	}
 
 	if (!strcmp(str, "list")) {
-		perf_mem_events__list();
+		perf_pmu__mem_events_list(pmu);
 		exit(0);
 	}
-	if (perf_mem_events__parse(str))
+	if (perf_pmu__mem_events_parse(pmu, str))
 		exit(-1);
 
 	mem->operation = 0;
@@ -65,13 +72,13 @@ static const char * const *record_mem_usage = __usage;
 
 static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 {
-	int rec_argc, i = 0, j, tmp_nr = 0;
+	int rec_argc, i = 0, j;
 	int start, end;
 	const char **rec_argv;
-	char **rec_tmp;
 	int ret;
 	bool all_user = false, all_kernel = false;
 	struct perf_mem_event *e;
+	struct perf_pmu *pmu;
 	struct option options[] = {
 	OPT_CALLBACK('e', "event", &mem, "event",
 		     "event selector. use 'perf mem record -e list' to list available events",
@@ -84,7 +91,13 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 	OPT_END()
 	};
 
-	if (perf_mem_events__init()) {
+	pmu = perf_mem_events_find_pmu();
+	if (!pmu) {
+		pr_err("failed: no PMU supports the memory events\n");
+		return -1;
+	}
+
+	if (perf_pmu__mem_events_init(pmu)) {
 		pr_err("failed: memory events not supported\n");
 		return -1;
 	}
@@ -93,7 +106,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 			     PARSE_OPT_KEEP_UNKNOWN);
 
 	/* Max number of arguments multiplied by number of PMUs that can support them. */
-	rec_argc = argc + 9 * perf_pmus__num_mem_pmus();
+	rec_argc = argc + 9 * (perf_pmu__mem_events_num_mem_pmus(pmu) + 1);
 
 	if (mem->cpu_list)
 		rec_argc += 2;
@@ -102,18 +115,9 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 	if (!rec_argv)
 		return -1;
 
-	/*
-	 * Save the allocated event name strings.
-	 */
-	rec_tmp = calloc(rec_argc + 1, sizeof(char *));
-	if (!rec_tmp) {
-		free(rec_argv);
-		return -1;
-	}
-
 	rec_argv[i++] = "record";
 
-	e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
+	e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);
 
 	/*
 	 * The load and store operations are required, use the event
@@ -126,17 +130,17 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 		rec_argv[i++] = "-W";
 	} else {
 		if (mem->operation & MEM_OPERATION_LOAD) {
-			e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+			e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
 			e->record = true;
 		}
 
 		if (mem->operation & MEM_OPERATION_STORE) {
-			e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
+			e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE);
 			e->record = true;
 		}
 	}
 
-	e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+	e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
 	if (e->record)
 		rec_argv[i++] = "-W";
 
@@ -149,7 +153,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 		rec_argv[i++] = "--data-page-size";
 
 	start = i;
-	ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &tmp_nr);
+	ret = perf_mem_events__record_args(rec_argv, &i);
 	if (ret)
 		goto out;
 	end = i;
@@ -179,10 +183,6 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 	ret = cmd_record(i, rec_argv);
 out:
-	for (i = 0; i < tmp_nr; i++)
-		free(rec_tmp[i]);
-
-	free(rec_tmp);
 	free(rec_argv);
 	return ret;
 }
@@ -213,7 +213,7 @@ dump_raw_samples(struct perf_tool *tool,
 	if (al.map != NULL) {
 		dso = map__dso(al.map);
 		if (dso)
-			dso->hit = 1;
+			dso__set_hit(dso);
 	}
 
 	field_sep = symbol_conf.field_sep;
@@ -255,7 +255,7 @@ dump_raw_samples(struct perf_tool *tool,
 		symbol_conf.field_sep,
 		sample->data_src,
 		symbol_conf.field_sep,
-		dso ? dso->long_name : "???",
+		dso ? dso__long_name(dso) : "???",
 		al.sym ? al.sym->name : "???");
 out_put:
 	addr_location__exit(&al);
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 019fef8da6a8..003a3bcebfdf 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -325,7 +325,7 @@ static void cleanup_params(void)
 	for (i = 0; i < params->nevents; i++)
 		clear_perf_probe_event(params->events + i);
 	line_range__clear(&params->line_range);
-	free(params->target);
+	zfree(&params->target);
 	strfilter__delete(params->filter);
 	nsinfo__put(params->nsi);
 	zfree(&params);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 86c910125172..66a3de8ac661 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -332,7 +332,7 @@ static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
 	} else {
 		/*
 		 * aio write request may require restart with the
-		 * reminder if the kernel didn't write whole
+		 * remainder if the kernel didn't write whole
 		 * chunk at once.
 		 */
 		rem_off = cblock->aio_offset + written;
@@ -400,7 +400,7 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
  *
  * Coping can be done in two steps in case the chunk of profiling data
  * crosses the upper bound of the kernel buffer. In this case we first move
- * part of data from map->start till the upper bound and then the reminder
+ * part of data from map->start till the upper bound and then the remainder
  * from the beginning of the kernel buffer till the end of the data chunk.
 */
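The comment above describes the usual ring-buffer wrap-around copy: when a sampled chunk crosses the top of the kernel buffer it is copied in two steps, first from the start offset up to the upper bound and then the remainder from the buffer base. As a standalone sketch of that two-step move:

	#include <stdio.h>
	#include <string.h>

	static void ring_copy(char *dst, const char *ring, size_t size,
			      size_t start, size_t len)
	{
		size_t first = size - start;	/* bytes up to the top */

		if (len <= first) {
			memcpy(dst, ring + start, len);
		} else {
			memcpy(dst, ring + start, first);	/* start..top */
			memcpy(dst + first, ring, len - first);	/* the remainder */
		}
	}

	int main(void)
	{
		const char ring[] = "ABCDEFGH";
		char out[6] = {0};

		ring_copy(out, ring, 8, 5, 5);	/* wraps: "FGH" then "AB" */
		printf("%.5s\n", out);		/* prints FGHAB */
		return 0;
	}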
@@ -1355,8 +1355,6 @@ static int record__open(struct record *rec)
 	struct record_opts *opts = &rec->opts;
 	int rc = 0;
 
-	evlist__config(evlist, opts, &callchain_param);
-
 	evlist__for_each_entry(evlist, pos) {
 try_again:
 		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
@@ -1773,8 +1771,11 @@ record__finish_output(struct record *rec)
 	struct perf_data *data = &rec->data;
 	int fd = perf_data__fd(data);
 
-	if (data->is_pipe)
+	if (data->is_pipe) {
+		/* Just to display approx. size */
+		data->file.size = rec->bytes_written;
 		return;
+	}
 
 	rec->session->header.data_size += rec->bytes_written;
 	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
@@ -1787,7 +1788,7 @@ record__finish_output(struct record *rec)
 		process_buildids(rec);
 
 		if (rec->buildid_all)
-			dsos__hit_all(rec->session);
+			perf_session__dsos_hit_all(rec->session);
 	}
 	perf_session__write_header(rec->session, rec->evlist, fd, true);
 
@@ -1830,8 +1831,8 @@ static int
 record__switch_output(struct record *rec, bool at_exit)
 {
 	struct perf_data *data = &rec->data;
+	char *new_filename = NULL;
 	int fd, err;
-	char *new_filename;
 
 	/* Same Size: "2015122520103046"*/
 	char timestamp[] = "InvalidTimestamp";
@@ -1853,16 +1854,17 @@ record__switch_output(struct record *rec, bool at_exit)
 	}
 
 	fd = perf_data__switch(data, timestamp,
-				    rec->session->header.data_offset,
-				    at_exit, &new_filename);
+			       rec->session->header.data_offset,
+			       at_exit, &new_filename);
 	if (fd >= 0 && !at_exit) {
 		rec->bytes_written = 0;
 		rec->session->header.data_size = 0;
 	}
 
-	if (!quiet)
+	if (!quiet) {
 		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
 			data->path, timestamp);
+	}
 
 	if (rec->switch_output.num_files) {
 		int n = rec->switch_output.cur_file + 1;
@@ -2472,8 +2474,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 	if (data->is_pipe && rec->evlist->core.nr_entries == 1)
 		rec->opts.sample_id = true;
 
+	if (rec->timestamp_filename && perf_data__is_pipe(data)) {
+		rec->timestamp_filename = false;
+		pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n");
+	}
+
 	evlist__uniquify_name(rec->evlist);
 
+	evlist__config(rec->evlist, opts, &callchain_param);
+
 	/* Debug message used by test scripts */
 	pr_debug3("perf record opening and mmapping events\n");
 	if (record__open(rec) != 0) {
@@ -2872,10 +2881,10 @@ out_delete_session:
 	}
 #endif
 	zstd_fini(&session->zstd_data);
-	perf_session__delete(session);
-
 	if (!opts->no_bpf_event)
 		evlist__stop_sb_thread(rec->sb_evlist);
+
+	perf_session__delete(session);
 	return status;
 }
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index f2ed2b7e80a3..69618fb0110b 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -31,6 +31,7 @@
 #include "util/evsel.h"
 #include "util/evswitch.h"
 #include "util/header.h"
+#include "util/mem-info.h"
 #include "util/session.h"
 #include "util/srcline.h"
 #include "util/tool.h"
@@ -59,6 +60,7 @@
 #include <linux/ctype.h>
 #include <signal.h>
 #include <linux/bitmap.h>
+#include <linux/list_sort.h>
 #include <linux/string.h>
 #include <linux/stringify.h>
 #include <linux/time64.h>
@@ -171,7 +173,7 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
 	struct mem_info *mi;
 	struct branch_info *bi;
 
-	if (!ui__has_annotation() && !rep->symbol_ipc && !rep->data_type)
+	if (!ui__has_annotation() && !rep->symbol_ipc)
 		return 0;
 
 	if (sort__mode == SORT_MODE__BRANCH) {
@@ -184,7 +186,7 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
 	} else if (rep->mem_mode) {
 		mi = he->mem_info;
-		err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel);
+		err = addr_map_symbol__inc_samples(mem_info__daddr(mi), sample, evsel);
 		if (err)
 			goto out;
 
@@ -321,7 +323,7 @@ static int process_sample_event(struct perf_tool *tool,
 	}
 
 	if (al.map != NULL)
-		map__dso(al.map)->hit = 1;
+		dso__set_hit(map__dso(al.map));
 
 	if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
 		hist__account_cycles(sample->branch_stack, &al, sample,
@@ -427,7 +429,7 @@ static int report__setup_sample_type(struct report *rep)
 	 * compatibility, set the bit if it's an old perf data file.
 	 */
 	evlist__for_each_entry(session->evlist, evsel) {
-		if (strstr(evsel->name, "arm_spe") &&
+		if (strstr(evsel__name(evsel), "arm_spe") &&
 			!(sample_type & PERF_SAMPLE_DATA_SRC)) {
 			evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
 			sample_type |= PERF_SAMPLE_DATA_SRC;
@@ -608,7 +610,7 @@ static void report__warn_kptr_restrict(const struct report *rep)
 		return;
 
 	if (kernel_map == NULL ||
-	    (map__dso(kernel_map)->hit &&
+	    (dso__hit(map__dso(kernel_map)) &&
 	     (kernel_kmap->ref_reloc_sym == NULL ||
 	      kernel_kmap->ref_reloc_sym->addr == 0))) {
 		const char *desc =
@@ -828,35 +830,6 @@ static void tasks_setup(struct report *rep)
 	rep->tool.no_warn = true;
 }
 
-struct task {
-	struct thread		*thread;
-	struct list_head	 list;
-	struct list_head	 children;
-};
-
-static struct task *tasks_list(struct task *task, struct machine *machine)
-{
-	struct thread *parent_thread, *thread = task->thread;
-	struct task *parent_task;
-
-	/* Already listed. */
-	if (!list_empty(&task->list))
-		return NULL;
-
-	/* Last one in the chain. */
-	if (thread__ppid(thread) == -1)
-		return task;
-
-	parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
-	if (!parent_thread)
-		return ERR_PTR(-ENOENT);
-
-	parent_task = thread__priv(parent_thread);
-	thread__put(parent_thread);
-	list_add_tail(&task->list, &parent_task->children);
-	return tasks_list(parent_task, machine);
-}
-
 struct maps__fprintf_task_args {
 	int indent;
 	FILE *fp;
@@ -878,7 +851,7 @@ static int maps__fprintf_task_cb(struct map *map, void *data)
 		       prot & PROT_EXEC ? 'x' : '-',
 		       map__flags(map) ? 's' : 'p',
 		       map__pgoff(map),
-		       dso->id.ino, dso->name);
+		       dso__id_const(dso)->ino, dso__name(dso));
 
 	if (ret < 0)
 		return ret;
@@ -900,89 +873,156 @@ static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
 	return args.printed;
 }
 
-static void task__print_level(struct task *task, FILE *fp, int level)
+static int thread_level(struct machine *machine, const struct thread *thread)
 {
-	struct thread *thread = task->thread;
-	struct task *child;
-	int comm_indent = fprintf(fp, "  %8d %8d %8d |%*s",
-				  thread__pid(thread), thread__tid(thread),
-				  thread__ppid(thread), level, "");
+	struct thread *parent_thread;
+	int res;
 
-	fprintf(fp, "%s\n", thread__comm_str(thread));
+	if (thread__tid(thread) <= 0)
+		return 0;
 
-	maps__fprintf_task(thread__maps(thread), comm_indent, fp);
+	if (thread__ppid(thread) <= 0)
+		return 1;
 
-	if (!list_empty(&task->children)) {
-		list_for_each_entry(child, &task->children, list)
-			task__print_level(child, fp, level + 1);
+	parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
+	if (!parent_thread) {
+		pr_err("Missing parent thread of %d\n", thread__tid(thread));
+		return 0;
 	}
+	res = 1 + thread_level(machine, parent_thread);
+	thread__put(parent_thread);
+	return res;
 }
 
-static int tasks_print(struct report *rep, FILE *fp)
+static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp)
 {
-	struct perf_session *session = rep->session;
-	struct machine      *machine = &session->machines.host;
-	struct task *tasks, *task;
-	unsigned int nr = 0, itask = 0, i;
-	struct rb_node *nd;
-	LIST_HEAD(list);
+	int level = thread_level(machine, thread);
+	int comm_indent = fprintf(fp, "  %8d %8d %8d |%*s",
+				  thread__pid(thread), thread__tid(thread),
+				  thread__ppid(thread), level, "");
 
-	/*
-	 * No locking needed while accessing machine->threads,
-	 * because --tasks is single threaded command.
-	 */
+	fprintf(fp, "%s\n", thread__comm_str(thread));
 
-	/* Count all the threads. */
-	for (i = 0; i < THREADS__TABLE_SIZE; i++)
-		nr += machine->threads[i].nr;
+	maps__fprintf_task(thread__maps(thread), comm_indent, fp);
+}
 
-	tasks = malloc(sizeof(*tasks) * nr);
-	if (!tasks)
-		return -ENOMEM;
+/*
+ * Sort two thread list nodes such that they form a tree. The first node is the
+ * root of the tree, its children are ordered numerically after it. If a child
+ * has children itself then they appear immediately after their parent. For
+ * example, the 4 threads in the order they'd appear in the list:
+ * - init with a TID 1 and a parent of 0
+ * - systemd with a TID 3000 and a parent of init/1
+ * - systemd child thread with TID 4000, the parent is 3000
+ * - NetworkManager is a child of init with a TID of 3500.
+ */
+static int task_list_cmp(void *priv, const struct list_head *la, const struct list_head *lb)
+{
+	struct machine *machine = priv;
+	struct thread_list *task_a = list_entry(la, struct thread_list, list);
+	struct thread_list *task_b = list_entry(lb, struct thread_list, list);
+	struct thread *a = task_a->thread;
+	struct thread *b = task_b->thread;
+	int level_a, level_b, res;
+
+	/* Same thread? */
+	if (thread__tid(a) == thread__tid(b))
+		return 0;
 
-	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
-		struct threads *threads = &machine->threads[i];
+	/* Compare a and b to root. */
+	if (thread__tid(a) == 0)
+		return -1;
 
-		for (nd = rb_first_cached(&threads->entries); nd;
-		     nd = rb_next(nd)) {
-			task = tasks + itask++;
+	if (thread__tid(b) == 0)
+		return 1;
 
-			task->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
-			INIT_LIST_HEAD(&task->children);
-			INIT_LIST_HEAD(&task->list);
-			thread__set_priv(task->thread, task);
-		}
-	}
+	/* If parents match sort by tid. */
+	if (thread__ppid(a) == thread__ppid(b))
+		return thread__tid(a) < thread__tid(b) ? -1 : 1;
 
 	/*
-	 * Iterate every task down to the unprocessed parent
-	 * and link all in task children list. Task with no
-	 * parent is added into 'list'.
+	 * Find a and b such that if they are a child of each other a and b's
+	 * tid's match, otherwise a and b have a common parent and distinct
+	 * tid's to sort by. First make the depths of the threads match.
 	 */
-	for (itask = 0; itask < nr; itask++) {
-		task = tasks + itask;
-
-		if (!list_empty(&task->list))
-			continue;
-
-		task = tasks_list(task, machine);
-		if (IS_ERR(task)) {
-			pr_err("Error: failed to process tasks\n");
-			free(tasks);
-			return PTR_ERR(task);
+	level_a = thread_level(machine, a);
+	level_b = thread_level(machine, b);
+	a = thread__get(a);
+	b = thread__get(b);
+	for (int i = level_a; i > level_b; i--) {
+		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a));
+
+		thread__put(a);
+		if (!parent) {
+			pr_err("Missing parent thread of %d\n", thread__tid(a));
+			thread__put(b);
+			return -1;
 		}
+		a = parent;
+	}
+	for (int i = level_b; i > level_a; i--) {
+		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b));
 
-		if (task)
-			list_add_tail(&task->list, &list);
+		thread__put(b);
+		if (!parent) {
+			pr_err("Missing parent thread of %d\n", thread__tid(b));
+			thread__put(a);
+			return 1;
+		}
+		b = parent;
+	}
+	/* Search up to a common parent. */
+	while (thread__ppid(a) != thread__ppid(b)) {
+		struct thread *parent;
+
+		parent = machine__find_thread(machine, -1, thread__ppid(a));
+		thread__put(a);
+		if (!parent)
+			pr_err("Missing parent thread of %d\n", thread__tid(a));
+		a = parent;
+		parent = machine__find_thread(machine, -1, thread__ppid(b));
+		thread__put(b);
+		if (!parent)
+			pr_err("Missing parent thread of %d\n", thread__tid(b));
+		b = parent;
+		if (!a || !b) {
+			/* Handle missing parent (unexpected) with some sanity. */
+			thread__put(a);
+			thread__put(b);
+			return !a && !b ? 0 : (!a ? -1 : 1);
+		}
+	}
+	if (thread__tid(a) == thread__tid(b)) {
+		/* a is a child of b or vice-versa, deeper levels appear later. */
+		res = level_a < level_b ? -1 : (level_a > level_b ? 1 : 0);
+	} else {
+		/* Sort by tid now the parent is the same. */
+		res = thread__tid(a) < thread__tid(b) ? -1 : 1;
+	}
+	thread__put(a);
+	thread__put(b);
+	return res;
+}
+
+static int tasks_print(struct report *rep, FILE *fp)
+{
+	struct machine *machine = &rep->session->machines.host;
+	LIST_HEAD(tasks);
+	int ret;
 
-	fprintf(fp, "# %8s %8s %8s  %s\n", "pid", "tid", "ppid", "comm");
+	ret = machine__thread_list(machine, &tasks);
+	if (!ret) {
+		struct thread_list *task;
 
-	list_for_each_entry(task, &list, list)
-		task__print_level(task, fp, 0);
+		list_sort(machine, &tasks, task_list_cmp);
 
-	free(tasks);
-	return 0;
+		fprintf(fp, "# %8s %8s %8s  %s\n", "pid", "tid", "ppid", "comm");
+
+		list_for_each_entry(task, &tasks, list)
+			task__print_level(machine, task->thread, fp);
+	}
+	thread_list__delete(&tasks);
+	return ret;
 }
 
 static int __cmd_report(struct report *rep)
@@ -1410,7 +1450,7 @@ int cmd_report(int argc, const char **argv)
 		   "only show processor socket that match with this filter"),
 	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
 		    "Show raw trace event output (do not use print fmt or plugins)"),
-	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
+	OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy,
 		    "Show entries in a hierarchy"),
 	OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
 		    "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
@@ -1655,6 +1695,11 @@ repeat:
 	else
 		use_browser = 0;
 
+	if (report.data_type && use_browser == 1) {
+		symbol_conf.annotate_data_member = true;
+		symbol_conf.annotate_data_sample = true;
+	}
+
 	if (sort_order && strstr(sort_order, "ipc")) {
 		parse_options_usage(report_usage, options, "s", 1);
 		goto error;
@@ -1766,6 +1811,8 @@ repeat:
 	} else
 		ret = 0;
 
+	if (!use_browser && (verbose > 2 || debug_kmaps))
+		perf_session__dump_kmaps(session);
 error:
 	if (report.ptime_range) {
 		itrace_synth_opts__clear_time_range(&itrace_synth_opts);
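task_list_cmp() above linearizes the parent/child relation into a pre-order walk: it equalizes the depth of the two threads, climbs both in lockstep to a common parent, and decides there, so ancestors sort before descendants and siblings sort by tid. The same idea on a toy parent-pointer array, as an illustrative sketch (the node ids and parent table are made up):

	#include <stdio.h>

	static const int parent[] = { -1, 0, 1, 1, 0 }; /* node 0 is the root */

	static int depth(int n)
	{
		int d = 0;

		while (parent[n] != -1) {
			n = parent[n];
			d++;
		}
		return d;
	}

	static int tree_cmp(int a, int b)
	{
		int da = depth(a), db = depth(b);
		int la = da, lb = db;

		if (a == b)
			return 0;
		while (la > lb) { a = parent[a]; la--; }	/* equalize depths */
		while (lb > la) { b = parent[b]; lb--; }
		if (a == b)				/* ancestor/descendant pair */
			return da < db ? -1 : 1;
		while (parent[a] != parent[b]) {	/* climb in lockstep */
			a = parent[a];
			b = parent[b];
		}
		return a < b ? -1 : 1;			/* siblings: order by id */
	}

	int main(void)
	{
		printf("%d %d\n", tree_cmp(2, 4), tree_cmp(0, 3)); /* -1 -1 */
		return 0;
	}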
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index dd6065afbbaf..5977c49ae2c7 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -92,24 +92,6 @@ struct sched_atom {
 	struct task_desc	*wakee;
 };
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
-
-/* task state bitmask, copied from include/linux/sched.h */
-#define TASK_RUNNING		0
-#define TASK_INTERRUPTIBLE	1
-#define TASK_UNINTERRUPTIBLE	2
-#define __TASK_STOPPED		4
-#define __TASK_TRACED		8
-/* in tsk->exit_state */
-#define EXIT_DEAD		16
-#define EXIT_ZOMBIE		32
-#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
-/* in tsk->state again */
-#define TASK_DEAD		64
-#define TASK_WAKEKILL		128
-#define TASK_WAKING		256
-#define TASK_PARKED		512
-
 enum thread_state {
 	THREAD_SLEEPING = 0,
 	THREAD_WAIT_CPU,
@@ -266,7 +248,7 @@ struct thread_runtime {
 	u64 total_preempt_time;
 	u64 total_delay_time;
 
-	int last_state;
+	char last_state;
 
 	char shortname[3];
 	bool comm_changed;
@@ -436,7 +418,7 @@ static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *t
 }
 
 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
-				  u64 timestamp, u64 task_state __maybe_unused)
+				  u64 timestamp, const char task_state __maybe_unused)
 {
 	struct sched_atom *event = get_new_event(task, timestamp);
 
@@ -860,7 +842,7 @@ static int replay_switch_event(struct perf_sched *sched,
 		   *next_comm  = evsel__strval(evsel, sample, "next_comm");
 	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
 		  next_pid = evsel__intval(evsel, sample, "next_pid");
-	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
 	struct task_desc *prev, __maybe_unused *next;
 	u64 timestamp0, timestamp = sample->time;
 	int cpu = sample->cpu;
@@ -1050,13 +1032,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
 	return 0;
 }
 
-static char sched_out_state(u64 prev_state)
-{
-	const char *str = TASK_STATE_TO_CHAR_STR;
-
-	return str[prev_state];
-}
-
 static int
 add_sched_out_event(struct work_atoms *atoms,
 		    char run_state,
@@ -1132,7 +1107,7 @@ static int latency_switch_event(struct perf_sched *sched,
 {
 	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
 		  next_pid = evsel__intval(evsel, sample, "next_pid");
-	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
 	struct work_atoms *out_events, *in_events;
 	struct thread *sched_out, *sched_in;
 	u64 timestamp0, timestamp = sample->time;
@@ -1168,7 +1143,7 @@ static int latency_switch_event(struct perf_sched *sched,
 			goto out_put;
 		}
 	}
-	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
+	if (add_sched_out_event(out_events, prev_state, timestamp))
 		return -1;
 
 	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
@@ -2033,24 +2008,12 @@ static void timehist_header(struct perf_sched *sched)
 	printf("\n");
 }
 
-static char task_state_char(struct thread *thread, int state)
-{
-	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
-	unsigned bit = state ? ffs(state) : 0;
-
-	/* 'I' for idle */
-	if (thread__tid(thread) == 0)
-		return 'I';
-
-	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
-}
-
 static void timehist_print_sample(struct perf_sched *sched,
 				  struct evsel *evsel,
 				  struct perf_sample *sample,
 				  struct addr_location *al,
 				  struct thread *thread,
-				  u64 t, int state)
+				  u64 t, const char state)
 {
 	struct thread_runtime *tr = thread__priv(thread);
 	const char *next_comm = evsel__strval(evsel, sample, "next_comm");
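With the copied TASK_STATE_TO_CHAR_STR table gone, the sched tooling receives the sched-out state as the single character evsel__taskstate() returns ('R' runnable, 'D' uninterruptible, 'S' sleeping, and so on), and timehist classifies wait time directly from it, as the next hunks show. A sketch of that classification (the bucket names are illustrative, not perf output):

	#include <stdio.h>

	/* map a sched-out state char to the wait bucket timehist accounts */
	static const char *wait_bucket(char last_state)
	{
		switch (last_state) {
		case 'R': return "preempt";	/* was runnable: preempted */
		case 'D': return "iowait";	/* uninterruptible sleep */
		default:  return "sleep";	/* everything else */
		}
	}

	int main(void)
	{
		printf("%s %s %s\n",
		       wait_bucket('R'), wait_bucket('D'), wait_bucket('S'));
		return 0;
	}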
@@ -2091,7 +2054,7 @@ static void timehist_print_sample(struct perf_sched *sched,
 	print_sched_time(tr->dt_run, 6);
 
 	if (sched->show_state)
-		printf(" %5c ", task_state_char(thread, state));
+		printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
 
 	if (sched->show_next) {
 		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
@@ -2163,9 +2126,9 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
 	else if (r->last_time) {
 		u64 dt_wait = tprev - r->last_time;
 
-		if (r->last_state == TASK_RUNNING)
+		if (r->last_state == 'R')
 			r->dt_preempt = dt_wait;
-		else if (r->last_state == TASK_UNINTERRUPTIBLE)
+		else if (r->last_state == 'D')
 			r->dt_iowait = dt_wait;
 		else
 			r->dt_sleep = dt_wait;
@@ -2185,7 +2148,7 @@ static bool is_idle_sample(struct perf_sample *sample,
 			   struct evsel *evsel)
 {
 	/* pid 0 == swapper == idle task */
-	if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
+	if (evsel__name_is(evsel, "sched:sched_switch"))
 		return evsel__intval(evsel, sample, "prev_pid") == 0;
 
 	return sample->pid == 0;
@@ -2412,7 +2375,7 @@ static bool timehist_skip_sample(struct perf_sched *sched,
 	}
 
 	if (sched->idle_hist) {
-		if (strcmp(evsel__name(evsel), "sched:sched_switch"))
+		if (!evsel__name_is(evsel, "sched:sched_switch"))
 			rc = true;
 		else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
 			 evsel__intval(evsel, sample, "next_pid") != 0)
@@ -2590,7 +2553,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
 	struct thread_runtime *tr = NULL;
 	u64 tprev, t = sample->time;
 	int rc = 0;
-	int state = evsel__intval(evsel, sample, "prev_state");
+	const char state = evsel__taskstate(evsel, sample, "prev_state");
 
 	addr_location__init(&al);
 	if (machine__resolve(machine, &al, sample) < 0) {
@@ -3000,8 +2963,11 @@ static int timehist_check_attr(struct perf_sched *sched,
 			return -1;
 		}
 
-		if (sched->show_callchain && !evsel__has_callchain(evsel)) {
-			pr_info("Samples do not have callchains.\n");
+		/* only need to save callchain related to sched_switch event */
+		if (sched->show_callchain &&
+		    evsel__name_is(evsel, "sched:sched_switch") &&
+		    !evsel__has_callchain(evsel)) {
+			pr_info("Samples of sched_switch event do not have callchains.\n");
 			sched->show_callchain = 0;
 			symbol_conf.use_callchain = 0;
 		}
@@ -3204,20 +3170,50 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
 	}
 }
 
+static int setup_cpus_switch_event(struct perf_sched *sched)
+{
+	unsigned int i;
+
+	sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
+	if (!sched->cpu_last_switched)
+		return -1;
+
+	sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
+	if (!sched->curr_pid) {
+		zfree(&sched->cpu_last_switched);
+		return -1;
+	}
+
+	for (i = 0; i < MAX_CPUS; i++)
+		sched->curr_pid[i] = -1;
+
+	return 0;
+}
+
+static void free_cpus_switch_event(struct perf_sched *sched)
+{
+	zfree(&sched->curr_pid);
+	zfree(&sched->cpu_last_switched);
+}
+
 static int perf_sched__lat(struct perf_sched *sched)
 {
+	int rc = -1;
 	struct rb_node *next;
 
 	setup_pager();
 
+	if (setup_cpus_switch_event(sched))
+		return rc;
+
 	if (perf_sched__read_events(sched))
-		return -1;
+		goto out_free_cpus_switch_event;
 
 	perf_sched__merge_lat(sched);
 	perf_sched__sort_lat(sched);
 
 	printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
-	printf("  Task                  |   Runtime ms  | Switches | Avg delay ms    | Max delay ms    | Max delay start           | Max delay end          |\n");
+	printf("  Task                  |   Runtime ms  |  Count   | Avg delay ms    | Max delay ms    | Max delay start           | Max delay end          |\n");
 	printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
 	next = rb_first_cached(&sched->sorted_atom_root);
@@ -3240,13 +3236,15 @@ static int perf_sched__lat(struct perf_sched *sched)
 	print_bad_events(sched);
 	printf("\n");
 
-	return 0;
+	rc = 0;
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+	return rc;
 }
 
 static int setup_map_cpus(struct perf_sched *sched)
 {
-	struct perf_cpu_map *map;
-
 	sched->max_cpu.cpu  = sysconf(_SC_NPROCESSORS_CONF);
 
 	if (sched->map.comp) {
@@ -3255,16 +3253,15 @@ static int setup_map_cpus(struct perf_sched *sched)
 			return -1;
 	}
 
-	if (!sched->map.cpus_str)
-		return 0;
-
-	map = perf_cpu_map__new(sched->map.cpus_str);
-	if (!map) {
-		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
-		return -1;
+	if (sched->map.cpus_str) {
+		sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
+		if (!sched->map.cpus) {
+			pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
+			zfree(&sched->map.comp_cpus);
+			return -1;
+		}
 	}
 
-	sched->map.cpus = map;
 	return 0;
 }
 
@@ -3304,33 +3301,69 @@ static int setup_color_cpus(struct perf_sched *sched)
 
 static int perf_sched__map(struct perf_sched *sched)
 {
+	int rc = -1;
+
+	sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
+	if (!sched->curr_thread)
+		return rc;
+
+	if (setup_cpus_switch_event(sched))
+		goto out_free_curr_thread;
+
 	if (setup_map_cpus(sched))
-		return -1;
+		goto out_free_cpus_switch_event;
 
 	if (setup_color_pids(sched))
-		return -1;
+		goto out_put_map_cpus;
 
 	if (setup_color_cpus(sched))
-		return -1;
+		goto out_put_color_pids;
 
 	setup_pager();
 	if (perf_sched__read_events(sched))
-		return -1;
+		goto out_put_color_cpus;
+
+	rc = 0;
 	print_bad_events(sched);
-	return 0;
+
+out_put_color_cpus:
+	perf_cpu_map__put(sched->map.color_cpus);
+
+out_put_color_pids:
+	perf_thread_map__put(sched->map.color_pids);
+
+out_put_map_cpus:
+	zfree(&sched->map.comp_cpus);
+	perf_cpu_map__put(sched->map.cpus);
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
+out_free_curr_thread:
+	zfree(&sched->curr_thread);
+	return rc;
 }
 
 static int perf_sched__replay(struct perf_sched *sched)
 {
+	int ret;
 	unsigned long i;
 
+	mutex_init(&sched->start_work_mutex);
+	mutex_init(&sched->work_done_wait_mutex);
+
+	ret = setup_cpus_switch_event(sched);
+	if (ret)
+		goto out_mutex_destroy;
+
 	calibrate_run_measurement_overhead(sched);
 	calibrate_sleep_measurement_overhead(sched);
 
 	test_calibrations(sched);
 
-	if (perf_sched__read_events(sched))
-		return -1;
+	ret = perf_sched__read_events(sched);
+	if (ret)
+		goto out_free_cpus_switch_event;
 
 	printf("nr_run_events:        %ld\n", sched->nr_run_events);
 	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
@@ -3355,7 +3388,14 @@ static int perf_sched__replay(struct perf_sched *sched)
 	sched->thread_funcs_exit = true;
 	destroy_tasks(sched);
-	return 0;
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
+out_mutex_destroy:
+	mutex_destroy(&sched->start_work_mutex);
+	mutex_destroy(&sched->work_done_wait_mutex);
+	return ret;
 }
 
 static void setup_sorting(struct perf_sched *sched, const struct option *options,
@@ -3590,28 +3630,7 @@ int cmd_sched(int argc, const char **argv)
 		.switch_event	    = replay_switch_event,
 		.fork_event	    = replay_fork_event,
 	};
-	unsigned int i;
-	int ret = 0;
-
-	mutex_init(&sched.start_work_mutex);
-	mutex_init(&sched.work_done_wait_mutex);
-	sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread));
-	if (!sched.curr_thread) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
-	if (!sched.cpu_last_switched) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
-	if (!sched.curr_pid) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	for (i = 0; i < MAX_CPUS; i++)
-		sched.curr_pid[i] = -1;
+	int ret;
 
 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
@@ -3622,9 +3641,9 @@ int cmd_sched(int argc, const char **argv)
 	 * Aliased to 'perf script' for now:
 	 */
 	if (!strcmp(argv[0], "script")) {
-		ret = cmd_script(argc, argv);
+		return cmd_script(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
-		ret = __cmd_record(argc, argv);
+		return __cmd_record(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
 		sched.tp_handler = &lat_ops;
 		if (argc > 1) {
@@ -3633,7 +3652,7 @@ int cmd_sched(int argc, const char **argv)
 				usage_with_options(latency_usage, latency_options);
 		}
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__lat(&sched);
+		return perf_sched__lat(&sched);
 	} else if (!strcmp(argv[0], "map")) {
 		if (argc) {
 			argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3642,7 +3661,7 @@ int cmd_sched(int argc, const char **argv)
 		}
 		sched.tp_handler = &map_ops;
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__map(&sched);
+		return perf_sched__map(&sched);
 	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
 		sched.tp_handler = &replay_ops;
 		if (argc) {
@@ -3650,7 +3669,7 @@ int cmd_sched(int argc, const char **argv)
 			if (argc)
 				usage_with_options(replay_usage, replay_options);
 		}
-		ret = perf_sched__replay(&sched);
+		return perf_sched__replay(&sched);
 	} else if (!strcmp(argv[0], "timehist")) {
 		if (argc) {
 			argc = parse_options(argc, argv, timehist_options,
@@ -3666,24 +3685,16 @@ int cmd_sched(int argc, const char **argv)
 				parse_options_usage(NULL, timehist_options, "w", true);
 			if (sched.show_next)
 				parse_options_usage(NULL, timehist_options, "n", true);
-			ret = -EINVAL;
-			goto out;
+			return -EINVAL;
 		}
 		ret = symbol__validate_sym_arguments();
 		if (ret)
-			goto out;
+			return ret;
 
-		ret = perf_sched__timehist(&sched);
+		return perf_sched__timehist(&sched);
 	} else {
 		usage_with_options(sched_usage, sched_options);
 	}
 
-out:
-	free(sched.curr_pid);
-	free(sched.cpu_last_switched);
-	free(sched.curr_thread);
-	mutex_destroy(&sched.start_work_mutex);
-	mutex_destroy(&sched.work_done_wait_mutex);
-
-	return ret;
+	return 0;
 }
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index b1f57401ff23..c16224b1fef3 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -32,8 +32,10 @@
 #include "util/time-utils.h"
 #include "util/path.h"
 #include "util/event.h"
+#include "util/mem-info.h"
 #include "ui/ui.h"
 #include "print_binary.h"
+#include "print_insn.h"
 #include "archinsn.h"
 #include <linux/bitmap.h>
 #include <linux/kernel.h>
@@ -134,6 +136,8 @@ enum perf_output_field {
 	PERF_OUTPUT_CGROUP          = 1ULL << 39,
 	PERF_OUTPUT_RETIRE_LAT      = 1ULL << 40,
 	PERF_OUTPUT_DSOFF           = 1ULL << 41,
+	PERF_OUTPUT_DISASM          = 1ULL << 42,
+	PERF_OUTPUT_BRSTACKDISASM   = 1ULL << 43,
 };
 
 struct perf_script {
@@ -189,6 +193,7 @@ struct output_option {
 	{.str = "bpf-output",   .field = PERF_OUTPUT_BPF_OUTPUT},
 	{.str = "callindent", .field = PERF_OUTPUT_CALLINDENT},
 	{.str = "insn", .field = PERF_OUTPUT_INSN},
+	{.str = "disasm", .field = PERF_OUTPUT_DISASM},
 	{.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
 	{.str = "brstackinsn", .field = PERF_OUTPUT_BRSTACKINSN},
 	{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
@@ -207,6 +212,7 @@ struct output_option {
 	{.str = "vcpu", .field = PERF_OUTPUT_VCPU},
 	{.str = "cgroup", .field = PERF_OUTPUT_CGROUP},
 	{.str = "retire_lat", .field = PERF_OUTPUT_RETIRE_LAT},
+	{.str = "brstackdisasm", .field = PERF_OUTPUT_BRSTACKDISASM},
 };
 
 enum {
@@ -507,7 +513,8 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
 		       "selected. Hence, no address to lookup the source line number.\n");
 		return -EINVAL;
 	}
-	if ((PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN)) && !allow_user_set &&
+	if ((PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN) || PRINT_FIELD(BRSTACKDISASM))
+	    && !allow_user_set &&
 	    !(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) {
 		pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
 		       "Hint: run 'perf record -b ...'\n");
@@ -1011,11 +1018,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
 		to   = entries[i].to;
 
 		if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
-		    !map__dso(alf.map)->adjust_symbols)
+		    !dso__adjust_symbols(map__dso(alf.map)))
 			from = map__dso_map_ip(alf.map, from);
 
 		if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
-		    !map__dso(alt.map)->adjust_symbols)
+		    !dso__adjust_symbols(map__dso(alt.map)))
 			to = map__dso_map_ip(alt.map, to);
 
 		printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -1076,7 +1083,7 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
 		pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
 		goto out;
 	}
-	if (dso->data.status == DSO_DATA_STATUS_ERROR) {
+	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) {
 		pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
 		goto out;
 	}
@@ -1088,7 +1095,7 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
 	len = dso__data_read_offset(dso, machine, offset, (u8 *)buffer,
 				    end - start + MAXINSN);
 
-	*is64bit = dso->is_64_bit;
+	*is64bit = dso__is_64_bit(dso);
 	if (len <= 0)
 		pr_debug("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
 			 start, end);
@@ -1159,18 +1166,56 @@ out:
 	return ret;
 }
 
+static int any_dump_insn(struct perf_event_attr *attr __maybe_unused,
+			 struct perf_insn *x, uint64_t ip,
+			 u8 *inbuf, int inlen, int *lenp,
+			 FILE *fp)
+{
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+	if (PRINT_FIELD(BRSTACKDISASM)) {
+		int printed = fprintf_insn_asm(x->machine, x->thread, x->cpumode, x->is64bit,
+					       (uint8_t *)inbuf, inlen, ip, lenp,
+					       PRINT_INSN_IMM_HEX, fp);
+
+		if (printed > 0)
+			return printed;
+	}
+#endif
+	return fprintf(fp, "%s", dump_insn(x, ip, inbuf, inlen, lenp));
+}
+
+static int add_padding(FILE *fp, int printed, int padding)
+{
+	if (printed >= 0 && printed < padding)
+		printed += fprintf(fp, "%*s", padding - printed, "");
+	return printed;
+}
+
 static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
 			    struct perf_insn *x, u8 *inbuf, int len,
 			    int insn, FILE *fp, int *total_cycles,
-			    struct perf_event_attr *attr)
+			    struct perf_event_attr *attr,
+			    struct thread *thread)
 {
 	int ilen = 0;
-	int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t", ip,
-			      dump_insn(x, ip, inbuf, len, &ilen));
+	int printed = fprintf(fp, "\t%016" PRIx64 "\t", ip);
+
+	printed += add_padding(fp, any_dump_insn(attr, x, ip, inbuf, len, &ilen, fp), 30);
+	printed += fprintf(fp, "\t");
 
 	if (PRINT_FIELD(BRSTACKINSNLEN))
 		printed += fprintf(fp, "ilen: %d\t", ilen);
 
+	if (PRINT_FIELD(SRCLINE)) {
+		struct addr_location al;
+
+		addr_location__init(&al);
+		thread__find_map(thread, x->cpumode, ip, &al);
", fp); + printed += fprintf(fp, "\t"); + addr_location__exit(&al); + } + printed += fprintf(fp, "#%s%s%s%s", en->flags.predicted ? " PRED" : "", en->flags.mispred ? " MISPRED" : "", @@ -1182,6 +1227,7 @@ static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en, if (insn) printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles); } + return printed + fprintf(fp, "\n"); } @@ -1247,6 +1293,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, nr = max_blocks + 1; x.thread = thread; + x.machine = machine; x.cpu = sample->cpu; printed += fprintf(fp, "%c", '\n'); @@ -1260,7 +1307,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, x.cpumode, x.cpu, &lastsym, attr, fp); printed += ip__fprintf_jump(entries[nr - 1].from, &entries[nr - 1], &x, buffer, len, 0, fp, &total_cycles, - attr); + attr, thread); if (PRINT_FIELD(SRCCODE)) printed += print_srccode(thread, x.cpumode, entries[nr - 1].from); } @@ -1291,14 +1338,14 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp); if (ip == end) { printed += ip__fprintf_jump(ip, &entries[i], &x, buffer + off, len - off, ++insn, fp, - &total_cycles, attr); + &total_cycles, attr, thread); if (PRINT_FIELD(SRCCODE)) printed += print_srccode(thread, x.cpumode, ip); break; } else { ilen = 0; - printed += fprintf(fp, "\t%016" PRIx64 "\t%s", ip, - dump_insn(&x, ip, buffer + off, len - off, &ilen)); + printed += fprintf(fp, "\t%016" PRIx64 "\t", ip); + printed += any_dump_insn(attr, &x, ip, buffer + off, len - off, &ilen, fp); if (PRINT_FIELD(BRSTACKINSNLEN)) printed += fprintf(fp, "\tilen: %d", ilen); printed += fprintf(fp, "\n"); @@ -1345,8 +1392,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, if (len <= 0) goto out; ilen = 0; - printed += fprintf(fp, "\t%016" PRIx64 "\t%s", sample->ip, - dump_insn(&x, sample->ip, buffer, len, &ilen)); + printed += fprintf(fp, "\t%016" PRIx64 "\t", sample->ip); + printed += any_dump_insn(attr, &x, sample->ip, buffer, len, &ilen, fp); if (PRINT_FIELD(BRSTACKINSNLEN)) printed += fprintf(fp, "\tilen: %d", ilen); printed += fprintf(fp, "\n"); @@ -1356,8 +1403,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, } for (off = 0; off <= end - start; off += ilen) { ilen = 0; - printed += fprintf(fp, "\t%016" PRIx64 "\t%s", start + off, - dump_insn(&x, start + off, buffer + off, len - off, &ilen)); + printed += fprintf(fp, "\t%016" PRIx64 "\t", start + off); + printed += any_dump_insn(attr, &x, start + off, buffer + off, len - off, &ilen, fp); if (PRINT_FIELD(BRSTACKINSNLEN)) printed += fprintf(fp, "\tilen: %d", ilen); printed += fprintf(fp, "\n"); @@ -1502,7 +1549,8 @@ void script_fetch_insn(struct perf_sample *sample, struct thread *thread, static int perf_sample__fprintf_insn(struct perf_sample *sample, struct perf_event_attr *attr, struct thread *thread, - struct machine *machine, FILE *fp) + struct machine *machine, FILE *fp, + struct addr_location *al) { int printed = 0; @@ -1511,13 +1559,14 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample, if (PRINT_FIELD(INSNLEN)) printed += fprintf(fp, " ilen: %d", sample->insn_len); if (PRINT_FIELD(INSN) && sample->insn_len) { - int i; - - printed += fprintf(fp, " insn:"); - for (i = 0; i < sample->insn_len; i++) - printed += fprintf(fp, " %02x", (unsigned char)sample->insn[i]); + printed += fprintf(fp, " insn: "); + printed += sample__fprintf_insn_raw(sample, fp); + 
} + if (PRINT_FIELD(DISASM) && sample->insn_len) { + printed += fprintf(fp, "\t\t"); + printed += sample__fprintf_insn_asm(sample, thread, machine, fp, al); } - if (PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN)) + if (PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN) || PRINT_FIELD(BRSTACKDISASM)) printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp); return printed; @@ -1590,7 +1639,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample, if (print_srcline_last) printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp); - printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp); + printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp, al); printed += fprintf(fp, "\n"); if (PRINT_FIELD(SRCCODE)) { int ret = map__fprintf_srccode(al->map, al->addr, stdout, @@ -2002,13 +2051,18 @@ static int evlist__max_name_len(struct evlist *evlist) static int data_src__fprintf(u64 data_src, FILE *fp) { - struct mem_info mi = { .data_src.val = data_src }; + struct mem_info *mi = mem_info__new(); char decode[100]; char out[100]; static int maxlen; int len; - perf_script__meminfo_scnprintf(decode, 100, &mi); + if (!mi) + return -ENOMEM; + + mem_info__data_src(mi)->val = data_src; + perf_script__meminfo_scnprintf(decode, 100, mi); + mem_info__put(mi); len = scnprintf(out, 100, "%16" PRIx64 " %s", data_src, decode); if (maxlen < len) @@ -2243,7 +2297,7 @@ static void process_event(struct perf_script *script, if (evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT)) perf_sample__fprintf_bpf_output(sample, fp); - perf_sample__fprintf_insn(sample, attr, thread, machine, fp); + perf_sample__fprintf_insn(sample, attr, thread, machine, fp, al); if (PRINT_FIELD(PHYS_ADDR)) fprintf(fp, "%16" PRIx64, sample->phys_addr); @@ -2449,7 +2503,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event, evsel = evlist__last(*pevlist); if (!evsel->priv) { - if (scr->per_event_dump) { + if (scr->per_event_dump) { evsel->priv = evsel_script__new(evsel, scr->session->data); if (!evsel->priv) return -ENOMEM; @@ -3108,6 +3162,13 @@ parse: rc = -EINVAL; goto out; } +#ifndef HAVE_LIBCAPSTONE_SUPPORT + if (change != REMOVE && strcmp(tok, "disasm") == 0) { + fprintf(stderr, "Field \"disasm\" requires perf to be built with libcapstone support.\n"); + rc = -EINVAL; + goto out; + } +#endif if (type == -1) { /* add user option to all events types for @@ -3448,7 +3509,7 @@ static int check_ev_match(char *dir_name, char *scriptname, match = 0; evlist__for_each_entry(session->evlist, pos) { - if (!strcmp(evsel__name(pos), evname)) { + if (evsel__name_is(pos, evname)) { match = 1; break; } @@ -3765,11 +3826,25 @@ static int perf_script__process_auxtrace_info(struct perf_session *session, #endif static int parse_insn_trace(const struct option *opt __maybe_unused, - const char *str __maybe_unused, - int unset __maybe_unused) + const char *str, int unset __maybe_unused) { - parse_output_fields(NULL, "+insn,-event,-period", 0); - itrace_parse_synth_opts(opt, "i0ns", 0); + const char *fields = "+insn,-event,-period"; + int ret; + + if (str) { + if (strcmp(str, "disasm") == 0) + fields = "+disasm,-event,-period"; + else if (strlen(str) != 0 && strcmp(str, "raw") != 0) { + fprintf(stderr, "Only accept raw|disasm\n"); + return -EINVAL; + } + } + + ret = parse_output_fields(NULL, fields, 0); + if (ret < 0) + return ret; + + itrace_parse_synth_opts(opt, "i0nse", 0); symbol_conf.nanosecs = true; return 0; } @@ -3902,7 +3977,7 @@ int cmd_script(int 
argc, const char **argv) "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,dsoff," "addr,symoff,srcline,period,iregs,uregs,brstack," "brstacksym,flags,data_src,weight,bpf-output,brstackinsn," - "brstackinsnlen,brstackoff,callindent,insn,insnlen,synth," + "brstackinsnlen,brstackdisasm,brstackoff,callindent,insn,disasm,insnlen,synth," "phys_addr,metric,misc,srccode,ipc,tod,data_page_size," "code_page_size,ins_lat,machine_pid,vcpu,cgroup,retire_lat", parse_output_fields), @@ -3914,7 +3989,7 @@ int cmd_script(int argc, const char **argv) "only consider these symbols"), OPT_INTEGER(0, "addr-range", &symbol_conf.addr_range, "Use with -S to list traced records within address range"), - OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, NULL, + OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, "raw|disasm", "Decode instructions from itrace", parse_insn_trace), OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL, "Run xed disassembler on output", parse_xed), @@ -4366,6 +4441,9 @@ script_found: flush_scripting(); + if (verbose > 2 || debug_kmaps) + perf_session__dump_kmaps(session); + out_delete: if (script.ptime_range) { itrace_synth_opts__clear_time_range(&itrace_synth_opts); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 5fe9abc6a524..35f79b48e8dc 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -164,26 +164,6 @@ static struct perf_stat_config stat_config = { .iostat_run = false, }; -static bool cpus_map_matched(struct evsel *a, struct evsel *b) -{ - if (!a->core.cpus && !b->core.cpus) - return true; - - if (!a->core.cpus || !b->core.cpus) - return false; - - if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus)) - return false; - - for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) { - if (perf_cpu_map__cpu(a->core.cpus, i).cpu != - perf_cpu_map__cpu(b->core.cpus, i).cpu) - return false; - } - - return true; -} - static void evlist__check_cpu_maps(struct evlist *evlist) { struct evsel *evsel, *warned_leader = NULL; @@ -194,7 +174,7 @@ static void evlist__check_cpu_maps(struct evlist *evlist) /* Check that leader matches cpus with each member. */ if (leader == evsel) continue; - if (cpus_map_matched(leader, evsel)) + if (perf_cpu_map__equal(leader->core.cpus, evsel->core.cpus)) continue; /* If there's mismatch disable the group and warn user. */ @@ -1238,6 +1218,8 @@ static struct option stat_options[] = { "aggregate counts per processor socket", AGGR_SOCKET), OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, "aggregate counts per processor die", AGGR_DIE), + OPT_SET_UINT(0, "per-cluster", &stat_config.aggr_mode, + "aggregate counts per processor cluster", AGGR_CLUSTER), OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level, "cache level", "aggregate count at this cache level (Default: LLC)", parse_cache_level), @@ -1317,10 +1299,9 @@ static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map) * be the first online CPU in the cache domain else use the * first online CPU of the cache domain as the ID. 
*/ - if (perf_cpu_map__has_any_cpu_or_is_empty(cpu_map)) + id = perf_cpu_map__min(cpu_map).cpu; + if (id == -1) id = cpu.cpu; - else - id = perf_cpu_map__cpu(cpu_map, 0).cpu; /* Free the perf_cpu_map used to find the cache ID */ perf_cpu_map__put(cpu_map); @@ -1428,6 +1409,7 @@ static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data) static const char *const aggr_mode__string[] = { [AGGR_CORE] = "core", [AGGR_CACHE] = "cache", + [AGGR_CLUSTER] = "cluster", [AGGR_DIE] = "die", [AGGR_GLOBAL] = "global", [AGGR_NODE] = "node", @@ -1455,6 +1437,12 @@ static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *confi return aggr_cpu_id__cache(cpu, /*data=*/NULL); } +static struct aggr_cpu_id perf_stat__get_cluster(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) +{ + return aggr_cpu_id__cluster(cpu, /*data=*/NULL); +} + static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, struct perf_cpu cpu) { @@ -1507,6 +1495,12 @@ static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *con return perf_stat__get_aggr(config, perf_stat__get_die, cpu); } +static struct aggr_cpu_id perf_stat__get_cluster_cached(struct perf_stat_config *config, + struct perf_cpu cpu) +{ + return perf_stat__get_aggr(config, perf_stat__get_cluster, cpu); +} + static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config, struct perf_cpu cpu) { @@ -1544,6 +1538,8 @@ static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) return aggr_cpu_id__socket; case AGGR_DIE: return aggr_cpu_id__die; + case AGGR_CLUSTER: + return aggr_cpu_id__cluster; case AGGR_CACHE: return aggr_cpu_id__cache; case AGGR_CORE: @@ -1569,6 +1565,8 @@ static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) return perf_stat__get_socket_cached; case AGGR_DIE: return perf_stat__get_die_cached; + case AGGR_CLUSTER: + return perf_stat__get_cluster_cached; case AGGR_CACHE: return perf_stat__get_cache_id_cached; case AGGR_CORE: @@ -1623,7 +1621,7 @@ static int perf_stat_init_aggr_mode(void) * taking the highest cpu number to be the size of * the aggregation translate cpumap. 
*/ - if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus)) + if (!perf_cpu_map__is_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus)) nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; else nr = 0; @@ -1633,23 +1631,13 @@ static int perf_stat_init_aggr_mode(void) static void cpu_aggr_map__delete(struct cpu_aggr_map *map) { - if (map) { - WARN_ONCE(refcount_read(&map->refcnt) != 0, - "cpu_aggr_map refcnt unbalanced\n"); - free(map); - } -} - -static void cpu_aggr_map__put(struct cpu_aggr_map *map) -{ - if (map && refcount_dec_and_test(&map->refcnt)) - cpu_aggr_map__delete(map); + free(map); } static void perf_stat__exit_aggr_mode(void) { - cpu_aggr_map__put(stat_config.aggr_map); - cpu_aggr_map__put(stat_config.cpus_aggr_map); + cpu_aggr_map__delete(stat_config.aggr_map); + cpu_aggr_map__delete(stat_config.cpus_aggr_map); stat_config.aggr_map = NULL; stat_config.cpus_aggr_map = NULL; } @@ -1737,6 +1725,21 @@ static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu, return id; } +static struct aggr_cpu_id perf_env__get_cluster_aggr_by_cpu(struct perf_cpu cpu, + void *data) +{ + struct perf_env *env = data; + struct aggr_cpu_id id = aggr_cpu_id__empty(); + + if (cpu.cpu != -1) { + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; + id.cluster = env->cpu[cpu.cpu].cluster_id; + } + + return id; +} + static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; @@ -1744,12 +1747,12 @@ static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, vo if (cpu.cpu != -1) { /* - * core_id is relative to socket and die, - * we need a global id. So we set - * socket, die id and core id + * core_id is relative to socket, die and cluster, we need a + * global id. So we set socket, die id, cluster id and core id. 
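
The cluster-aware aggregation here widens the ID tuple to (socket, die, cluster, core), since core_id is only unique within its enclosing socket, die and cluster. A standalone sketch of why the extra field matters, using a simplified made-up type rather than perf's aggr_cpu_id:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for an aggregation key. */
    struct aggr_id {
            int socket, die, cluster, core;
    };

    static bool aggr_id_equal(const struct aggr_id *a, const struct aggr_id *b)
    {
            return a->socket == b->socket && a->die == b->die &&
                   a->cluster == b->cluster && a->core == b->core;
    }

    int main(void)
    {
            /* Same core_id on two sockets: must land in different buckets. */
            struct aggr_id a = { .socket = 0, .die = 0, .cluster = 0, .core = 1 };
            struct aggr_id b = { .socket = 1, .die = 0, .cluster = 0, .core = 1 };

            printf("same bucket: %s\n", aggr_id_equal(&a, &b) ? "yes" : "no");
            return 0;
    }
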
*/ id.socket = env->cpu[cpu.cpu].socket_id; id.die = env->cpu[cpu.cpu].die_id; + id.cluster = env->cpu[cpu.cpu].cluster_id; id.core = env->cpu[cpu.cpu].core_id; } @@ -1805,6 +1808,12 @@ static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *confi return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); } +static struct aggr_cpu_id perf_stat__get_cluster_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) +{ + return perf_env__get_cluster_aggr_by_cpu(cpu, &perf_stat.session->header.env); +} + static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused, struct perf_cpu cpu) { @@ -1842,6 +1851,8 @@ static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) return perf_env__get_socket_aggr_by_cpu; case AGGR_DIE: return perf_env__get_die_aggr_by_cpu; + case AGGR_CLUSTER: + return perf_env__get_cluster_aggr_by_cpu; case AGGR_CACHE: return perf_env__get_cache_aggr_by_cpu; case AGGR_CORE: @@ -1867,6 +1878,8 @@ static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) return perf_stat__get_socket_file; case AGGR_DIE: return perf_stat__get_die_file; + case AGGR_CLUSTER: + return perf_stat__get_cluster_file; case AGGR_CACHE: return perf_stat__get_cache_file; case AGGR_CORE: @@ -2062,6 +2075,7 @@ static int add_default_attributes(void) stat_config.metric_no_threshold, stat_config.user_requested_cpu_list, stat_config.system_wide, + stat_config.hardware_aware_grouping, &stat_config.metric_events); } @@ -2095,6 +2109,7 @@ static int add_default_attributes(void) stat_config.metric_no_threshold, stat_config.user_requested_cpu_list, stat_config.system_wide, + stat_config.hardware_aware_grouping, &stat_config.metric_events); } @@ -2129,6 +2144,7 @@ static int add_default_attributes(void) /*metric_no_threshold=*/true, stat_config.user_requested_cpu_list, stat_config.system_wide, + stat_config.hardware_aware_grouping, &stat_config.metric_events) < 0) return -1; } @@ -2170,6 +2186,7 @@ static int add_default_attributes(void) /*metric_no_threshold=*/true, stat_config.user_requested_cpu_list, stat_config.system_wide, + stat_config.hardware_aware_grouping, &stat_config.metric_events) < 0) return -1; @@ -2290,7 +2307,7 @@ int process_stat_config_event(struct perf_session *session, perf_event__read_stat_config(&stat_config, &event->stat_config); - if (perf_cpu_map__has_any_cpu_or_is_empty(st->cpus)) { + if (perf_cpu_map__is_empty(st->cpus)) { if (st->aggr_mode != AGGR_UNSET) pr_warning("warning: processing task data, aggregation mode not set\n"); } else if (st->aggr_mode != AGGR_UNSET) { @@ -2398,6 +2415,8 @@ static int __cmd_report(int argc, const char **argv) "aggregate counts per processor socket", AGGR_SOCKET), OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, "aggregate counts per processor die", AGGR_DIE), + OPT_SET_UINT(0, "per-cluster", &perf_stat.aggr_mode, + "aggregate counts per processor cluster", AGGR_CLUSTER), OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level, "cache level", "aggregate count at this cache level (Default: LLC)", @@ -2702,6 +2721,7 @@ int cmd_stat(int argc, const char **argv) stat_config.metric_no_threshold, stat_config.user_requested_cpu_list, stat_config.system_wide, + stat_config.hardware_aware_grouping, &stat_config.metric_events); zfree(&metrics); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 5301d1badd43..1d6aef51c122 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -129,7 
+129,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) /* * We can't annotate with just /proc/kallsyms */ - if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) { + if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) { pr_err("Can't annotate %s: No vmlinux file was found in the " "path\n", sym->name); sleep(1); @@ -182,7 +182,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip) "Tools: %s\n\n" "Not all samples will be on the annotation output.\n\n" "Please report to linux-kernel@vger.kernel.org\n", - ip, dso->long_name, dso__symtab_origin(dso), + ip, dso__long_name(dso), dso__symtab_origin(dso), map__start(map), map__end(map), sym->start, sym->end, sym->binding == STB_GLOBAL ? 'g' : sym->binding == STB_LOCAL ? 'l' : 'w', sym->name, @@ -1573,7 +1573,7 @@ int cmd_top(int argc, const char **argv) "add last branch records to call history"), OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace, "Show raw trace event output (do not use print fmt or plugins)"), - OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy, + OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy, "Show entries in a hierarchy"), OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite, "Use a backward ring buffer, default: no"), diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 109b8e64fe69..51eca671c797 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -74,6 +74,7 @@ #include <linux/err.h> #include <linux/filter.h> #include <linux/kernel.h> +#include <linux/list_sort.h> #include <linux/random.h> #include <linux/stringify.h> #include <linux/time64.h> @@ -946,6 +947,15 @@ static const struct syscall_fmt syscall_fmts[] = { .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, }, { .name = "eventfd2", .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, }, + { .name = "faccessat", + .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, + [1] = { .scnprintf = SCA_FILENAME, /* pathname */ }, + [2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, }, + { .name = "faccessat2", + .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, + [1] = { .scnprintf = SCA_FILENAME, /* pathname */ }, + [2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, + [3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, }, { .name = "fchmodat", .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, { .name = "fchownat", @@ -968,7 +978,6 @@ static const struct syscall_fmt syscall_fmts[] = { [1] = { .scnprintf = SCA_FILENAME, /* path */ }, [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, }, { .name = "fstat", .alias = "newfstat", }, - { .name = "fstatat", .alias = "newfstatat", }, { .name = "futex", .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ }, [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, }, @@ -1048,8 +1057,12 @@ static const struct syscall_fmt syscall_fmts[] = { .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, }, { .name = "name_to_handle_at", .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, - { .name = "newfstatat", - .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, + { .name = "nanosleep", + .arg = { [0] = { .scnprintf = SCA_TIMESPEC, /* req */ }, }, }, + { .name = "newfstatat", .alias = "fstatat", + .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, + [1] = { .scnprintf = SCA_FILENAME, /* pathname */ }, + [3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, }, { .name = "open", .arg = { [1] = { .scnprintf = 
SCA_OPEN_FLAGS, /* flags */ }, }, }, { .name = "open_by_handle_at", @@ -1141,7 +1154,7 @@ static const struct syscall_fmt syscall_fmts[] = { { .name = "stat", .alias = "newstat", }, { .name = "statx", .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ }, - [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } , + [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ } , [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, }, { .name = "swapoff", .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, }, @@ -1159,7 +1172,9 @@ static const struct syscall_fmt syscall_fmts[] = { .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, }, { .name = "uname", .alias = "newuname", }, { .name = "unlinkat", - .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, + .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, + [1] = { .scnprintf = SCA_FILENAME, /* pathname */ }, + [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, }, { .name = "utimensat", .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, }, { .name = "wait4", .errpid = true, @@ -2902,7 +2917,7 @@ static void print_location(FILE *f, struct perf_sample *sample, { if ((verbose > 0 || print_dso) && al->map) - fprintf(f, "%s@", map__dso(al->map)->long_name); + fprintf(f, "%s@", dso__long_name(map__dso(al->map))); if ((verbose > 0 || print_sym) && al->sym) fprintf(f, "%s+0x%" PRIx64, al->sym->name, @@ -4312,34 +4327,38 @@ static unsigned long thread__nr_events(struct thread_trace *ttrace) return ttrace ? ttrace->nr_events : 0; } -DEFINE_RESORT_RB(threads, - (thread__nr_events(thread__priv(a->thread)) < - thread__nr_events(thread__priv(b->thread))), - struct thread *thread; -) +static int trace_nr_events_cmp(void *priv __maybe_unused, + const struct list_head *la, + const struct list_head *lb) { - entry->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread; + struct thread_list *a = list_entry(la, struct thread_list, list); + struct thread_list *b = list_entry(lb, struct thread_list, list); + unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread)); + unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread)); + + if (a_nr_events != b_nr_events) + return a_nr_events < b_nr_events ? -1 : 1; + + /* Identical number of events, place smaller tids first. */ + return thread__tid(a->thread) < thread__tid(b->thread) + ? -1 + : (thread__tid(a->thread) > thread__tid(b->thread) ? 
1 : 0); } static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) { size_t printed = trace__fprintf_threads_header(fp); - struct rb_node *nd; - int i; - - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i); + LIST_HEAD(threads); - if (threads == NULL) { - fprintf(fp, "%s", "Error sorting output by nr_events!\n"); - return 0; - } + if (machine__thread_list(trace->host, &threads) == 0) { + struct thread_list *pos; - resort_rb__for_each_entry(nd, threads) - printed += trace__fprintf_thread(fp, threads_entry->thread, trace); + list_sort(NULL, &threads, trace_nr_events_cmp); - resort_rb__delete(threads); + list_for_each_entry(pos, &threads, list) + printed += trace__fprintf_thread(fp, pos->thread, trace); } + thread_list__delete(&threads); return printed; } @@ -4864,6 +4883,11 @@ int cmd_trace(int argc, const char **argv) if (!trace.trace_syscalls) goto skip_augmentation; + if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) { + pr_debug("Syscall augmentation fails with record, disabling augmentation"); + goto skip_augmentation; + } + trace.skel = augmented_raw_syscalls_bpf__open(); if (!trace.skel) { pr_debug("Failed to open augmented syscalls BPF skeleton"); @@ -4897,7 +4921,7 @@ int cmd_trace(int argc, const char **argv) goto out; } trace.syscalls.events.bpf_output = evlist__last(trace.evlist); - assert(!strcmp(evsel__name(trace.syscalls.events.bpf_output), "__augmented_syscalls__")); + assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__")); skip_augmentation: #endif err = -1; @@ -4954,7 +4978,7 @@ skip_augmentation: */ if (trace.syscalls.events.bpf_output) { evlist__for_each_entry(trace.evlist, evsel) { - bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0; + bool raw_syscalls_sys_exit = evsel__name_is(evsel, "raw_syscalls:sys_exit"); if (raw_syscalls_sys_exit) { trace.raw_augmented_syscalls = true; diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c index ac20c2b9bbc2..398aa53e9e2e 100644 --- a/tools/perf/builtin-version.c +++ b/tools/perf/builtin-version.c @@ -73,6 +73,7 @@ static void library_status(void) STATUS(HAVE_LIBCRYPTO_SUPPORT, libcrypto); STATUS(HAVE_LIBUNWIND_SUPPORT, libunwind); STATUS(HAVE_DWARF_SUPPORT, libdw-dwarf-unwind); + STATUS(HAVE_LIBCAPSTONE_SUPPORT, libcapstone); STATUS(HAVE_ZLIB_SUPPORT, zlib); STATUS(HAVE_LZMA_SUPPORT, lzma); STATUS(HAVE_AUXTRACE_SUPPORT, get_cpuid); @@ -82,6 +83,8 @@ static void library_status(void) STATUS(HAVE_LIBPFM, libpfm4); STATUS(HAVE_LIBTRACEEVENT, libtraceevent); STATUS(HAVE_BPF_SKEL, bpf_skeletons); + STATUS(HAVE_DWARF_UNWIND_SUPPORT, dwarf-unwind-support); + STATUS(HAVE_CSTRACE_SUPPORT, libopencsd); } int cmd_version(int argc, const char **argv) diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index f2ab5bae2150..f4375deabfa3 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h @@ -2,8 +2,10 @@ #ifndef BUILTIN_H #define BUILTIN_H +struct cmdnames; + void list_common_cmds_help(void); -const char *help_unknown_cmd(const char *cmd); +const char *help_unknown_cmd(const char *cmd, struct cmdnames *main_cmds); int cmd_annotate(int argc, const char **argv); int cmd_bench(int argc, const char **argv); diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 66ba33dbcef2..672421b858ac 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -9,23 +9,15 @@ FILES=( "include/uapi/linux/const.h" "include/uapi/drm/drm.h" 
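
The thread summary above now sorts a plain thread list with list_sort() and trace_nr_events_cmp(): order by event count, break ties on tid. The same three-way comparator shape can be sketched standalone with qsort(); struct th and the sample values are invented for illustration, not perf types:

    #include <stdio.h>
    #include <stdlib.h>

    struct th { unsigned long nr_events; int tid; };

    /* Three-way compare: events first, then smaller tids. */
    static int th_cmp(const void *pa, const void *pb)
    {
            const struct th *a = pa, *b = pb;

            if (a->nr_events != b->nr_events)
                    return a->nr_events < b->nr_events ? -1 : 1;
            return (a->tid > b->tid) - (a->tid < b->tid);
    }

    int main(void)
    {
            struct th t[] = { { 5, 30 }, { 9, 10 }, { 5, 20 } };

            qsort(t, 3, sizeof(t[0]), th_cmp);
            for (int i = 0; i < 3; i++)
                    printf("tid %d: %lu events\n", t[i].tid, t[i].nr_events);
            return 0;
    }
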
"include/uapi/drm/i915_drm.h" + "include/uapi/linux/bits.h" "include/uapi/linux/fadvise.h" - "include/uapi/linux/fcntl.h" - "include/uapi/linux/fs.h" "include/uapi/linux/fscrypt.h" "include/uapi/linux/kcmp.h" "include/uapi/linux/kvm.h" "include/uapi/linux/in.h" - "include/uapi/linux/mount.h" - "include/uapi/linux/openat2.h" "include/uapi/linux/perf_event.h" - "include/uapi/linux/prctl.h" - "include/uapi/linux/sched.h" "include/uapi/linux/seccomp.h" "include/uapi/linux/stat.h" - "include/uapi/linux/usbdevice_fs.h" - "include/uapi/linux/vhost.h" - "include/uapi/sound/asound.h" "include/linux/bits.h" "include/vdso/bits.h" "include/linux/const.h" @@ -38,9 +30,7 @@ FILES=( "arch/x86/include/asm/cpufeatures.h" "arch/x86/include/asm/inat_types.h" "arch/x86/include/asm/emulate_prefix.h" - "arch/x86/include/asm/irq_vectors.h" "arch/x86/include/asm/msr-index.h" - "arch/x86/include/uapi/asm/prctl.h" "arch/x86/lib/x86-opcode-map.txt" "arch/x86/tools/gen-insn-attr-x86.awk" "arch/arm/include/uapi/asm/perf_regs.h" @@ -97,7 +87,18 @@ SYNC_CHECK_FILES=( declare -a BEAUTY_FILES BEAUTY_FILES=( + "arch/x86/include/asm/irq_vectors.h" + "arch/x86/include/uapi/asm/prctl.h" "include/linux/socket.h" + "include/uapi/linux/fcntl.h" + "include/uapi/linux/fs.h" + "include/uapi/linux/mount.h" + "include/uapi/linux/prctl.h" + "include/uapi/linux/sched.h" + "include/uapi/linux/stat.h" + "include/uapi/linux/usbdevice_fs.h" + "include/uapi/linux/vhost.h" + "include/uapi/sound/asound.h" ) declare -a FAILURES diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh index f94795794b36..6ed7e52ab881 100755 --- a/tools/perf/perf-archive.sh +++ b/tools/perf/perf-archive.sh @@ -34,7 +34,7 @@ if [ $UNPACK -eq 1 ]; then TARGET=`find . -regex "\./perf.*\.tar\.bz2"` TARGET_NUM=`echo -n "$TARGET" | grep -c '^'` - if [ -z "$TARGET" -o $TARGET_NUM -gt 1 ]; then + if [ -z "$TARGET" ] || [ $TARGET_NUM -gt 1 ]; then echo -e "Error: $TARGET_NUM files found for unpacking:\n$TARGET" echo "Provide the requested file as an argument" exit 1 diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh index f224d79b89e6..69cba3c170d5 100644 --- a/tools/perf/perf-completion.sh +++ b/tools/perf/perf-completion.sh @@ -108,6 +108,8 @@ __perf__ltrim_colon_completions() __perfcomp () { + # Expansion of spaces to array is deliberate. + # shellcheck disable=SC2207 COMPREPLY=( $( compgen -W "$1" -- "$2" ) ) } @@ -127,13 +129,13 @@ __perf_prev_skip_opts () let i=cword-1 cmds_=$($cmd $1 --list-cmds) - prev_skip_opts=() + prev_skip_opts="" while [ $i -ge 0 ]; do - if [[ ${words[i]} == $1 ]]; then + if [[ ${words[i]} == "$1" ]]; then return fi for cmd_ in $cmds_; do - if [[ ${words[i]} == $cmd_ ]]; then + if [[ ${words[i]} == "$cmd_" ]]; then prev_skip_opts=${words[i]} return fi @@ -164,9 +166,10 @@ __perf_main () $prev_skip_opts == @(record|stat|top) ]]; then local cur1=${COMP_WORDS[COMP_CWORD]} - local raw_evts=$($cmd list --raw-dump hw sw cache tracepoint pmu sdt) + local raw_evts local arr s tmp result cpu_evts + raw_evts=$($cmd list --raw-dump hw sw cache tracepoint pmu sdt) # aarch64 doesn't have /sys/bus/event_source/devices/cpu/events if [[ `uname -m` != aarch64 ]]; then cpu_evts=$(ls /sys/bus/event_source/devices/cpu/events) @@ -175,10 +178,12 @@ __perf_main () if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then OLD_IFS="$IFS" IFS=" " + # Expansion of spaces to array is deliberate. 
+ # shellcheck disable=SC2206 arr=($raw_evts) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == *cpu/* ]]; then tmp=${s#*cpu/} @@ -200,11 +205,13 @@ __perf_main () fi elif [[ $prev == @("--pfm-events") && $prev_skip_opts == @(record|stat|top) ]]; then - local evts=$($cmd list --raw-dump pfm) + local evts + evts=$($cmd list --raw-dump pfm) __perfcomp "$evts" "$cur" elif [[ $prev == @("-M"|"--metrics") && $prev_skip_opts == @(stat) ]]; then - local metrics=$($cmd list --raw-dump metric metricgroup) + local metrics + metrics=$($cmd list --raw-dump metric metricgroup) __perfcomp "$metrics" "$cur" else # List subcommands for perf commands @@ -278,6 +285,8 @@ if [[ -n ${ZSH_VERSION-} ]]; then let cword=CURRENT-1 emulate ksh -c __perf_main let _ret && _default && _ret=0 + # _ret is only assigned 0 or 1, disable inaccurate analysis. + # shellcheck disable=SC2152 return _ret } diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 921bee0a6437..bd3f80b5bb46 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -18,6 +18,7 @@ #include <subcmd/run-command.h> #include "util/parse-events.h" #include <subcmd/parse-options.h> +#include <subcmd/help.h> #include "util/debug.h" #include "util/event.h" #include "util/util.h" // usage() @@ -458,7 +459,7 @@ static int libperf_print(enum libperf_print_level level, int main(int argc, const char **argv) { - int err; + int err, done_help = 0; const char *cmd; char sbuf[STRERR_BUFSIZE]; @@ -557,22 +558,32 @@ int main(int argc, const char **argv) pthread__block_sigwinch(); while (1) { - static int done_help; - run_argv(&argc, &argv); if (errno != ENOENT) break; if (!done_help) { - cmd = argv[0] = help_unknown_cmd(cmd); + struct cmdnames main_cmds = {}; + + for (unsigned int i = 0; i < ARRAY_SIZE(commands); i++) { + add_cmdname(&main_cmds, + commands[i].cmd, + strlen(commands[i].cmd)); + } + cmd = argv[0] = help_unknown_cmd(cmd, &main_cmds); + clean_cmdnames(&main_cmds); done_help = 1; + if (!cmd) + break; } else break; } - fprintf(stderr, "Failed to run command '%s': %s\n", - cmd, str_error_r(errno, sbuf, sizeof(sbuf))); + if (cmd) { + fprintf(stderr, "Failed to run command '%s': %s\n", + cmd, str_error_r(errno, sbuf, sizeof(sbuf))); + } out: if (debug_fp) fclose(debug_fp); diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json index 7a2b7b200f14..ac75f12e27bf 100644 --- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json @@ -9,7 +9,9 @@ "ArchStdEvent": "L1D_CACHE_REFILL_RD" }, { - "ArchStdEvent": "L1D_CACHE_INVAL" + "ArchStdEvent": "L1D_CACHE_INVAL", + "Errata": "Errata AC03_CPU_41", + "BriefDescription": "L1D cache invalidate. Impacted by errata -" }, { "ArchStdEvent": "L1D_TLB_REFILL_RD" diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json index c50d8e930b05..f4bfe7083a6b 100644 --- a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json @@ -9,7 +9,9 @@ "ArchStdEvent": "L1D_CACHE_REFILL_RD" }, { - "ArchStdEvent": "L1D_CACHE_INVAL" + "ArchStdEvent": "L1D_CACHE_INVAL", + "Errata": "Errata AC04_CPU_1", + "BriefDescription": "L1D cache invalidate. 
Impacted by errata -" }, { "ArchStdEvent": "L1D_TLB_REFILL_RD" diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv index 599a588dbeb4..4d5e9138d4cc 100644 --- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv +++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv @@ -15,3 +15,4 @@ 0x0066[[:xdigit:]]{4},1,power8,core 0x004e[[:xdigit:]]{4},1,power9,core 0x0080[[:xdigit:]]{4},1,power10,core +0x0082[[:xdigit:]]{4},1,power10,core diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json new file mode 100644 index 000000000000..9b4a032186a7 --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json @@ -0,0 +1,68 @@ +[ + { + "ArchStdEvent": "FW_MISALIGNED_LOAD" + }, + { + "ArchStdEvent": "FW_MISALIGNED_STORE" + }, + { + "ArchStdEvent": "FW_ACCESS_LOAD" + }, + { + "ArchStdEvent": "FW_ACCESS_STORE" + }, + { + "ArchStdEvent": "FW_ILLEGAL_INSN" + }, + { + "ArchStdEvent": "FW_SET_TIMER" + }, + { + "ArchStdEvent": "FW_IPI_SENT" + }, + { + "ArchStdEvent": "FW_IPI_RECEIVED" + }, + { + "ArchStdEvent": "FW_FENCE_I_SENT" + }, + { + "ArchStdEvent": "FW_FENCE_I_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_SENT" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED" + } +] diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/instructions.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/instructions.json new file mode 100644 index 000000000000..713a08c1a40f --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/instructions.json @@ -0,0 +1,127 @@ +[ + { + "EventCode": "0x10", + "EventName": "cycle_count", + "BriefDescription": "Cycle count" + }, + { + "EventCode": "0x20", + "EventName": "inst_count", + "BriefDescription": "Retired instruction count" + }, + { + "EventCode": "0x30", + "EventName": "int_load_inst", + "BriefDescription": "Integer load instruction count" + }, + { + "EventCode": "0x40", + "EventName": "int_store_inst", + "BriefDescription": "Integer store instruction count" + }, + { + "EventCode": "0x50", + "EventName": "atomic_inst", + "BriefDescription": "Atomic instruction count" + }, + { + "EventCode": "0x60", + "EventName": "sys_inst", + "BriefDescription": "System instruction count" + }, + { + "EventCode": "0x70", + "EventName": "int_compute_inst", + "BriefDescription": "Integer computational instruction count" + }, + { + "EventCode": "0x80", + "EventName": "condition_br", + "BriefDescription": "Conditional branch instruction count" + }, + { + "EventCode": "0x90", + "EventName": "taken_condition_br", + "BriefDescription": "Taken conditional branch instruction count" + }, + { + "EventCode": "0xA0", + "EventName": "jal_inst", + "BriefDescription": "JAL instruction count" + }, + { + "EventCode": "0xB0", + "EventName": "jalr_inst", + "BriefDescription": "JALR instruction count" + }, + { + "EventCode": "0xC0", + "EventName": "ret_inst", + "BriefDescription": "Return instruction count" + }, + { + "EventCode": 
"0xD0", + "EventName": "control_trans_inst", + "BriefDescription": "Control transfer instruction count" + }, + { + "EventCode": "0xE0", + "EventName": "ex9_inst", + "BriefDescription": "EXEC.IT instruction count" + }, + { + "EventCode": "0xF0", + "EventName": "int_mul_inst", + "BriefDescription": "Integer multiplication instruction count" + }, + { + "EventCode": "0x100", + "EventName": "int_div_rem_inst", + "BriefDescription": "Integer division/remainder instruction count" + }, + { + "EventCode": "0x110", + "EventName": "float_load_inst", + "BriefDescription": "Floating-point load instruction count" + }, + { + "EventCode": "0x120", + "EventName": "float_store_inst", + "BriefDescription": "Floating-point store instruction count" + }, + { + "EventCode": "0x130", + "EventName": "float_add_sub_inst", + "BriefDescription": "Floating-point addition/subtraction instruction count" + }, + { + "EventCode": "0x140", + "EventName": "float_mul_inst", + "BriefDescription": "Floating-point multiplication instruction count" + }, + { + "EventCode": "0x150", + "EventName": "float_fused_muladd_inst", + "BriefDescription": "Floating-point fused multiply-add instruction count" + }, + { + "EventCode": "0x160", + "EventName": "float_div_sqrt_inst", + "BriefDescription": "Floating-point division or square-root instruction count" + }, + { + "EventCode": "0x170", + "EventName": "other_float_inst", + "BriefDescription": "Other floating-point instruction count" + }, + { + "EventCode": "0x180", + "EventName": "int_mul_add_sub_inst", + "BriefDescription": "Integer multiplication and add/sub instruction count" + }, + { + "EventCode": "0x190", + "EventName": "retired_ops", + "BriefDescription": "Retired operation count" + } +] diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/memory.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/memory.json new file mode 100644 index 000000000000..c7401b526c77 --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/memory.json @@ -0,0 +1,57 @@ +[ + { + "EventCode": "0x01", + "EventName": "ilm_access", + "BriefDescription": "ILM access" + }, + { + "EventCode": "0x11", + "EventName": "dlm_access", + "BriefDescription": "DLM access" + }, + { + "EventCode": "0x21", + "EventName": "icache_access", + "BriefDescription": "ICACHE access" + }, + { + "EventCode": "0x31", + "EventName": "icache_miss", + "BriefDescription": "ICACHE miss" + }, + { + "EventCode": "0x41", + "EventName": "dcache_access", + "BriefDescription": "DCACHE access" + }, + { + "EventCode": "0x51", + "EventName": "dcache_miss", + "BriefDescription": "DCACHE miss" + }, + { + "EventCode": "0x61", + "EventName": "dcache_load_access", + "BriefDescription": "DCACHE load access" + }, + { + "EventCode": "0x71", + "EventName": "dcache_load_miss", + "BriefDescription": "DCACHE load miss" + }, + { + "EventCode": "0x81", + "EventName": "dcache_store_access", + "BriefDescription": "DCACHE store access" + }, + { + "EventCode": "0x91", + "EventName": "dcache_store_miss", + "BriefDescription": "DCACHE store miss" + }, + { + "EventCode": "0xA1", + "EventName": "dcache_wb", + "BriefDescription": "DCACHE writeback" + } +] diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/microarch.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/microarch.json new file mode 100644 index 000000000000..a6d378cbaa74 --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/microarch.json @@ -0,0 +1,77 @@ +[ + { + "EventCode": "0xB1", + "EventName": "cycle_wait_icache_fill", + "BriefDescription": "Cycles waiting for ICACHE fill data" 
+ }, + { + "EventCode": "0xC1", + "EventName": "cycle_wait_dcache_fill", + "BriefDescription": "Cycles waiting for DCACHE fill data" + }, + { + "EventCode": "0xD1", + "EventName": "uncached_ifetch_from_bus", + "BriefDescription": "Uncached ifetch data access from bus" + }, + { + "EventCode": "0xE1", + "EventName": "uncached_load_from_bus", + "BriefDescription": "Uncached load data access from bus" + }, + { + "EventCode": "0xF1", + "EventName": "cycle_wait_uncached_ifetch", + "BriefDescription": "Cycles waiting for uncached ifetch data from bus" + }, + { + "EventCode": "0x101", + "EventName": "cycle_wait_uncached_load", + "BriefDescription": "Cycles waiting for uncached load data from bus" + }, + { + "EventCode": "0x111", + "EventName": "main_itlb_access", + "BriefDescription": "Main ITLB access" + }, + { + "EventCode": "0x121", + "EventName": "main_itlb_miss", + "BriefDescription": "Main ITLB miss" + }, + { + "EventCode": "0x131", + "EventName": "main_dtlb_access", + "BriefDescription": "Main DTLB access" + }, + { + "EventCode": "0x141", + "EventName": "main_dtlb_miss", + "BriefDescription": "Main DTLB miss" + }, + { + "EventCode": "0x151", + "EventName": "cycle_wait_itlb_fill", + "BriefDescription": "Cycles waiting for Main ITLB fill data" + }, + { + "EventCode": "0x161", + "EventName": "pipe_stall_cycle_dtlb_miss", + "BriefDescription": "Pipeline stall cycles caused by Main DTLB miss" + }, + { + "EventCode": "0x02", + "EventName": "mispredict_condition_br", + "BriefDescription": "Misprediction of conditional branches" + }, + { + "EventCode": "0x12", + "EventName": "mispredict_take_condition_br", + "BriefDescription": "Misprediction of taken conditional branches" + }, + { + "EventCode": "0x22", + "EventName": "mispredict_target_ret_inst", + "BriefDescription": "Misprediction of targets of Return instructions" + } +] diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv index cfc449b19810..3d3a809a5446 100644 --- a/tools/perf/pmu-events/arch/riscv/mapfile.csv +++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv @@ -17,3 +17,4 @@ 0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core 0x5b7-0x0-0x0,v1,thead/c900-legacy,core 0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core +0x31e-0x8000000000008a45-0x[[:xdigit:]]+,v1,andes/ax45,core diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json index c2b10ec1c6e0..02cce3a629cb 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json @@ -94,77 +94,77 @@ "Unit": "CPU-M-CF", "EventCode": "145", "EventName": "DCW_REQ", - "BriefDescription": "Directory Write Level 1 Data Cache from Cache", + "BriefDescription": "Directory Write Level 1 Data Cache from L2-Cache", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache." }, { "Unit": "CPU-M-CF", "EventCode": "146", "EventName": "DCW_REQ_IV", - "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Intervention", + "BriefDescription": "Directory Write Level 1 Data Cache from L2-Cache with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention." 
}, { "Unit": "CPU-M-CF", "EventCode": "147", "EventName": "DCW_REQ_CHIP_HIT", - "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Chip HP Hit", + "BriefDescription": "Directory Write Level 1 Data Cache from L2-Cache with Chip HP Hit", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using chip level horizontal persistence, Chip-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "148", "EventName": "DCW_REQ_DRAWER_HIT", - "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Drawer HP Hit", + "BriefDescription": "Directory Write Level 1 Data Cache from L2-Cache with Drawer HP Hit", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "149", "EventName": "DCW_ON_CHIP", - "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache", + "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip L2-Cache", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache." }, { "Unit": "CPU-M-CF", "EventCode": "150", "EventName": "DCW_ON_CHIP_IV", - "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Intervention", + "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip L2-Cache with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache with intervention." }, { "Unit": "CPU-M-CF", "EventCode": "151", "EventName": "DCW_ON_CHIP_CHIP_HIT", - "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Chip HP Hit", + "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip L2-Cache with Chip HP Hit", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache after using chip level horizontal persistence, Chip-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "152", "EventName": "DCW_ON_CHIP_DRAWER_HIT", - "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Drawer HP Hit", + "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip L2-Cache with Drawer HP Hit", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using drawer level horizontal persistence, Drawer-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "153", "EventName": "DCW_ON_MODULE", - "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Cache", + "BriefDescription": "Directory Write Level 1 Data Cache from On-Module L2-Cache", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Module Level-2 cache." }, { "Unit": "CPU-M-CF", "EventCode": "154", "EventName": "DCW_ON_DRAWER", - "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Cache", + "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer L2-Cache", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache." 
}, { "Unit": "CPU-M-CF", "EventCode": "155", "EventName": "DCW_OFF_DRAWER", - "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Cache", + "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer L2-Cache", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache." }, { @@ -199,140 +199,140 @@ "Unit": "CPU-M-CF", "EventCode": "160", "EventName": "IDCW_ON_MODULE_IV", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Intervention", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory L2-Cache with Intervention", "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache with intervention." }, { "Unit": "CPU-M-CF", "EventCode": "161", "EventName": "IDCW_ON_MODULE_CHIP_HIT", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Chip Hit", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory L2-Cache with Chip Hit", "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using chip horizontal persistence, Chip-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "162", "EventName": "IDCW_ON_MODULE_DRAWER_HIT", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Drawer Hit", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory L2-Cache with Drawer Hit", "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using drawer level horizontal persistence, Drawer-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "163", "EventName": "IDCW_ON_DRAWER_IV", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Intervention", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer L2-Cache with Intervention", "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache with intervention." }, { "Unit": "CPU-M-CF", "EventCode": "164", "EventName": "IDCW_ON_DRAWER_CHIP_HIT", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Chip Hit", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer L2-Cache with Chip Hit", "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit." 
}, { "Unit": "CPU-M-CF", "EventCode": "165", "EventName": "IDCW_ON_DRAWER_DRAWER_HIT", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Drawer Hit", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer L2-Cache with Drawer Hit", "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "166", "EventName": "IDCW_OFF_DRAWER_IV", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Intervention", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer L2-Cache with Intervention", "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache with intervention." }, { "Unit": "CPU-M-CF", "EventCode": "167", "EventName": "IDCW_OFF_DRAWER_CHIP_HIT", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Chip Hit", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer L2-Cache with Chip Hit", "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "168", "EventName": "IDCW_OFF_DRAWER_DRAWER_HIT", - "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Drawer Hit", + "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer L2-Cache with Drawer Hit", "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "169", "EventName": "ICW_REQ", - "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache", + "BriefDescription": "Directory Write Level 1 Instruction Cache from L2-Cache", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced the requestors Level-2 cache." }, { "Unit": "CPU-M-CF", "EventCode": "170", "EventName": "ICW_REQ_IV", - "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Intervention", + "BriefDescription": "Directory Write Level 1 Instruction Cache from L2-Cache with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention." }, { "Unit": "CPU-M-CF", "EventCode": "171", "EventName": "ICW_REQ_CHIP_HIT", - "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Chip HP Hit", + "BriefDescription": "Directory Write Level 1 Instruction Cache from L2-Cache with Chip HP Hit", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache using chip level horizontal persistence, Chip-HP hit." 
}, { "Unit": "CPU-M-CF", "EventCode": "172", "EventName": "ICW_REQ_DRAWER_HIT", - "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Drawer HP Hit", + "BriefDescription": "Directory Write Level 1 Instruction Cache from L2-Cache with Drawer HP Hit", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache using drawer level horizontal persistence, Drawer-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "173", "EventName": "ICW_ON_CHIP", - "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache", + "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip L2-Cache", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache." }, { "Unit": "CPU-M-CF", "EventCode": "174", "EventName": "ICW_ON_CHIP_IV", - "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Intervention", + "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip L2-Cache with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an On-Chip Level-2 cache with intervention." }, { "Unit": "CPU-M-CF", "EventCode": "175", "EventName": "ICW_ON_CHIP_CHIP_HIT", - "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Chip HP Hit", + "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip L2-Cache with Chip HP Hit", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using chip level horizontal persistence, Chip-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "176", "EventName": "ICW_ON_CHIP_DRAWER_HIT", - "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Drawer HP Hit", + "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip L2-Cache with Drawer HP Hit", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip level 2 cache using drawer level horizontal persistence, Drawer-HP hit." }, { "Unit": "CPU-M-CF", "EventCode": "177", "EventName": "ICW_ON_MODULE", - "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Cache", + "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module L2-Cache", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache." }, { "Unit": "CPU-M-CF", "EventCode": "178", "EventName": "ICW_ON_DRAWER", - "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Cache", + "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer L2-Cache", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an On-Drawer Level-2 cache." 
}, { "Unit": "CPU-M-CF", "EventCode": "179", "EventName": "ICW_OFF_DRAWER", - "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Cache", + "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer L2-Cache", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an Off-Drawer Level-2 cache." }, { diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json index ec2ff78e2b5f..3ab1d3a6638c 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json +++ b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json @@ -2,71 +2,71 @@ { "BriefDescription": "Transaction count", "MetricName": "transaction", - "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL" + "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL if has_event(TX_C_TEND) else 0" }, { "BriefDescription": "Cycles per Instruction", "MetricName": "cpi", - "MetricExpr": "CPU_CYCLES / INSTRUCTIONS" + "MetricExpr": "CPU_CYCLES / INSTRUCTIONS if has_event(INSTRUCTIONS) else 0" }, { "BriefDescription": "Problem State Instruction Ratio", "MetricName": "prbstate", - "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100" + "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0" }, { "BriefDescription": "Level One Miss per 100 Instructions", "MetricName": "l1mp", - "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100" + "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0" }, { "BriefDescription": "Percentage sourced from Level 2 cache", "MetricName": "l2p", - "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100" + "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ) else 0" }, { "BriefDescription": "Percentage sourced from Level 3 on same chip cache", "MetricName": "l3p", - "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100" + "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_CHIP_HIT) else 0" }, { "BriefDescription": "Percentage sourced from Level 4 Local cache on same book", "MetricName": "l4lp", - "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100" + "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_DRAWER_HIT) else 0" }, { "BriefDescription": 
"Percentage sourced from Level 4 Remote cache on different book", "MetricName": "l4rp", - "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100" + "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_OFF_DRAWER) else 0" }, { "BriefDescription": "Percentage sourced from memory", "MetricName": "memp", - "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100" + "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_ON_CHIP_MEMORY) else 0" }, { "BriefDescription": "Cycles per Instructions from Finite cache/memory", "MetricName": "finite_cpi", - "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS" + "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS if has_event(L1C_TLB2_MISSES) else 0" }, { "BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1", "MetricName": "est_cpi", - "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS)" + "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS) if has_event(INSTRUCTIONS) else 0" }, { "BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss", "MetricName": "scpl1m", - "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES)" + "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES) if has_event(L1C_TLB2_MISSES) else 0" }, { "BriefDescription": "Estimated TLB CPU percentage of Total CPU", "MetricName": "tlb_percent", - "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100" + "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100 if has_event(CPU_CYCLES) else 0" }, { "BriefDescription": "Estimated Cycles per TLB Miss", "MetricName": "tlb_miss", - "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES))" + "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) if has_event(DTLB2_MISSES) else 0" } ] diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv index a918e1af77a5..b22648d12751 100644 --- a/tools/perf/pmu-events/arch/s390/mapfile.csv +++ b/tools/perf/pmu-events/arch/s390/mapfile.csv @@ -5,4 +5,4 @@ Family-model,Version,Filename,EventType ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core ^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core -^IBM.393[12].*3\.7.[[:xdigit:]]+$,3,cf_z16,core +^IBM.393[12].*$,3,cf_z16,core diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json index bbfa3883e533..b72c0e2cb946 100644 --- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json @@ -811,13 
+811,13 @@ "MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_0@ + cpu_core@UOPS_DISPATCHED.PORT_1@ + cpu_core@UOPS_DISPATCHED.PORT_5_11@ + cpu_core@UOPS_DISPATCHED.PORT_6@) / (5 * tma_info_core_core_clks)", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%", "Unit": "cpu_core" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * cpu_core@ASSISTS.ANY\\,umask\\=0x1B@ / tma_info_thread_slots", + "MetricExpr": "78 * cpu_core@ASSISTS.ANY@ / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -880,6 +880,24 @@ "Unit": "cpu_core" }, { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C01@ / tma_info_thread_clks", + "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group", + "MetricName": "tma_c01_wait", + "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C02@ / tma_info_thread_clks", + "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group", + "MetricName": "tma_c02_wait", + "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction", "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", @@ -901,7 +919,7 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", - "MetricExpr": "(25 * tma_info_system_average_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) + 24 * tma_info_system_average_frequency * cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", + "MetricExpr": "(25 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) + 24 * tma_info_system_core_frequency * cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", "MetricGroup": 
"DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -922,7 +940,7 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", - "MetricExpr": "24 * tma_info_system_average_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", + "MetricExpr": "24 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -935,7 +953,7 @@ "MetricExpr": "(cpu_core@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu_core@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%", "Unit": "cpu_core" @@ -965,7 +983,7 @@ "MetricExpr": "(cpu_core@IDQ.DSB_CYCLES_ANY@ - cpu_core@IDQ.DSB_CYCLES_OK@) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%", "Unit": "cpu_core" @@ -986,7 +1004,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. 
TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -996,13 +1014,13 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. 
Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%", "Unit": "cpu_core" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", - "MetricExpr": "28 * tma_info_system_average_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks", + "MetricExpr": "28 * tma_info_system_core_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -1016,7 +1034,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -1025,7 +1043,7 @@ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. 
Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%", @@ -1127,10 +1145,10 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions", "MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.MACRO_FUSED@ / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_fused_instructions", "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -1141,14 +1159,14 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. Sample with: UOPS_RETIRED.HEAVY", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .). Sample with: UOPS_RETIRED.HEAVY", "ScaleUnit": "100%", "Unit": "cpu_core" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. 
Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", @@ -1157,7 +1175,7 @@ }, { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers", @@ -1181,7 +1199,7 @@ }, { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL\\,umask\\=0x80@ / BR_MISP_RETIRED.INDIRECT", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.INDIRECT", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3", @@ -1204,6 +1222,13 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "cpu_core@INT_MISC.CLEARS_COUNT@ / (cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ + cpu_core@MACHINE_CLEARS.COUNT@)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio", + "Unit": "cpu_core" + }, + { "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", "MetricGroup": "Cor;SMT", @@ -1230,61 +1255,94 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + cpu_core@BR_INST_RETIRED.NEAR_CALL@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20", + "Unit": "cpu_core" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). 
Related metrics: tma_info_bottleneck_branching_overhead", "Unit": "cpu_core" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((cpu_core@BR_INST_RETIRED.COND@ + 3 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + (cpu_core@BR_INST_RETIRED.NEAR_TAKEN@ - cpu_core@BR_INST_RETIRED.COND_TAKEN@ - 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + cpu_core@BR_INST_RETIRED.NEAR_CALL@) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_bottleneck_big_code", + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. 
Related metrics: tma_l3_hit_latency, tma_mem_latency", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: ", "Unit": "cpu_core" }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20", "Unit": "cpu_core" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full", + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY\\,umask\\=1@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches", "Unit": "cpu_core" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store", + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization", "Unit": "cpu_core" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency", + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", "Unit": "cpu_core" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", @@ -1292,6 +1350,15 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). 
Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.", + "Unit": "cpu_core" + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@BR_INST_RETIRED.NEAR_RETURN@) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -1328,7 +1395,7 @@ }, { "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core", - "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.DISTRIBUTED@", + "MetricExpr": "(cpu_core@CPU_CLK_UNHALTED.DISTRIBUTED@ if #SMT_on else tma_info_thread_clks)", "MetricGroup": "SMT", "MetricName": "tma_info_core_core_clks", "Unit": "cpu_core" @@ -1341,8 +1408,15 @@ "Unit": "cpu_core" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc", + "Unit": "cpu_core" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / tma_info_core_core_clks", + "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc", "Unit": "cpu_core" @@ -1356,8 +1430,8 @@ "Unit": "cpu_core" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp", "Unit": "cpu_core" @@ -1429,6 +1503,14 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection", + "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@", + "MetricGroup": "Fed", + "MetricName": "tma_info_frontend_unknown_branch_cost", + "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.", + "Unit": "cpu_core" + }, + { "BriefDescription": "Branch instructions per taken branch.", "MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.NEAR_TAKEN", "MetricGroup": "Branches;Fed;PGO", @@ -1449,7 +1531,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. 
Approximated prior to BDW.", + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.", "Unit": "cpu_core" }, { @@ -1458,7 +1540,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.", + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.", "Unit": "cpu_core" }, { @@ -1467,7 +1549,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.", + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.", "Unit": "cpu_core" }, { @@ -1476,7 +1558,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.", + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.", "Unit": "cpu_core" }, { @@ -1485,7 +1567,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.", + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.", "Unit": "cpu_core" }, { @@ -1506,7 +1588,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10", @@ -1521,6 +1603,13 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / CPU_CLK_UNHALTED.PAUSE_INST", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_ippause", + "Unit": "cpu_core" + }, + { "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / MEM_INST_RETIRED.ALL_STORES", "MetricGroup": "InsType", @@ -1547,164 +1636,178 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * cpu_core@L1D.REPLACEMENT@ / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw", + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t", "Unit": "cpu_core" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * cpu_core@L2_LINES_IN.ALL@ / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw", + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t", "Unit": "cpu_core" }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * cpu_core@OFFCORE_REQUESTS.ALL_REQUESTS@ / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw", + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t", "Unit": "cpu_core" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * cpu_core@LONGEST_LAT_CACHE.MISS@ / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw", + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t", "Unit": "cpu_core" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki", "Unit": "cpu_core" }, { + "BriefDescription": "", + "MetricExpr": "64 * cpu_core@L1D.REPLACEMENT@ / 1e9 / 
duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw", + "Unit": "cpu_core" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki", "Unit": "cpu_core" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * cpu_core@L2_RQSTS.ALL_DEMAND_DATA_RD@ / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki_load", "Unit": "cpu_core" }, { + "BriefDescription": "", + "MetricExpr": "64 * cpu_core@L2_LINES_IN.ALL@ / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw", + "Unit": "cpu_core" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (cpu_core@L2_RQSTS.REFERENCES@ - cpu_core@L2_RQSTS.MISS@) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all", "Unit": "cpu_core" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_HIT@ / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load", "Unit": "cpu_core" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L2_MISS@ / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki", "Unit": "cpu_core" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * cpu_core@L2_RQSTS.MISS@ / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all", "Unit": "cpu_core" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_MISS@ / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load", "Unit": "cpu_core" }, { - "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", - "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L3_MISS@ / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", - "MetricName": "tma_info_memory_l3mpki", + "BriefDescription": "", + "MetricExpr": "64 * cpu_core@OFFCORE_REQUESTS.ALL_REQUESTS@ / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw", "Unit": "cpu_core" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / MEM_LOAD_COMPLETED.L1_MISS_ANY", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency", + "BriefDescription": "", + "MetricExpr": "64 * cpu_core@LONGEST_LAT_CACHE.MISS@ / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + 
"MetricName": "tma_info_memory_l3_cache_fill_bw", "Unit": "cpu_core" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)", + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L3_MISS@ / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_l3mpki", "Unit": "cpu_core" }, { "BriefDescription": "Average Parallel L2 cache miss data reads", - "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD@ / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD@ / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp", + "MetricName": "tma_info_memory_latency_data_l2_mlp", "Unit": "cpu_core" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency", + "MetricName": "tma_info_memory_latency_load_l2_miss_latency", "Unit": "cpu_core" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp", + "MetricName": "tma_info_memory_latency_load_l2_mlp", "Unit": "cpu_core" }, { "BriefDescription": "Average Latency for L3 cache miss demand Loads", "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l3_miss_latency", + "MetricName": "tma_info_memory_latency_load_l3_miss_latency", "Unit": "cpu_core" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / MEM_LOAD_COMPLETED.L1_MISS_ANY", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency", "Unit": "cpu_core" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t", + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "1e3 * cpu_core@SQ_MISC.BUS_LOCK@ / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_bus_lock_pki", "Unit": "cpu_core" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - 
"MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t", + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * cpu_core@MEM_LOAD_MISC_RETIRED.UC@ / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki", "Unit": "cpu_core" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)", "Unit": "cpu_core" }, { @@ -1737,16 +1840,16 @@ "Unit": "cpu_core" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute", "Unit": "cpu_core" }, { "BriefDescription": "Instructions per a microcode Assist invocation", - "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@", - "MetricGroup": "Pipeline;Ret;Retire", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / ASSISTS.ANY", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", "MetricName": "tma_info_pipeline_ipassist", "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", "PublicDescription": "Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)", @@ -1762,39 +1865,54 @@ { "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions", "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@", - "MetricGroup": "Pipeline;Ret", + "MetricGroup": "MicroSeq;Pipeline;Ret", "MetricName": "tma_info_pipeline_strings_cycles", "MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1", "Unit": "cpu_core" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C0_WAIT@ / tma_info_thread_clks", + "MetricGroup": "C0Wait", + "MetricName": "tma_info_system_c0_wait", + "MetricThreshold": "tma_info_system_c0_wait > 0.05", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency", + "MetricName": "tma_info_system_core_frequency", "Unit": "cpu_core" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization", "Unit": "cpu_core" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized", + "Unit": "cpu_core" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full", + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full", "Unit": "cpu_core" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / 1e9 / duration_time", + "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine.", + "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width", "Unit": "cpu_core" }, { @@ -1838,14 +1956,6 @@ "Unit": "cpu_core" }, { - "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.ALL", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_request_latency", - "Unit": "cpu_core" - }, - { "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active", "MetricExpr": "(1 - cpu_core@CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE@ / cpu_core@CPU_CLK_UNHALTED.REF_DISTRIBUTED@ if #SMT_on else 0)", "MetricGroup": "SMT", @@ -1927,7 +2037,7 @@ }, { "BriefDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired)", - "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b + tma_shuffles", + "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_int_operations", "MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6", @@ -1946,19 +2056,19 @@ "Unit": "cpu_core" }, { - "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired", + "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired", "MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots)", "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P", "MetricName": "tma_int_vector_256b", "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)", - "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%", "Unit": "cpu_core" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1968,7 +2078,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1978,7 +2088,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. 
Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1988,7 +2098,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", @@ -1996,12 +2106,12 @@ "Unit": "cpu_core" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "9 * tma_info_system_average_frequency * cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "9 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -2022,7 +2132,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. 
While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -2069,7 +2179,7 @@ "MetricExpr": "(cpu_core@LSD.CYCLES_ACTIVE@ - cpu_core@LSD.CYCLES_OK@) / tma_info_core_core_clks / 2", "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_lsd", - "MetricThreshold": "tma_lsd > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35)", + "MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.", "ScaleUnit": "100%", "Unit": "cpu_core" @@ -2086,22 +2196,22 @@ "Unit": "cpu_core" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. 
This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%", "Unit": "cpu_core" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -2120,9 +2230,9 @@ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.", "MetricConstraint": "NO_GROUP_EVENTS_NMI", "MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_memory_fence", - "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -2141,7 +2251,7 @@ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. 
Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -2160,35 +2270,35 @@ "MetricExpr": "(cpu_core@IDQ.MITE_CYCLES_ANY@ - cpu_core@IDQ.MITE_CYCLES_OK@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%", "Unit": "cpu_core" }, { - "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued", + "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", "MetricExpr": "160 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_clks", "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", "MetricName": "tma_mixing_vectors", "MetricThreshold": "tma_mixing_vectors > 0.05", - "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", + "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. 
Related metrics: tma_ms_switches", "ScaleUnit": "100%", "Unit": "cpu_core" }, { "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)", - "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (tma_retiring * tma_info_thread_slots / cpu_core@UOPS_ISSUED.ANY@) / tma_info_thread_clks", + "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@) / tma_info_thread_clks", "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", "MetricName": "tma_ms_switches", "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", - "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%", "Unit": "cpu_core" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused", "MetricExpr": "tma_light_operations * (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ - cpu_core@INST_RETIRED.MACRO_FUSED@) / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_non_fused_branches", "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. 
Can be used to examine fusible conditional jumps that were not fused.", @@ -2198,16 +2308,16 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.NOP@ / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%", "Unit": "cpu_core" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -2216,6 +2326,24 @@ "Unit": "cpu_core" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / (cpu_core@INT_MISC.CLEARS_COUNT@ - cpu_core@MACHINE_CLEARS.COUNT@)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - cpu_core@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_core@MACHINE_CLEARS.COUNT@), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults", "MetricExpr": "99 * cpu_core@ASSISTS.PAGE_FAULT@ / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group", @@ -2246,18 +2374,18 @@ "Unit": "cpu_core" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": 
"cpu_core@UOPS_DISPATCHED.PORT_6@ / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%", "Unit": "cpu_core" }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((cpu_core@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@) + (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / tma_info_thread_clks if cpu_core@ARITH.DIV_ACTIVE@ < cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ else (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / tma_info_thread_clks if cpu_core@ARITH.DIV_ACTIVE@ < cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ else (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -2267,7 +2395,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "cpu_core@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks + tma_serializing_operation * (cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@) / tma_info_thread_clks", + "MetricExpr": "(cpu_core@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + cpu_core@RS.EMPTY\\,umask\\=1@) / tma_info_thread_clks * (cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -2302,7 +2430,7 @@ "MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - 
"MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3", "ScaleUnit": "100%", "Unit": "cpu_core" @@ -2321,20 +2449,21 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", - "MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks + tma_c02_wait", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches", "ScaleUnit": "100%", "Unit": "cpu_core" }, { - "BriefDescription": "This metric represents Shuffle (cross \"vector lane\" data transfers) uops fraction the CPU has retired.", - "MetricExpr": "cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group", - "MetricName": "tma_shuffles", - "MetricThreshold": "tma_shuffles > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)", + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer)", + "MetricExpr": "tma_light_operations * cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", + "MetricName": "tma_shuffles_256b", + "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). 
Shuffles may incur slow cross \"vector lane\" data transfers.", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -2342,9 +2471,9 @@ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", "MetricConstraint": "NO_GROUP_EVENTS_NMI", "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_slow_pause", - "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST", "ScaleUnit": "100%", "Unit": "cpu_core" @@ -2375,7 +2504,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%", "Unit": "cpu_core" }, @@ -2450,10 +2579,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. 
Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH", "ScaleUnit": "100%", "Unit": "cpu_core" }, diff --git a/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json b/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json index c8ba96c4a7f8..cd291943dc08 100644 --- a/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json +++ b/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json @@ -26,7 +26,7 @@ "Unit": "cpu_core" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_0", "SampleAfterValue": "2000003", @@ -34,7 +34,7 @@ "Unit": "cpu_core" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_1", "SampleAfterValue": "2000003", @@ -42,7 +42,7 @@ "Unit": "cpu_core" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_5", "SampleAfterValue": "2000003", @@ -50,6 +50,30 @@ "Unit": "cpu_core" }, { + "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V0", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V1", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V2", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. 
DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", "EventCode": "0xc7", "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", diff --git a/tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json b/tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json index 516eb0f93f02..7a03835f262c 100644 --- a/tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json @@ -2,10 +2,11 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "C0Wait": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -26,7 +27,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -69,6 +72,7 @@ "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category", "tma_base_group": "Metrics contributing to tma_base category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", "tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category", @@ -82,9 +86,9 @@ "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category", "tma_int_operations_group": "Metrics contributing to tma_int_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue 
$issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -111,6 +115,7 @@ "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", "tma_nuke_group": "Metrics contributing to tma_nuke category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/alderlake/other.json b/tools/perf/pmu-events/arch/x86/alderlake/other.json index 1db73e020215..5250a17d9cae 100644 --- a/tools/perf/pmu-events/arch/x86/alderlake/other.json +++ b/tools/perf/pmu-events/arch/x86/alderlake/other.json @@ -40,6 +40,16 @@ "Unit": "cpu_core" }, { + "BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]", + "Deprecated": "1", + "EventCode": "0xe4", + "EventName": "LBR_INSERTS.ANY", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.", "EventCode": "0xB7", "EventName": "OCR.COREWB_M.ANY_RESPONSE", diff --git a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json index f9876bef16da..df6032e816d4 100644 --- a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json @@ -799,6 +799,7 @@ "BriefDescription": "INST_RETIRED.MACRO_FUSED", "EventCode": "0xc0", "EventName": "INST_RETIRED.MACRO_FUSED", + "PEBS": "1", "SampleAfterValue": "2000003", "UMask": "0x10", "Unit": "cpu_core" @@ -807,6 +808,7 @@ "BriefDescription": "Retired NOP instructions.", "EventCode": "0xc0", "EventName": "INST_RETIRED.NOP", + "PEBS": "1", "PublicDescription": "Counts all retired NOP or ENDBR32/64 instructions", "SampleAfterValue": "2000003", "UMask": "0x2", @@ -825,6 +827,7 @@ "BriefDescription": "Iterations of Repeat string retired instructions.", "EventCode": "0xc0", "EventName": "INST_RETIRED.REP_ITERATION", + "PEBS": "1", "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.", "SampleAfterValue": "2000003", "UMask": "0x8", @@ -1107,6 +1110,16 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]", + "EventCode": "0xe4", + "EventName": "MISC_RETIRED.LBR_INSERTS", + "PEBS": "1", + "PublicDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL. 
This event is PDIR on GP0 and NPEBS on all other GPs [This event is alias to LBR_INSERTS.ANY]", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Increments whenever there is an update to the LBR array.", "EventCode": "0xcc", "EventName": "MISC_RETIRED.LBR_INSERTS", diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/other.json b/tools/perf/pmu-events/arch/x86/alderlaken/other.json index 6336de61f628..ccc892149dbe 100644 --- a/tools/perf/pmu-events/arch/x86/alderlaken/other.json +++ b/tools/perf/pmu-events/arch/x86/alderlaken/other.json @@ -1,5 +1,14 @@ [ { + "BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]", + "Deprecated": "1", + "EventCode": "0xe4", + "EventName": "LBR_INSERTS.ANY", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.", "EventCode": "0xB7", "EventName": "OCR.COREWB_M.ANY_RESPONSE", diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json index 3153bab527a9..846bcdafca6d 100644 --- a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json @@ -345,6 +345,15 @@ "UMask": "0x1" }, { + "BriefDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]", + "EventCode": "0xe4", + "EventName": "MISC_RETIRED.LBR_INSERTS", + "PEBS": "1", + "PublicDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL. This event is PDIR on GP0 and NPEBS on all other GPs [This event is alias to LBR_INSERTS.ANY]", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { "BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.", "EventCode": "0x75", "EventName": "SERIALIZATION.NON_C01_MS_SCB", diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/cache.json b/tools/perf/pmu-events/arch/x86/amdzen4/cache.json index ecbe9660b2b3..e6d710cf3ce2 100644 --- a/tools/perf/pmu-events/arch/x86/amdzen4/cache.json +++ b/tools/perf/pmu-events/arch/x86/amdzen4/cache.json @@ -676,6 +676,10 @@ "EventCode": "0xac", "BriefDescription": "Average sampled latency when data is sourced from DRAM in the same NUMA node.", "UMask": "0x01", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -683,6 +687,10 @@ "EventCode": "0xac", "BriefDescription": "Average sampled latency when data is sourced from DRAM in a different NUMA node.", "UMask": "0x02", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -690,6 +698,10 @@ "EventCode": "0xac", "BriefDescription": "Average sampled latency when data is sourced from another CCX's cache when the address was in the same NUMA node.", "UMask": "0x04", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -697,6 +709,10 @@ "EventCode": "0xac", "BriefDescription": "Average sampled latency when data is sourced from another CCX's cache when the address was in a different NUMA node.", "UMask": "0x08", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + 
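(The fields being added to these L3PMC events mirror the format attributes of the amd_l3 uncore PMU, so an equivalent raw encoding can be programmed by hand. A sketch, assuming a Zen 4 system that exposes enallcores, enallslices, sliceid and threadmask under /sys/bus/event_source/devices/amd_l3/format/:

    perf stat -e amd_l3/event=0xac,umask=0x3f,enallcores=0x1,enallslices=0x1,sliceid=0x3,threadmask=0x3/ -a -- sleep 1

The sampled-latency counts from event 0xac are typically read together with the matching 0xad fill-request counts when deriving an average latency; the exact recipe is in the PPR.)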
"ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -704,6 +720,10 @@ "EventCode": "0xac", "BriefDescription": "Average sampled latency when data is sourced from extension memory (CXL) in the same NUMA node.", "UMask": "0x10", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -711,6 +731,10 @@ "EventCode": "0xac", "BriefDescription": "Average sampled latency when data is sourced from extension memory (CXL) in a different NUMA node.", "UMask": "0x20", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -718,6 +742,10 @@ "EventCode": "0xac", "BriefDescription": "Average sampled latency from all data sources.", "UMask": "0x3f", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -725,6 +753,10 @@ "EventCode": "0xad", "BriefDescription": "L3 cache fill requests sourced from DRAM in the same NUMA node.", "UMask": "0x01", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -732,6 +764,10 @@ "EventCode": "0xad", "BriefDescription": "L3 cache fill requests sourced from DRAM in a different NUMA node.", "UMask": "0x02", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -739,6 +775,10 @@ "EventCode": "0xad", "BriefDescription": "L3 cache fill requests sourced from another CCX's cache when the address was in the same NUMA node.", "UMask": "0x04", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -746,6 +786,10 @@ "EventCode": "0xad", "BriefDescription": "L3 cache fill requests sourced from another CCX's cache when the address was in a different NUMA node.", "UMask": "0x08", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -753,6 +797,10 @@ "EventCode": "0xad", "BriefDescription": "L3 cache fill requests sourced from extension memory (CXL) in the same NUMA node.", "UMask": "0x10", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -760,6 +808,10 @@ "EventCode": "0xad", "BriefDescription": "L3 cache fill requests sourced from extension memory (CXL) in a different NUMA node.", "UMask": "0x20", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" }, { @@ -767,6 +819,10 @@ "EventCode": "0xad", "BriefDescription": "L3 cache fill requests sourced from all data sources.", "UMask": "0x3f", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", "Unit": "L3PMC" } ] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/branch-prediction.json b/tools/perf/pmu-events/arch/x86/amdzen5/branch-prediction.json new file mode 100644 index 000000000000..2d8d18cb85c1 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/branch-prediction.json @@ -0,0 +1,93 @@ +[ + { + "EventName": "bp_l1_tlb_miss_l2_tlb_hit", + "EventCode": "0x84", + "BriefDescription": "Instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB." 
+ }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 4k pages.", + "UMask": "0x01" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 2M pages.", + "UMask": "0x02" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 1G pages.", + "UMask": "0x04" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.coalesced_4k", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.", + "UMask": "0x08" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.all", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for all page sizes.", + "UMask": "0x0f" + }, + { + "EventName": "bp_l2_btb_correct", + "EventCode": "0x8b", + "BriefDescription": "L2 branch prediction overrides existing prediction (speculative)." + }, + { + "EventName": "bp_dyn_ind_pred", + "EventCode": "0x8e", + "BriefDescription": "Dynamic indirect predictions (branch used the indirect predictor to make a prediction)." + }, + { + "EventName": "bp_de_redirect", + "EventCode": "0x91", + "BriefDescription": "Number of times an early redirect is sent to branch predictor. This happens when either the decoder or dispatch logic is able to detect that the branch predictor needs to be redirected." + }, + { + "EventName": "bp_l1_tlb_fetch_hit.if4k", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 4k or coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.", + "UMask": "0x01" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.if2m", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 2M pages.", + "UMask": "0x02" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.if1g", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 1G pages.", + "UMask": "0x04" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.all", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for all page sizes.", + "UMask": "0x07" + }, + { + "EventName": "bp_redirects.resync", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the branch predictor caused by resyncs.", + "UMask": "0x01" + }, + { + "EventName": "bp_redirects.ex_redir", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the branch predictor caused by mispredicts.", + "UMask": "0x02" + }, + { + "EventName": "bp_redirects.all", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the branch predictor." + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/decode.json b/tools/perf/pmu-events/arch/x86/amdzen5/decode.json new file mode 100644 index 000000000000..d0eff7f2a3ea --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/decode.json @@ -0,0 +1,115 @@ +[ + { + "EventName": "de_op_queue_empty", + "EventCode": "0xa9", + "BriefDescription": "Cycles where the op queue is empty. 
Such cycles indicate that the front-end is not delivering instructions fast enough." + }, + { + "EventName": "de_src_op_disp.x86_decoder", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from x86 decoder.", + "UMask": "0x01" + }, + { + "EventName": "de_src_op_disp.op_cache", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from op cache.", + "UMask": "0x02" + }, + { + "EventName": "de_src_op_disp.all", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from any source.", + "UMask": "0x07" + }, + { + "EventName": "de_dis_ops_from_decoder.any_fp_dispatch", + "EventCode": "0xab", + "BriefDescription": "Number of ops dispatched to the floating-point unit.", + "UMask": "0x04" + }, + { + "EventName": "de_dis_ops_from_decoder.any_integer_dispatch", + "EventCode": "0xab", + "BriefDescription": "Number of ops dispatched to the integer execution unit.", + "UMask": "0x08" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.int_phy_reg_file_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to an integer physical register file resource stall.", + "UMask": "0x01" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.load_queue_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a lack of load queue tokens.", + "UMask": "0x02" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.store_queue_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a lack of store queue tokens.", + "UMask": "0x04" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.taken_brnch_buffer_rsrc", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a taken branch buffer resource stall.", + "UMask": "0x10" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.fp_sch_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a floating-point non-schedulable queue token stall.", + "UMask": "0x40" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.al_tokens", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of ALU tokens.", + "UMask": "0x01" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.ag_tokens", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of agen tokens.", + "UMask": "0x02" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.ex_flush_recovery", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a pending integer execution flush recovery.", + "UMask": "0x04" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.retq", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of retire queue tokens.", + "UMask": "0x20" + }, + { + "EventName": "de_no_dispatch_per_slot.no_ops_from_frontend", + "EventCode": "0x1a0", + "BriefDescription": "In each cycle counts dispatch slots left empty because the front-end did not supply ops.", + "UMask": "0x01" + }, + { 
+ "EventName": "de_no_dispatch_per_slot.backend_stalls", + "EventCode": "0x1a0", + "BriefDescription": "In each cycle counts ops unable to dispatch because of back-end stalls.", + "UMask": "0x1e" + }, + { + "EventName": "de_no_dispatch_per_slot.smt_contention", + "EventCode": "0x1a0", + "BriefDescription": "In each cycle counts ops unable to dispatch because the dispatch cycle was granted to the other SMT thread.", + "UMask": "0x60" + }, + { + "EventName": "de_additional_resource_stalls.dispatch_stalls", + "EventCode": "0x1a2", + "BriefDescription": "Counts additional cycles where dispatch is stalled due to a lack of dispatch resources.", + "UMask": "0x30" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/execution.json b/tools/perf/pmu-events/arch/x86/amdzen5/execution.json new file mode 100644 index 000000000000..5a46d3db74e7 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/execution.json @@ -0,0 +1,174 @@ +[ + { + "EventName": "ex_ret_instr", + "EventCode": "0xc0", + "BriefDescription": "Retired instructions." + }, + { + "EventName": "ex_ret_ops", + "EventCode": "0xc1", + "BriefDescription": "Retired macro-ops." + }, + { + "EventName": "ex_ret_brn", + "EventCode": "0xc2", + "BriefDescription": "Retired branch instructions (all types of architectural control flow changes, including exceptions and interrupts)." + }, + { + "EventName": "ex_ret_brn_misp", + "EventCode": "0xc3", + "BriefDescription": "Retired branch instructions mispredicted." + }, + { + "EventName": "ex_ret_brn_tkn", + "EventCode": "0xc4", + "BriefDescription": "Retired taken branch instructions (all types of architectural control flow changes, including exceptions and interrupts)." + }, + { + "EventName": "ex_ret_brn_tkn_misp", + "EventCode": "0xc5", + "BriefDescription": "Retired taken branch instructions mispredicted." + }, + { + "EventName": "ex_ret_brn_far", + "EventCode": "0xc6", + "BriefDescription": "Retired far control transfers (far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts). Far control transfers are not subject to branch prediction." + }, + { + "EventName": "ex_ret_near_ret", + "EventCode": "0xc8", + "BriefDescription": "Retired near returns (RET or RET Iw)." + }, + { + "EventName": "ex_ret_near_ret_mispred", + "EventCode": "0xc9", + "BriefDescription": "Retired near returns mispredicted. Each misprediction incurs the same penalty as a mispredicted conditional branch instruction." + }, + { + "EventName": "ex_ret_brn_ind_misp", + "EventCode": "0xca", + "BriefDescription": "Retired indirect branch instructions mispredicted (only EX mispredicts). Each misprediction incurs the same penalty as a mispredicted conditional branch instruction." + }, + { + "EventName": "ex_ret_mmx_fp_instr.x87", + "EventCode": "0xcb", + "BriefDescription": "Retired x87 instructions.", + "UMask": "0x01" + }, + { + "EventName": "ex_ret_mmx_fp_instr.mmx", + "EventCode": "0xcb", + "BriefDescription": "Retired MMX instructions.", + "UMask": "0x02" + }, + { + "EventName": "ex_ret_mmx_fp_instr.sse", + "EventCode": "0xcb", + "BriefDescription": "Retired SSE instructions (includes SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42 and AVX).", + "UMask": "0x04" + }, + { + "EventName": "ex_ret_ind_brch_instr", + "EventCode": "0xcc", + "BriefDescription": "Retired indirect branch instructions." + }, + { + "EventName": "ex_ret_cond", + "EventCode": "0xd1", + "BriefDescription": "Retired conditional branch instructions." 
+ }, + { + "EventName": "ex_div_busy", + "EventCode": "0xd3", + "BriefDescription": "Number of cycles the divider is busy." + }, + { + "EventName": "ex_div_count", + "EventCode": "0xd4", + "BriefDescription": "Divide ops executed." + }, + { + "EventName": "ex_no_retire.empty", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire due to the lack of valid ops in the retire queue (may be caused by front-end bottlenecks or pipeline redirects).", + "UMask": "0x01" + }, + { + "EventName": "ex_no_retire.not_complete", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire while the oldest op is waiting to be executed.", + "UMask": "0x02" + }, + { + "EventName": "ex_no_retire.other", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire caused by other reasons (retire breaks, traps, faults, etc.).", + "UMask": "0x08" + }, + { + "EventName": "ex_no_retire.thread_not_selected", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire because thread arbitration did not select the thread.", + "UMask": "0x10" + }, + { + "EventName": "ex_no_retire.load_not_complete", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire while the oldest op is waiting for load data.", + "UMask": "0xa2" + }, + { + "EventName": "ex_no_retire.all", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire for any reason.", + "UMask": "0x1b" + }, + { + "EventName": "ex_ret_ucode_instr", + "EventCode": "0x1c1", + "BriefDescription": "Retired microcoded instructions." + }, + { + "EventName": "ex_ret_ucode_ops", + "EventCode": "0x1c2", + "BriefDescription": "Retired microcode ops." + }, + { + "EventName": "ex_ret_msprd_brnch_instr_dir_msmtch", + "EventCode": "0x1c7", + "BriefDescription": "Retired branch instructions mispredicted due to direction mismatch." + }, + { + "EventName": "ex_ret_uncond_brnch_instr_mispred", + "EventCode": "0x1c8", + "BriefDescription": "Retired unconditional indirect branch instructions mispredicted." + }, + { + "EventName": "ex_ret_uncond_brnch_instr", + "EventCode": "0x1c9", + "BriefDescription": "Retired unconditional branch instructions." + }, + { + "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops", + "EventCode": "0x1cf", + "BriefDescription": "Ops tagged by IBS.", + "UMask": "0x01" + }, + { + "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret", + "EventCode": "0x1cf", + "BriefDescription": "Ops tagged by IBS that retired.", + "UMask": "0x02" + }, + { + "EventName": "ex_tagged_ibs_ops.ibs_count_rollover", + "EventCode": "0x1cf", + "BriefDescription": "Ops not tagged by IBS due to a previous tagged op that has not yet signaled interrupt.", + "UMask": "0x04" + }, + { + "EventName": "ex_ret_fused_instr", + "EventCode": "0x1d0", + "BriefDescription": "Retired fused instructions." 
+ } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen5/floating-point.json new file mode 100644 index 000000000000..9204bfb1d69e --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/floating-point.json @@ -0,0 +1,812 @@ +[ + { + "EventName": "fp_ret_x87_fp_ops.add_sub_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point add and subtract ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ret_x87_fp_ops.mul_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point multiply ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ret_x87_fp_ops.div_sqrt_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point divide and square root ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ret_x87_fp_ops.all", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point ops of all types.", + "UMask": "0x07" + }, + { + "EventName": "fp_ret_sse_avx_ops.add_sub_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point add and subtract ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ret_sse_avx_ops.mult_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point multiply ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ret_sse_avx_ops.div_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point divide and square root ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ret_sse_avx_ops.mac_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point multiply-accumulate ops (each operation is counted as 2 ops).", + "UMask": "0x08" + }, + { + "EventName": "fp_ret_sse_avx_ops.bfloat16_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point bfloat16 ops.", + "UMask": "0x20" + }, + { + "EventName": "fp_ret_sse_avx_ops.scalar_single_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point scalar single-precision ops.", + "UMask": "0x40" + }, + { + "EventName": "fp_ret_sse_avx_ops.packed_single_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point packed single-precision ops.", + "UMask": "0x60" + }, + { + "EventName": "fp_ret_sse_avx_ops.scalar_double_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point scalar double-precision ops.", + "UMask": "0x80" + }, + { + "EventName": "fp_ret_sse_avx_ops.packed_double_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point packed double-precision ops.", + "UMask": "0xa0" + }, + { + "EventName": "fp_ret_sse_avx_ops.all", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_ops_retired_by_width.x87_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired x87 floating-point ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ops_retired_by_width.mmx_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired MMX floating-point ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ops_retired_by_width.scalar_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired scalar floating-point ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ops_retired_by_width.pack_128_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired packed 128-bit floating-point ops.", + "UMask": "0x08" + }, + { + 
"EventName": "fp_ops_retired_by_width.pack_256_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired packed 256-bit floating-point ops.", + "UMask": "0x10" + }, + { + "EventName": "fp_ops_retired_by_width.pack_512_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired packed 512-bit floating-point ops.", + "UMask": "0x20" + }, + { + "EventName": "fp_ops_retired_by_width.all", + "EventCode": "0x08", + "BriefDescription": "Retired floating-point ops of all widths.", + "UMask": "0x3f" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_add", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point add ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_sub", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point subtract ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_mul", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point multiply ops.", + "UMask": "0x03" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_mac", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point multiply-accumulate ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_div", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point divide ops.", + "UMask": "0x05" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_sqrt", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point square root ops.", + "UMask": "0x06" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_cmp", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point compare ops.", + "UMask": "0x07" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_cvt", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point convert ops.", + "UMask": "0x08" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_blend", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point blend ops.", + "UMask": "0x09" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_other", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point ops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_all", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_ops_retired_by_type.vector_add", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point add ops.", + "UMask": "0x10" + }, + { + "EventName": "fp_ops_retired_by_type.vector_sub", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point subtract ops.", + "UMask": "0x20" + }, + { + "EventName": "fp_ops_retired_by_type.vector_mul", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point multiply ops.", + "UMask": "0x30" + }, + { + "EventName": "fp_ops_retired_by_type.vector_mac", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point multiply-accumulate ops.", + "UMask": "0x40" + }, + { + "EventName": "fp_ops_retired_by_type.vector_div", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point divide ops.", + "UMask": "0x50" + }, + { + "EventName": "fp_ops_retired_by_type.vector_sqrt", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point square root ops.", + "UMask": "0x60" + }, + { + "EventName": "fp_ops_retired_by_type.vector_cmp", + "EventCode": "0x0a", + "BriefDescription": 
"Retired vector floating-point compare ops.", + "UMask": "0x70" + }, + { + "EventName": "fp_ops_retired_by_type.vector_cvt", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point convert ops.", + "UMask": "0x80" + }, + { + "EventName": "fp_ops_retired_by_type.vector_blend", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point blend ops.", + "UMask": "0x90" + }, + { + "EventName": "fp_ops_retired_by_type.vector_shuffle", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_ops_retired_by_type.vector_logical", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point logical ops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_ops_retired_by_type.vector_other", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point ops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_ops_retired_by_type.vector_all", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point ops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_ops_retired_by_type.all", + "EventCode": "0x0a", + "BriefDescription": "Retired floating-point ops of all types.", + "UMask": "0xff" + }, + { + "EventName": "sse_avx_ops_retired.mmx_add", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer add.", + "UMask": "0x01" + }, + { + "EventName": "sse_avx_ops_retired.mmx_sub", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer subtract ops.", + "UMask": "0x02" + }, + { + "EventName": "sse_avx_ops_retired.mmx_mul", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer multiply ops.", + "UMask": "0x03" + }, + { + "EventName": "sse_avx_ops_retired.mmx_mac", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer multiply-accumulate ops.", + "UMask": "0x04" + }, + { + "EventName": "sse_avx_ops_retired.mmx_cmp", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer compare ops.", + "UMask": "0x07" + }, + { + "EventName": "sse_avx_ops_retired.mmx_shift", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer shift ops.", + "UMask": "0x09" + }, + { + "EventName": "sse_avx_ops_retired.mmx_mov", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer MOV ops.", + "UMask": "0x0a" + }, + { + "EventName": "sse_avx_ops_retired.mmx_shuffle", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "sse_avx_ops_retired.mmx_pack", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer pack ops.", + "UMask": "0x0c" + }, + { + "EventName": "sse_avx_ops_retired.mmx_logical", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer logical ops.", + "UMask": "0x0d" + }, + { + "EventName": "sse_avx_ops_retired.mmx_other", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer ops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "sse_avx_ops_retired.mmx_all", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_add", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer add ops.", + "UMask": "0x10" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_sub", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer subtract ops.", + "UMask": "0x20" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_mul", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer multiply ops.", + "UMask": "0x30" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_mac", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer multiply-accumulate ops.", + "UMask": "0x40" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_aes", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer AES ops.", + "UMask": "0x50" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_sha", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer SHA ops.", + "UMask": "0x60" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_cmp", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer compare ops.", + "UMask": "0x70" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_clm", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer CLM ops.", + "UMask": "0x80" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_shift", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer shift ops.", + "UMask": "0x90" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_mov", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer MOV ops.", + "UMask": "0xa0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_shuffle", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_pack", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer pack ops.", + "UMask": "0xc0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_logical", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer logical ops.", + "UMask": "0xd0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_other", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer ops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_all", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer ops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "sse_avx_ops_retired.all", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE, AVX and MMX integer ops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_pack_ops_retired.fp128_add", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point add ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_pack_ops_retired.fp128_sub", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point subtract ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_pack_ops_retired.fp128_mul", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point multiply ops.", + "UMask": "0x03" + }, + { + "EventName": "fp_pack_ops_retired.fp128_mac", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point multiply-accumulate ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_pack_ops_retired.fp128_div", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point divide ops.", + "UMask": "0x05" + }, + { + "EventName": "fp_pack_ops_retired.fp128_sqrt", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point square root ops.", + "UMask": "0x06" + }, + { + "EventName": "fp_pack_ops_retired.fp128_cmp", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point compare ops.", + "UMask": "0x07" + }, + { + "EventName": "fp_pack_ops_retired.fp128_cvt", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point convert ops.", + "UMask": "0x08" + }, + { + "EventName": "fp_pack_ops_retired.fp128_blend", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point blend ops.", + "UMask": "0x09" + }, + { + "EventName": "fp_pack_ops_retired.fp128_shuffle", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "fp_pack_ops_retired.fp128_logical", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point logical ops.", + "UMask": "0x0d" + }, + { + "EventName": "fp_pack_ops_retired.fp128_other", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point ops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_pack_ops_retired.fp128_all", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_pack_ops_retired.fp256_add", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point add ops.", + "UMask": "0x10" + }, + { + "EventName": "fp_pack_ops_retired.fp256_sub", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point subtract ops.", + "UMask": "0x20" + }, + { + "EventName": "fp_pack_ops_retired.fp256_mul", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point multiply ops.", + "UMask": "0x30" + }, + { + "EventName": "fp_pack_ops_retired.fp256_mac", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point multiply-accumulate ops.", + "UMask": "0x40" + }, + { + "EventName": "fp_pack_ops_retired.fp256_div", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point divide ops.", + "UMask": "0x50" + }, + { + "EventName": "fp_pack_ops_retired.fp256_sqrt", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point square root ops.", + "UMask": "0x60" + }, + { + "EventName": "fp_pack_ops_retired.fp256_cmp", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point compare ops.", + "UMask": "0x70" + }, + { + "EventName": "fp_pack_ops_retired.fp256_cvt", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point convert ops.", + "UMask": "0x80" + }, + { + "EventName": "fp_pack_ops_retired.fp256_blend", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point blend ops.", + "UMask": "0x90" + }, + { + "EventName": "fp_pack_ops_retired.fp256_shuffle", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_pack_ops_retired.fp256_logical", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point logical ops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_pack_ops_retired.fp256_other", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point ops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_pack_ops_retired.fp256_all", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point ops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_pack_ops_retired.all", + "EventCode": "0x0c", + "BriefDescription": "Retired packed floating-point ops of all types.", + "UMask": "0xff" + }, + { + "EventName": "packed_int_op_type.int128_add", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer add ops.", + "UMask": "0x01" + }, + { + "EventName": "packed_int_op_type.int128_sub", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer subtract ops.", + "UMask": "0x02" + }, + { + "EventName": "packed_int_op_type.int128_mul", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer multiply ops.", + "UMask": "0x03" + }, + { + "EventName": "packed_int_op_type.int128_mac", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer multiply-accumulate ops.", + "UMask": "0x04" + }, + { + "EventName": "packed_int_op_type.int128_aes", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer AES ops.", + "UMask": "0x05" + }, + { + "EventName": "packed_int_op_type.int128_sha", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer SHA ops.", + "UMask": "0x06" + }, + { + "EventName": "packed_int_op_type.int128_cmp", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer compare ops.", + "UMask": "0x07" + }, + { + "EventName": "packed_int_op_type.int128_clm", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer CLM ops.", + "UMask": "0x08" + }, + { + "EventName": "packed_int_op_type.int128_shift", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer shift ops.", + "UMask": "0x09" + }, + { + "EventName": "packed_int_op_type.int128_mov", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer MOV ops.", + "UMask": "0x0a" + }, + { + "EventName": "packed_int_op_type.int128_shuffle", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "packed_int_op_type.int128_pack", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer pack ops.", + "UMask": "0x0c" + }, + { + "EventName": "packed_int_op_type.int128_logical", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer logical ops.", + "UMask": "0x0d" + }, + { + "EventName": "packed_int_op_type.int128_other", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer ops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "packed_int_op_type.int128_all", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "packed_int_op_type.int256_add", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer add ops.", + "UMask": "0x10" + }, + { + "EventName": "packed_int_op_type.int256_sub", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer subtract ops.", + "UMask": "0x20" + }, + { + "EventName": "packed_int_op_type.int256_mul", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer multiply ops.", + "UMask": "0x30" + }, + { + "EventName": "packed_int_op_type.int256_mac", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer multiply-accumulate ops.", + "UMask": "0x40" + }, + { + "EventName": "packed_int_op_type.int256_cmp", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer compare ops.", + "UMask": "0x70" + }, + { + "EventName": "packed_int_op_type.int256_shift", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer shift ops.", + "UMask": "0x90" + }, + { + "EventName": "packed_int_op_type.int256_mov", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer MOV ops.", + "UMask": "0xa0" + }, + { + "EventName": "packed_int_op_type.int256_shuffle", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "packed_int_op_type.int256_pack", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer pack ops.", + "UMask": "0xc0" + }, + { + "EventName": "packed_int_op_type.int256_logical", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer logical ops.", + "UMask": "0xd0" + }, + { + "EventName": "packed_int_op_type.int256_other", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer ops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "packed_int_op_type.int256_all", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer ops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "packed_int_op_type.all", + "EventCode": "0x0d", + "BriefDescription": "Retired packed integer ops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_disp_faults.x87_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for x87 fills.", + "UMask": "0x01" + }, + { + "EventName": "fp_disp_faults.xmm_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for XMM fills.", + "UMask": "0x02" + }, + { + "EventName": "fp_disp_faults.ymm_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for YMM fills.", + "UMask": "0x04" + }, + { + "EventName": "fp_disp_faults.ymm_spill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for YMM spills.", + "UMask": "0x08" + }, + { + "EventName": "fp_disp_faults.sse_avx_all", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults of all types for SSE and AVX ops.", + "UMask": "0x0e" + }, + { + "EventName": "fp_disp_faults.all", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults of all types.", + "UMask": "0x0f" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/inst-cache.json b/tools/perf/pmu-events/arch/x86/amdzen5/inst-cache.json new file mode 100644 index 000000000000..ad75e5bf9513 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/inst-cache.json @@ -0,0 +1,72 @@ +[ + { + "EventName": "ic_cache_fill_l2", + "EventCode": "0x82", + "BriefDescription": "Instruction cache lines (64 bytes) fulfilled from the L2 cache." + }, + { + "EventName": "ic_cache_fill_sys", + "EventCode": "0x83", + "BriefDescription": "Instruction cache lines (64 bytes) fulfilled from system memory or another cache." + }, + { + "EventName": "ic_fetch_ibs_events.fetch_tagged", + "EventCode": "0x188", + "BriefDescription": "Fetches tagged by Fetch IBS. 
Not all tagged fetches result in a valid sample and an IBS interrupt.", + "UMask": "0x02" + }, + { + "EventName": "ic_fetch_ibs_events.sample_discarded", + "EventCode": "0x188", + "BriefDescription": "Fetches discarded after being tagged by Fetch IBS due to reasons other than IBS filtering.", + "UMask": "0x04" + }, + { + "EventName": "ic_fetch_ibs_events.sample_filtered", + "EventCode": "0x188", + "BriefDescription": "Fetches discarded after being tagged by Fetch IBS due to IBS filtering.", + "UMask": "0x08" + }, + { + "EventName": "ic_fetch_ibs_events.sample_valid", + "EventCode": "0x188", + "BriefDescription": "Fetches tagged by Fetch IBS that result in a valid sample and an IBS interrupt.", + "UMask": "0x10" + }, + { + "EventName": "ic_tag_hit_miss.instruction_cache_hit", + "EventCode": "0x18e", + "BriefDescription": "Instruction cache hits.", + "UMask": "0x07" + }, + { + "EventName": "ic_tag_hit_miss.instruction_cache_miss", + "EventCode": "0x18e", + "BriefDescription": "Instruction cache misses.", + "UMask": "0x18" + }, + { + "EventName": "ic_tag_hit_miss.all_instruction_cache_accesses", + "EventCode": "0x18e", + "BriefDescription": "Instruction cache accesses of all types.", + "UMask": "0x1f" + }, + { + "EventName": "op_cache_hit_miss.op_cache_hit", + "EventCode": "0x28f", + "BriefDescription": "Op cache hits.", + "UMask": "0x03" + }, + { + "EventName": "op_cache_hit_miss.op_cache_miss", + "EventCode": "0x28f", + "BriefDescription": "Op cache misses.", + "UMask": "0x04" + }, + { + "EventName": "op_cache_hit_miss.all_op_cache_accesses", + "EventCode": "0x28f", + "BriefDescription": "Op cache accesses of all types.", + "UMask": "0x07" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/l2-cache.json b/tools/perf/pmu-events/arch/x86/amdzen5/l2-cache.json new file mode 100644 index 000000000000..d1de51a02922 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/l2-cache.json @@ -0,0 +1,266 @@ +[ + { + "EventName": "l2_request_g1.group2", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of non-cacheable type (non-cached data and instruction reads, self-modifying code checks).", + "UMask": "0x01" + }, + { + "EventName": "l2_request_g1.l2_hw_pf", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: from hardware prefetchers to prefetch directly into L2 (hit or miss).", + "UMask": "0x02" + }, + { + "EventName": "l2_request_g1.prefetch_l2_cmd", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: prefetch directly into L2.", + "UMask": "0x04" + }, + { + "EventName": "l2_request_g1.cacheable_ic_read", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: instruction cache reads.", + "UMask": "0x10" + }, + { + "EventName": "l2_request_g1.ls_rd_blk_c_s", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: data cache shared reads.", + "UMask": "0x20" + }, + { + "EventName": "l2_request_g1.rd_blk_x", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: data cache stores.", + "UMask": "0x40" + }, + { + "EventName": "l2_request_g1.rd_blk_l", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: data cache reads including hardware and software prefetch.", + "UMask": "0x80" + }, + { + "EventName": "l2_request_g1.all_dc", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of common types from L1 data cache (including prefetches).", + "UMask": "0xe0" + }, + { + "EventName": "l2_request_g1.all_no_prefetch", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of common types not 
including prefetches.", + "UMask": "0xf1" + }, + { + "EventName": "l2_request_g1.all", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of all types.", + "UMask": "0xf7" + }, + { + "EventName": "l2_request_g2.ls_rd_sized_nc", + "EventCode": "0x61", + "BriefDescription": "L2 cache requests: non-coherent, non-cacheable LS sized reads.", + "UMask": "0x20" + }, + { + "EventName": "l2_request_g2.ls_rd_sized", + "EventCode": "0x61", + "BriefDescription": "L2 cache requests: coherent, non-cacheable LS sized reads.", + "UMask": "0x40" + }, + { + "EventName": "l2_wcb_req.wcb_close", + "EventCode": "0x63", + "BriefDescription": "Write Combining Buffer (WCB) closures.", + "UMask": "0x20" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_miss", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache request miss in L2.", + "UMask": "0x01" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_hit_s", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache hit non-modifiable line in L2.", + "UMask": "0x02" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_hit_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache hit modifiable line in L2.", + "UMask": "0x04" + }, + { + "EventName": "l2_cache_req_stat.ic_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for instruction cache hits.", + "UMask": "0x06" + }, + { + "EventName": "l2_cache_req_stat.ic_access_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for instruction cache access.", + "UMask": "0x07" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_c", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache request miss in L2.", + "UMask": "0x08" + }, + { + "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data and instruction cache misses.", + "UMask": "0x09" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache store or state change hit in L2.", + "UMask": "0x10" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache read hit non-modifiable line in L2.", + "UMask": "0x20" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache read hit modifiable line in L2.", + "UMask": "0x40" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_cs", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache shared read hit in L2.", + "UMask": "0x80" + }, + { + "EventName": "l2_cache_req_stat.dc_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data cache hits.", + "UMask": "0xf0" + }, + { + "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including 
L2 prefetch) for data and instruction cache hits.", + "UMask": "0xf6" + }, + { + "EventName": "l2_cache_req_stat.dc_access_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data cache access.", + "UMask": "0xf8" + }, + { + "EventName": "l2_cache_req_stat.all", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data and instruction cache access.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_hit_l2.l2_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_hit_l2.l1_dc_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_hit_l2.l1_dc_l2_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l2_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l1_dc_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_miss_l2_l3.l2_hwpf", + "EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_miss_l2_l3.l1_dc_hwpf", + "EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_miss_l2_l3.l1_dc_l2_hwpf", + "EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_fill_rsp_src.local_ccx", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "l2_fill_rsp_src.near_cache", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from cache of another CCX when the address was in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "l2_fill_rsp_src.dram_io_near", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "l2_fill_rsp_src.far_cache", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from cache of another CCX when the address was in a different NUMA node.", + 
"UMask": "0x10" + }, + { + "EventName": "l2_fill_rsp_src.dram_io_far", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "l2_fill_rsp_src.alternate_memories", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "l2_fill_rsp_src.all", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from all types of data sources.", + "UMask": "0xde" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/l3-cache.json b/tools/perf/pmu-events/arch/x86/amdzen5/l3-cache.json new file mode 100644 index 000000000000..b50fe14d4520 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/l3-cache.json @@ -0,0 +1,177 @@ +[ + { + "EventName": "l3_lookup_state.l3_miss", + "EventCode": "0x04", + "BriefDescription": "L3 cache misses.", + "UMask": "0x01", + "Unit": "L3PMC" + }, + { + "EventName": "l3_lookup_state.l3_hit", + "EventCode": "0x04", + "BriefDescription": "L3 cache hits.", + "UMask": "0xfe", + "Unit": "L3PMC" + }, + { + "EventName": "l3_lookup_state.all_coherent_accesses_to_l3", + "EventCode": "0x04", + "BriefDescription": "L3 cache requests for all coherent accesses.", + "UMask": "0xff", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.dram_near", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from DRAM in the same NUMA node.", + "UMask": "0x01", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.dram_far", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from DRAM in a different NUMA node.", + "UMask": "0x02", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.near_cache", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from another CCX's cache when the address was in the same NUMA node.", + "UMask": "0x04", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.far_cache", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from another CCX's cache when the address was in a different NUMA node.", + "UMask": "0x08", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.ext_near", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from extension memory (CXL) in the same NUMA node.", + "UMask": "0x10", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.ext_far", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from extension memory (CXL) in a different NUMA node.", + "UMask": "0x20", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.all", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency from all data sources.", + "UMask": "0x3f", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", 
+ "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.dram_near", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from DRAM in the same NUMA node.", + "UMask": "0x01", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.dram_far", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from DRAM in a different NUMA node.", + "UMask": "0x02", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.near_cache", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from another CCX's cache when the address was in the same NUMA node.", + "UMask": "0x04", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.far_cache", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from another CCX's cache when the address was in a different NUMA node.", + "UMask": "0x08", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.ext_near", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from extension memory (CXL) in the same NUMA node.", + "UMask": "0x10", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.ext_far", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from extension memory (CXL) in a different NUMA node.", + "UMask": "0x20", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.all", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from all data sources.", + "UMask": "0x3f", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/load-store.json b/tools/perf/pmu-events/arch/x86/amdzen5/load-store.json new file mode 100644 index 000000000000..af2fdf1f55d6 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/load-store.json @@ -0,0 +1,451 @@ +[ + { + "EventName": "ls_bad_status2.stli_other", + "EventCode": "0x24", + "BriefDescription": "Store-to-load conflicts (load unable to complete due to a non-forwardable conflict with an older store).", + "UMask": "0x02" + }, + { + "EventName": "ls_locks.bus_lock", + "EventCode": "0x25", + "BriefDescription": "Retired Lock instructions which caused a bus lock.", + "UMask": "0x01" + }, + { + "EventName": "ls_ret_cl_flush", + "EventCode": "0x26", + "BriefDescription": "Retired CLFLUSH instructions." + }, + { + "EventName": "ls_ret_cpuid", + "EventCode": "0x27", + "BriefDescription": "Retired CPUID instructions." 
+ }, + { + "EventName": "ls_dispatch.ld_dispatch", + "EventCode": "0x29", + "BriefDescription": "Number of memory load operations dispatched to the load-store unit.", + "UMask": "0x01" + }, + { + "EventName": "ls_dispatch.store_dispatch", + "EventCode": "0x29", + "BriefDescription": "Number of memory store operations dispatched to the load-store unit.", + "UMask": "0x02" + }, + { + "EventName": "ls_dispatch.ld_st_dispatch", + "EventCode": "0x29", + "BriefDescription": "Number of memory load-store operations dispatched to the load-store unit.", + "UMask": "0x04" + }, + { + "EventName": "ls_dispatch.all", + "EventCode": "0x29", + "BriefDescription": "Number of memory operations dispatched to the load-store unit.", + "UMask": "0x07" + }, + { + "EventName": "ls_smi_rx", + "EventCode": "0x2b", + "BriefDescription": "SMIs received." + }, + { + "EventName": "ls_int_taken", + "EventCode": "0x2c", + "BriefDescription": "Interrupts taken." + }, + { + "EventName": "ls_stlf", + "EventCode": "0x35", + "BriefDescription": "Store-to-load-forward (STLF) hits." + }, + { + "EventName": "ls_st_commit_cancel2.st_commit_cancel_wcb_full", + "EventCode": "0x37", + "BriefDescription": "Non-cacheable store commits cancelled due to the non-cacheable commit buffer being full.", + "UMask": "0x01" + }, + { + "EventName": "ls_mab_alloc.load_store_allocations", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for load-store allocations.", + "UMask": "0x3f" + }, + { + "EventName": "ls_mab_alloc.hardware_prefetcher_allocations", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for hardware prefetcher allocations.", + "UMask": "0x40" + }, + { + "EventName": "ls_mab_alloc.all_allocations", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for all types of allocations.", + "UMask": "0x7f" + }, + { + "EventName": "ls_dmnd_fills_from_sys.local_l2", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_dmnd_fills_from_sys.local_ccx", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_dmnd_fills_from_sys.near_cache", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from cache of another CCX when the address was in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_dmnd_fills_from_sys.dram_io_near", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_dmnd_fills_from_sys.far_cache", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from cache of another CCX when the address was in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_dmnd_fills_from_sys.dram_io_far", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "ls_dmnd_fills_from_sys.alternate_memories", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "ls_dmnd_fills_from_sys.all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from all types 
of data sources.", + "UMask": "0xff" + }, + { + "EventName": "ls_any_fills_from_sys.local_l2", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_any_fills_from_sys.local_ccx", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_any_fills_from_sys.local_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from local L2 cache or L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_any_fills_from_sys.near_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from cache of another CCX when the address was in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_near", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_any_fills_from_sys.far_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from cache of another CCX when the address was in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_any_fills_from_sys.remote_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from cache of another CCX when the address was in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_far", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either DRAM or MMIO in any NUMA node (same or different socket).", + "UMask": "0x48" + }, + { + "EventName": "ls_any_fills_from_sys.far_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either cache of another CCX, DRAM or MMIO when the address was in a different NUMA node (same or different socket).", + "UMask": "0x50" + }, + { + "EventName": "ls_any_fills_from_sys.all_dram_io", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either DRAM or MMIO in any NUMA node (same or different socket).", + "UMask": "0x48" + }, + { + "EventName": "ls_any_fills_from_sys.alternate_memories", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "ls_any_fills_from_sys.all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from all types of data sources.", + "UMask": "0xff" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 4k pages.", + "UMask": "0x01" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for coalesced pages. 
A coalesced page is a 16k page created from four adjacent 4k pages.", + "UMask": "0x02" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 2M pages.", + "UMask": "0x04" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 1G pages.", + "UMask": "0x08" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 4k pages.", + "UMask": "0x10" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.", + "UMask": "0x20" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 2M pages.", + "UMask": "0x40" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 1G pages.", + "UMask": "0x80" + }, + { + "EventName": "ls_l1_d_tlb_miss.all_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for all page sizes.", + "UMask": "0xf0" + }, + { + "EventName": "ls_l1_d_tlb_miss.all", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses for all page sizes.", + "UMask": "0xff" + }, + { + "EventName": "ls_misal_loads.ma64", + "EventCode": "0x47", + "BriefDescription": "64B misaligned (cacheline crossing) loads.", + "UMask": "0x01" + }, + { + "EventName": "ls_misal_loads.ma4k", + "EventCode": "0x47", + "BriefDescription": "4kB misaligned (page crossing) loads.", + "UMask": "0x02" + }, + { + "EventName": "ls_pref_instr_disp.prefetch", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchT0 (move data to all cache levels), T1 (move data to all cache levels except L1) and T2 (move data to all cache levels except L1 and L2).", + "UMask": "0x01" + }, + { + "EventName": "ls_pref_instr_disp.prefetch_w", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchW (move data to L1 cache and mark it modifiable).", + "UMask": "0x02" + }, + { + "EventName": "ls_pref_instr_disp.prefetch_nta", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchNTA (move data with minimum cache pollution i.e. 
non-temporal access).", + "UMask": "0x04" + }, + { + "EventName": "ls_pref_instr_disp.all", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of all types.", + "UMask": "0x07" + }, + { + "EventName": "wcb_close.full_line_64b", + "EventCode": "0x50", + "BriefDescription": "Number of events that caused a Write Combining Buffer (WCB) entry to close because all 64 bytes of the entry have been written to.", + "UMask": "0x01" + }, + { + "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit", + "EventCode": "0x52", + "BriefDescription": "Software prefetches that did not fetch data outside of the processor core as the PREFETCH instruction saw a data cache hit.", + "UMask": "0x01" + }, + { + "EventName": "ls_inef_sw_pref.mab_mch_cnt", + "EventCode": "0x52", + "BriefDescription": "Software prefetches that did not fetch data outside of the processor core as the PREFETCH instruction saw a match on an already allocated Miss Address Buffer (MAB).", + "UMask": "0x02" + }, + { + "EventName": "ls_inef_sw_pref.all", + "EventCode": "0x52", + "BriefDescript6ion": "Software prefetches that did not fetch data outside of the processor core for any reason.", + "UMask": "0x03" + }, + { + "EventName": "ls_sw_pf_dc_fills.local_l2", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_sw_pf_dc_fills.local_ccx", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_sw_pf_dc_fills.near_cache", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from cache of another CCX in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_sw_pf_dc_fills.dram_io_near", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_sw_pf_dc_fills.far_cache", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from cache of another CCX in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_sw_pf_dc_fills.dram_io_far", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "ls_sw_pf_dc_fills.alternate_memories", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "ls_sw_pf_dc_fills.all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from all types of data sources.", + "UMask": "0xdf" + }, + { + "EventName": "ls_hw_pf_dc_fills.local_l2", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_hw_pf_dc_fills.local_ccx", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_hw_pf_dc_fills.near_cache", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from cache of another CCX when the address was in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_hw_pf_dc_fills.dram_io_near", + "EventCode": "0x5a", + "BriefDescription": 
"Hardware prefetch data cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_hw_pf_dc_fills.far_cache", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from cache of another CCX when the address was in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_hw_pf_dc_fills.dram_io_far", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "ls_hw_pf_dc_fills.alternate_memories", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "ls_hw_pf_dc_fills.all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from all types of data sources.", + "UMask": "0xdf" + }, + { + "EventName": "ls_alloc_mab_count", + "EventCode": "0x5f", + "BriefDescription": "In-flight L1 data cache misses i.e. Miss Address Buffer (MAB) allocations each cycle." + }, + { + "EventName": "ls_not_halted_cyc", + "EventCode": "0x76", + "BriefDescription": "Core cycles not in halt." + }, + { + "EventName": "ls_tlb_flush.all", + "EventCode": "0x78", + "BriefDescription": "All TLB Flushes.", + "UMask": "0xff" + }, + { + "EventName": "ls_not_halted_p0_cyc.p0_freq_cyc", + "EventCode": "0x120", + "BriefDescription": "Reference cycles (P0 frequency) not in halt .", + "UMask": "0x1" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/memory-controller.json b/tools/perf/pmu-events/arch/x86/amdzen5/memory-controller.json new file mode 100644 index 000000000000..1a629fc9474a --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/memory-controller.json @@ -0,0 +1,101 @@ +[ + { + "EventName": "umc_mem_clk", + "PublicDescription": "Number of memory clock (MEMCLK) cycles.", + "EventCode": "0x00", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.all", + "PublicDescription": "Number of ACTIVATE commands sent.", + "EventCode": "0x05", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.rd", + "PublicDescription": "Number of ACTIVATE commands sent for reads.", + "EventCode": "0x05", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.wr", + "PublicDescription": "Number of ACTIVATE commands sent for writes.", + "EventCode": "0x05", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.all", + "PublicDescription": "Number of PRECHARGE commands sent.", + "EventCode": "0x06", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.rd", + "PublicDescription": "Number of PRECHARGE commands sent for reads.", + "EventCode": "0x06", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.wr", + "PublicDescription": "Number of PRECHARGE commands sent for writes.", + "EventCode": "0x06", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.all", + "PublicDescription": "Number of CAS commands sent.", + "EventCode": "0x0a", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.rd", + "PublicDescription": "Number of CAS commands sent for reads.", + "EventCode": "0x0a", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.wr", + "PublicDescription": "Number of CAS commands sent for writes.", + 
"EventCode": "0x0a", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.all", + "PublicDescription": "Number of clock cycles used by the data bus.", + "EventCode": "0x14", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.rd", + "PublicDescription": "Number of clock cycles used by the data bus for reads.", + "EventCode": "0x14", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.wr", + "PublicDescription": "Number of clock cycles used by the data bus for writes.", + "EventCode": "0x14", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/pipeline.json b/tools/perf/pmu-events/arch/x86/amdzen5/pipeline.json new file mode 100644 index 000000000000..d860bf599cf2 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/pipeline.json @@ -0,0 +1,99 @@ +[ + { + "MetricName": "total_dispatch_slots", + "BriefDescription": "Total dispatch slots (up to 8 instructions can be dispatched in each cycle).", + "MetricExpr": "8 * ls_not_halted_cyc", + "ScaleUnit": "1slots" + }, + { + "MetricName": "frontend_bound", + "BriefDescription": "Percentage of dispatch slots that remained unused because the frontend did not supply enough instructions/ops.", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.no_ops_from_frontend, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "bad_speculation", + "BriefDescription": "Percentage of dispatched ops that did not retire.", + "MetricExpr": "d_ratio(de_src_op_disp.all - ex_ret_ops, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "backend_bound", + "BriefDescription": "Percentage of dispatch slots that remained unused because of backend stalls.", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.backend_stalls, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "smt_contention", + "BriefDescription": "Percentage of dispatch slots that remained unused because the other thread was selected.", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.smt_contention, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring", + "BriefDescription": "Percentage of dispatch slots used by ops that retired.", + "MetricExpr": "d_ratio(ex_ret_ops, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "frontend_bound_by_latency", + "BriefDescription": "Percentage of dispatch slots that remained unused because of a latency bottleneck in the frontend (such as instruction cache or TLB misses).", + "MetricExpr": "d_ratio((8 * cpu@de_no_dispatch_per_slot.no_ops_from_frontend\\,cmask\\=0x8@), total_dispatch_slots)", + "MetricGroup": "PipelineL2;frontend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "frontend_bound_by_bandwidth", + "BriefDescription": "Percentage of dispatch slots that remained unused because of a bandwidth bottleneck in the frontend (such as decode or op cache fetch bandwidth).", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.no_ops_from_frontend - (8 * cpu@de_no_dispatch_per_slot.no_ops_from_frontend\\,cmask\\=0x8@), total_dispatch_slots)", + "MetricGroup": "PipelineL2;frontend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "bad_speculation_from_mispredicts", + 
"BriefDescription": "Percentage of dispatched ops that were flushed due to branch mispredicts.", + "MetricExpr": "d_ratio(bad_speculation * ex_ret_brn_misp, ex_ret_brn_misp + bp_redirects.resync)", + "MetricGroup": "PipelineL2;bad_speculation_group", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "bad_speculation_from_pipeline_restarts", + "BriefDescription": "Percentage of dispatched ops that were flushed due to pipeline restarts (resyncs).", + "MetricExpr": "d_ratio(bad_speculation * bp_redirects.resync, ex_ret_brn_misp + bp_redirects.resync)", + "MetricGroup": "PipelineL2;bad_speculation_group", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "backend_bound_by_memory", + "BriefDescription": "Percentage of dispatch slots that remained unused because of stalls due to the memory subsystem.", + "MetricExpr": "backend_bound * d_ratio(ex_no_retire.load_not_complete, ex_no_retire.not_complete)", + "MetricGroup": "PipelineL2;backend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "backend_bound_by_cpu", + "BriefDescription": "Percentage of dispatch slots that remained unused because of stalls not related to the memory subsystem.", + "MetricExpr": "backend_bound * (1 - d_ratio(ex_no_retire.load_not_complete, ex_no_retire.not_complete))", + "MetricGroup": "PipelineL2;backend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring_from_fastpath", + "BriefDescription": "Percentage of dispatch slots used by fastpath ops that retired.", + "MetricExpr": "retiring * (1 - d_ratio(ex_ret_ucode_ops, ex_ret_ops))", + "MetricGroup": "PipelineL2;retiring_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring_from_microcode", + "BriefDescription": "Percentage of dispatch slots used by microcode ops that retired.", + "MetricExpr": "retiring * d_ratio(ex_ret_ucode_ops, ex_ret_ops)", + "MetricGroup": "PipelineL2;retiring_group", + "ScaleUnit": "100%slots" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen5/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen5/recommended.json new file mode 100644 index 000000000000..c97874039c1e --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen5/recommended.json @@ -0,0 +1,345 @@ +[ + { + "MetricName": "branch_misprediction_rate", + "BriefDescription": "Execution-time branch misprediction rate (non-speculative).", + "MetricExpr": "d_ratio(ex_ret_brn_misp, ex_ret_brn)", + "MetricGroup": "branch_prediction", + "ScaleUnit": "1per_branch" + }, + { + "MetricName": "all_data_cache_accesses_pti", + "BriefDescription": "All data cache accesses per thousand instructions.", + "MetricExpr": "ls_dispatch.all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_accesses_pti", + "BriefDescription": "All L2 cache accesses per thousand instructions.", + "MetricExpr": "(l2_request_g1.all_no_prefetch + l2_pf_hit_l2.l2_hwpf + l2_pf_miss_l2_hit_l3.l2_hwpf + l2_pf_miss_l2_l3.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l1_ic_misses_pti", + "BriefDescription": "L2 cache accesses from L1 instruction cache misses (including prefetch) per thousand instructions.", + "MetricExpr": "l2_request_g1.cacheable_ic_read / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l1_dc_misses_pti", + "BriefDescription": "L2 cache accesses from L1 data cache misses (including prefetch) per thousand instructions.", + 
"MetricExpr": "l2_request_g1.all_dc / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l2_hwpf_pti", + "BriefDescription": "L2 cache accesses from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "(l2_pf_hit_l2.l1_dc_l2_hwpf + l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf + l2_pf_miss_l2_l3.l1_dc_l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_misses_pti", + "BriefDescription": "All L2 cache misses per thousand instructions.", + "MetricExpr": "(l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3.l2_hwpf + l2_pf_miss_l2_l3.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_misses_from_l1_ic_miss_pti", + "BriefDescription": "L2 cache misses from L1 instruction cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.ic_fill_miss / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_misses_from_l1_dc_miss_pti", + "BriefDescription": "L2 cache misses from L1 data cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.ls_rd_blk_c / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_misses_from_l2_hwpf_pti", + "BriefDescription": "L2 cache misses from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "(l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf + l2_pf_miss_l2_l3.l1_dc_l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_hits_pti", + "BriefDescription": "All L2 cache hits per thousand instructions.", + "MetricExpr": "(l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l1_ic_miss_pti", + "BriefDescription": "L2 cache hits from L1 instruction cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.ic_hit_in_l2 / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l1_dc_miss_pti", + "BriefDescription": "L2 cache hits from L1 data cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.dc_hit_in_l2 / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l2_hwpf_pti", + "BriefDescription": "L2 cache hits from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "l2_pf_hit_l2.l1_dc_l2_hwpf / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l3_cache_accesses", + "BriefDescription": "L3 cache accesses.", + "MetricExpr": "l3_lookup_state.all_coherent_accesses_to_l3", + "MetricGroup": "l3_cache" + }, + { + "MetricName": "l3_misses", + "BriefDescription": "L3 misses (including cacheline state change requests).", + "MetricExpr": "l3_lookup_state.l3_miss", + "MetricGroup": "l3_cache" + }, + { + "MetricName": "l3_read_miss_latency", + "BriefDescription": "Average L3 read miss latency (in core clocks).", + "MetricExpr": "(l3_xi_sampled_latency.all * 10) / l3_xi_sampled_latency_requests.all", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "l3_read_miss_latency_for_local_dram", + "BriefDescription": "Average L3 
read miss latency (in core clocks) for local DRAM.", + "MetricExpr": "(l3_xi_sampled_latency.dram_near * 10) / l3_xi_sampled_latency_requests.dram_near", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "l3_read_miss_latency_for_remote_dram", + "BriefDescription": "Average L3 read miss latency (in core clocks) for remote DRAM.", + "MetricExpr": "(l3_xi_sampled_latency.dram_far * 10) / l3_xi_sampled_latency_requests.dram_far", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "op_cache_fetch_miss_ratio", + "BriefDescription": "Op cache miss ratio for all fetches.", + "MetricExpr": "d_ratio(op_cache_hit_miss.op_cache_miss, op_cache_hit_miss.all_op_cache_accesses)", + "ScaleUnit": "100%" + }, + { + "MetricName": "ic_fetch_miss_ratio", + "BriefDescription": "Instruction cache miss ratio for all fetches. An instruction cache miss will not be counted by this metric if it is an OC hit.", + "MetricExpr": "d_ratio(ic_tag_hit_miss.instruction_cache_miss, ic_tag_hit_miss.all_instruction_cache_accesses)", + "ScaleUnit": "100%" + }, + { + "MetricName": "l1_data_cache_fills_from_memory_pti", + "BriefDescription": "L1 data cache fills from DRAM or MMIO in any NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.dram_io_all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_remote_node_pti", + "BriefDescription": "L1 data cache fills from a different NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.far_all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_same_ccx_pti", + "BriefDescription": "L1 data cache fills from within the same CCX per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.local_all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_different_ccx_pti", + "BriefDescription": "L1 data cache fills from another CCX cache in any NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.remote_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l1_data_cache_fills_pti", + "BriefDescription": "All L1 data cache fills per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_local_l2_pti", + "BriefDescription": "L1 demand data cache fills from local L2 cache per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.local_l2 / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_same_ccx_pti", + "BriefDescription": "L1 demand data cache fills from within the same CCX per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.local_ccx / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_near_cache_pti", + "BriefDescription": "L1 demand data cache fills from another CCX cache in the same NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.near_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_near_memory_pti", 
+ "BriefDescription": "L1 demand data cache fills from DRAM or MMIO in the same NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.dram_io_near / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_far_cache_pti", + "BriefDescription": "L1 demand data cache fills from another CCX cache in a different NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.far_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_far_memory_pti", + "BriefDescription": "L1 demand data cache fills from DRAM or MMIO in a different NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.dram_io_far / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_itlb_misses_pti", + "BriefDescription": "L1 instruction TLB misses per thousand instructions.", + "MetricExpr": "(bp_l1_tlb_miss_l2_tlb_hit + bp_l1_tlb_miss_l2_tlb_miss.all) / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_itlb_misses_pti", + "BriefDescription": "L2 instruction TLB misses and instruction page walks per thousand instructions.", + "MetricExpr": "bp_l1_tlb_miss_l2_tlb_miss.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_dtlb_misses_pti", + "BriefDescription": "L1 data TLB misses per thousand instructions.", + "MetricExpr": "ls_l1_d_tlb_miss.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_dtlb_misses_pti", + "BriefDescription": "L2 data TLB misses and data page walks per thousand instructions.", + "MetricExpr": "ls_l1_d_tlb_miss.all_l2_miss / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_tlbs_flushed_pti", + "BriefDescription": "All TLBs flushed per thousand instructions.", + "MetricExpr": "ls_tlb_flush.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "macro_ops_dispatched", + "BriefDescription": "Macro-ops dispatched.", + "MetricExpr": "de_src_op_disp.all", + "MetricGroup": "decoder" + }, + { + "MetricName": "sse_avx_stalls", + "BriefDescription": "Mixed SSE/AVX stalls.", + "MetricExpr": "fp_disp_faults.sse_avx_all" + }, + { + "MetricName": "macro_ops_retired", + "BriefDescription": "Macro-ops retired.", + "MetricExpr": "ex_ret_ops" + }, + { + "MetricName": "umc_data_bus_utilization", + "BriefDescription": "Memory controller data bus utilization.", + "MetricExpr": "d_ratio(umc_data_slot_clks.all / 2, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_rate", + "BriefDescription": "Memory controller CAS command rate.", + "MetricExpr": "d_ratio(umc_cas_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + }, + { + "MetricName": "umc_cas_cmd_read_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for reads.", + "MetricExpr": "d_ratio(umc_cas_cmd.rd, umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_write_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for writes.", + "MetricExpr": "d_ratio(umc_cas_cmd.wr, 
umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_mem_read_bandwidth", + "BriefDescription": "Estimated memory read bandwidth.", + "MetricExpr": "(umc_cas_cmd.rd * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_mem_write_bandwidth", + "BriefDescription": "Estimated memory write bandwidth.", + "MetricExpr": "(umc_cas_cmd.wr * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_mem_bandwidth", + "BriefDescription": "Estimated combined memory bandwidth.", + "MetricExpr": "(umc_cas_cmd.all * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_activate_cmd_rate", + "BriefDescription": "Memory controller ACTIVATE command rate.", + "MetricExpr": "d_ratio(umc_act_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + }, + { + "MetricName": "umc_precharge_cmd_rate", + "BriefDescription": "Memory controller PRECHARGE command rate.", + "MetricExpr": "d_ratio(umc_pchg_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + } +] diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json index 55a10b0bf36f..c20833fb1f58 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json @@ -84,12 +84,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", + "MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -211,7 +211,7 @@ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -266,7 +266,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -343,27 +343,20 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.", "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "ScaleUnit": "100%" }, { - "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", - "MetricGroup": "Bad;BrMispredicts;tma_issueBM", - "MetricName": "tma_info_bad_spec_branch_misprediction_cost", - "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). 
Related metrics: tma_branch_mispredicts, tma_mispredicts_resteers" - }, - { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" @@ -389,7 +382,7 @@ }, { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -401,8 +394,8 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -439,7 +432,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -447,7 +440,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." 
}, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -455,7 +448,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -463,7 +456,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -471,7 +464,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." 
}, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -489,7 +482,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -518,120 +511,114 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" }, { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo 
instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" - }, - { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" - }, - { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" - }, - { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "0", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" }, { "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", @@ -641,8 +628,8 @@ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, @@ -653,30 +640,36 @@ "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -699,19 +692,6 @@ "MetricThreshold": "tma_info_system_kernel_utilization > 0.05" }, { - "BriefDescription": "Average number of parallel requests to external memory", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_parallel_requests", - "PublicDescription": "Average number of parallel requests to external memory. Accounts for all requests" - }, - { - "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_request_latency" - }, - { "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active", "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)", "MetricGroup": "SMT", @@ -777,7 +757,7 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED", @@ -786,7 +766,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. 
Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -795,7 +775,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS", @@ -805,20 +785,20 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_SMT", "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). 
Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", "ScaleUnit": "100%" }, { @@ -837,7 +817,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -872,21 +852,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. 
This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -923,7 +903,7 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.", "ScaleUnit": "100%" }, @@ -991,12 +971,12 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -1050,7 +1030,7 @@ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { @@ -1130,10 +1110,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. 
Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json index ac7cdb831960..b01ed47072bc 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json @@ -2005,7 +2005,7 @@ "BriefDescription": "Number of times RTM abort was triggered", "EventCode": "0xc9", "EventName": "RTM_RETIRED.ABORTED", - "PEBS": "1", + "PEBS": "2", "PublicDescription": "Number of times RTM abort was triggered .", "SampleAfterValue": "2000003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json b/tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json index f6a0258e3241..8c808347f6da 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -24,7 +24,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -94,6 +96,7 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json index e1f55fcfa0d0..826357787201 100644 --- 
a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json @@ -84,12 +84,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", + "MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -211,7 +211,7 @@ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -257,7 +257,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -334,28 +334,21 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", - "MetricGroup": "Bad;BrMispredicts;tma_issueBM", - "MetricName": "tma_info_bad_spec_branch_misprediction_cost", - "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_mispredicts_resteers" - }, - { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" @@ -381,7 +374,7 @@ }, { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -393,8 +386,8 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -431,7 +424,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -439,7 +432,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -447,7 +440,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -455,7 +448,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." 
}, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -463,7 +456,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -481,7 +474,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -510,120 +503,114 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" }, { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", 
"MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" - }, - { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" - }, - { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" - }, - { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "0", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" }, { "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", @@ -633,8 +620,8 @@ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, @@ -645,30 +632,36 @@ "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -756,7 +749,7 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -765,7 +758,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -774,7 +767,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. 
Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -784,20 +777,20 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_SMT", "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", "ScaleUnit": "100%" }, { @@ -816,7 +809,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. 
While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -851,21 +844,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -902,7 +895,7 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%" }, @@ -968,12 +961,12 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -1026,7 +1019,7 @@ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3", "ScaleUnit": "100%" }, @@ -1108,10 +1101,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. 
Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json b/tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json index f6a0258e3241..8c808347f6da 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -24,7 +24,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -94,6 +96,7 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json index 83d20130c217..320aaab53a0b 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json +++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json @@ -394,6 +394,7 @@ "BriefDescription": "Number of cores in C-State; C0 and C1", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0", + "Filter": "occ_sel=1", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" @@ -402,6 +403,7 @@ "BriefDescription": "Number of cores in C-State; C3", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3", + "Filter": "occ_sel=2", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" @@ -410,6 +412,7 @@ "BriefDescription": "Number of cores in C-State; C6 and C7", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6", + "Filter": "occ_sel=3", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json index b319e4edc238..0aed533da882 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json @@ -286,12 +286,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", + "MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -413,7 +413,7 @@ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -468,7 +468,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -545,27 +545,20 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.", "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "ScaleUnit": "100%" }, { - "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", - "MetricGroup": "Bad;BrMispredicts;tma_issueBM", - "MetricName": "tma_info_bad_spec_branch_misprediction_cost", - "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). 
Related metrics: tma_branch_mispredicts, tma_mispredicts_resteers" - }, - { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" @@ -591,7 +584,7 @@ }, { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -603,8 +596,8 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -641,7 +634,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -649,7 +642,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." 
}, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -657,7 +650,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -665,7 +658,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -673,7 +666,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." 
}, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -691,7 +684,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -720,120 +713,156 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "tma_info_memory_latency_data_l2_mlp", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_data_l2_mlp" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t" }, { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw_2t" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": 
"CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw_2t" + }, + { "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" + "BriefDescription": "Average Latency for L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_miss_latency", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { - "BriefDescription": "Average Parallel L2 cache miss data reads", - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "BriefDescription": "Average Parallel L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_mlp", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "MetricName": "tma_info_memory_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "0", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricExpr": "tma_info_memory_tlb_page_walks_utilization", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_page_walks_utilization" }, { "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", @@ -843,8 +872,8 @@ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, @@ -855,30 +884,36 @@ "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -933,6 +968,12 @@ "MetricName": "tma_info_system_turbo_utilization" }, { + "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]", + "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_uncore_frequency" + }, + { "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", "MetricGroup": "Pipeline", @@ -980,7 +1021,7 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED", @@ -989,7 +1030,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. 
Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -998,7 +1039,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS", @@ -1008,20 +1049,20 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_SMT", "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. 
Related metrics: tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", "ScaleUnit": "100%" }, { @@ -1040,7 +1081,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1055,11 +1096,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory", - "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_local_dram", - "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_local_mem", + "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. 
Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM_PS", "ScaleUnit": "100%" }, @@ -1085,21 +1125,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). 
This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1136,7 +1176,7 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.", "ScaleUnit": "100%" }, @@ -1204,12 +1244,12 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -1263,7 +1303,7 @@ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { @@ -1278,11 +1318,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory", - "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "310 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_remote_dram", - "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_remote_mem", + "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM_PS", "ScaleUnit": "100%" }, @@ -1363,10 +1402,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. 
Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json b/tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json index f6a0258e3241..8c808347f6da 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -24,7 +24,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -94,6 +96,7 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json index 83d20130c217..320aaab53a0b 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json @@ -394,6 +394,7 @@ "BriefDescription": "Number of cores in C-State; C0 and C1", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0", + "Filter": "occ_sel=1", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" @@ -402,6 +403,7 @@ "BriefDescription": "Number of cores in C-State; C3", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3", + "Filter": "occ_sel=2", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" @@ -410,6 +412,7 @@ "BriefDescription": "Number of cores in C-State; C6 and C7", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6", + "Filter": "occ_sel=3", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json index 8bc6c0707856..297046818efe 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json @@ -210,6 +210,12 @@ "ScaleUnit": "1MB/s" }, { + "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to remote memory.", + "MetricExpr": "UNC_CHA_REQUESTS.WRITES_REMOTE * 64 / 1e6 / duration_time", + "MetricName": "llc_miss_remote_memory_bandwidth_write", + "ScaleUnit": "1MB/s" + }, + { "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions", "MetricExpr": "MEM_INST_RETIRED.ALL_LOADS / INST_RETIRED.ANY", "MetricName": "loads_per_instr", @@ -316,12 +322,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots", + "MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -389,7 +395,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(44 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) + 44 * tma_info_system_average_frequency * 
MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) + 44 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -410,7 +416,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "44 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -422,7 +428,7 @@ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%" }, @@ -447,10 +453,10 @@ }, { "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline", - "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", + "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -470,7 +476,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { @@ -479,13 +485,13 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. 
Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(110 * tma_info_system_average_frequency * (OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM + OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_system_average_frequency * (OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_thread_clks", + "MetricExpr": "(110 * tma_info_system_core_frequency * (OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM + OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_system_core_frequency * (OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -499,7 +505,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%" }, { @@ -507,7 +513,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. 
Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -542,6 +548,15 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists", + "MetricExpr": "34 * FP_ASSIST.ANY / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_fp_assists", + "MetricThreshold": "tma_fp_assists > 0.1", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", @@ -600,10 +615,10 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions", "MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_fused_instructions", "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}", "ScaleUnit": "100%" }, { @@ -613,13 +628,13 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", @@ -627,26 +642,50 @@ }, { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers" }, { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" }, { "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)", - "MetricExpr": "tma_info_core_ipmispredict", + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;BadSpec;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmispredict", "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio" + }, + { + "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", + "MetricExpr": "(100 * (1 - tma_core_bound / (((EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - 
CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if tma_core_bound < (((EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", + "MetricGroup": "Cor;SMT", + "MetricName": "tma_info_botlnk_core_bound_likely" + }, + { + "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.", + "MetricExpr": "100 * (100 * (tma_fetch_latency * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(2 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb)))", + "MetricGroup": "DSBmiss;Fed", + "MetricName": "tma_info_botlnk_dsb_misses" + }, + { + "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.", + "MetricExpr": "100 * (100 * (tma_fetch_latency * ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD) / ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(2 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD)))", + "MetricGroup": "Fed;FetchLat;IcMiss", + "MetricName": "tma_info_botlnk_ic_misses" + }, + { "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", @@ -672,67 +711,102 @@ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. 
Related metrics: " }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", - "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_bottleneck_branching_overhead" + "MetricThreshold": "tma_info_bottleneck_big_code > 20" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((BR_INST_RETIRED.CONDITIONAL + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_bottleneck_big_code" + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
Related metrics: " }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store" + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency" + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls." + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -753,7 +827,7 @@ { "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES", + "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.COND - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", "MetricName": "tma_info_branches_jump" }, @@ -770,9 +844,15 @@ "MetricName": "tma_info_core_coreipc" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -784,19 +864,12 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, { - "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", - "MetricGroup": "Bad;BadSpec;BrMispredicts;TopdownL1;tma_L1_group", - "MetricName": "tma_info_core_ipmispredict", - "MetricgroupNoGroup": "TopdownL1" - }, - { "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)", "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB", @@ -867,7 +940,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -875,7 +948,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -883,7 +956,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)", @@ -891,7 +964,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx512", "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." 
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -899,7 +972,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -907,7 +980,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -926,7 +999,7 @@ { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -939,6 +1012,12 @@ "MetricThreshold": "tma_info_inst_mix_ipload < 3" }, { + "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / ROB_MISC_EVENTS.PAUSE_INST", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_ippause" + }, + { "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", "MetricGroup": "InsType", @@ -961,16 +1040,22 @@ "PublicDescription": "Instruction per taken branch. 
Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp" }, { + "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki", + "MetricGroup": "Fed;MemoryTLB", + "MetricName": "tma_info_memory_code_stlb_mpki" + }, + { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction", @@ -986,124 +1071,202 @@ }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw" + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "tma_info_memory_latency_data_l2_mlp", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_data_l2_mlp" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": 
"tma_info_memory_l1mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw_2t" + }, + { + "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction", + "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY", + "MetricGroup": "L2Evicts;Mem;Server", + "MetricName": "tma_info_memory_l2_evictions_nonsilent_pki" + }, + { + "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)", + "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY", + "MetricGroup": "L2Evicts;Mem;Server", + "MetricName": "tma_info_memory_l2_evictions_silent_pki" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw" + }, + { + "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw_2t" + }, + { "BriefDescription": "L3 cache 
true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "Average Latency for L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_miss_latency", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { - "BriefDescription": "Average Parallel L2 cache miss data reads", - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "BriefDescription": "Average Parallel L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_mlp", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": 
"tma_info_memory_tlb_load_stlb_mpki", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_load_stlb_mpki" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "tma_info_memory_uc_load_pki", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + }, + { + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricExpr": "tma_info_memory_tlb_page_walks_utilization", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_page_walks_utilization" + }, + { + "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_store_stlb_mpki" }, { "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", @@ -1132,55 +1295,77 @@ "MetricName": "tma_info_memory_tlb_store_stlb_mpki" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_uc_load_pki" + }, + { + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, { + "BriefDescription": "Instructions per a microcode Assist invocation", + "MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", + "MetricName": "tma_info_pipeline_ipassist", + "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", + "PublicDescription": "Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)" + }, + { "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@", "MetricGroup": "Pipeline;Ret", "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]", "MetricExpr": "(UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3) * 4 / 1e9 / duration_time", - "MetricGroup": "IoBW;Mem;Server;SoC", - "MetricName": "tma_info_system_io_read_bw" + "MetricGroup": "IoBW;MemOffcore;Server;SoC", + "MetricName": "tma_info_system_io_read_bw", + "PublicDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU" }, { "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]", "MetricExpr": "(UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3) * 4 / 1e9 / duration_time", - "MetricGroup": "IoBW;Mem;Server;SoC", - "MetricName": "tma_info_system_io_write_bw" + "MetricGroup": "IoBW;MemOffcore;Server;SoC", + "MetricName": "tma_info_system_io_write_bw", + "PublicDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -1205,7 +1390,7 @@ { "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]", "MetricExpr": "1e9 * (UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_INSERTS) / imc_0@event\\=0x0@", - "MetricGroup": "Mem;MemoryLat;Server;SoC", + "MetricGroup": "MemOffcore;MemoryLat;Server;SoC", "MetricName": "tma_info_system_mem_dram_read_latency", "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches" }, @@ -1219,7 +1404,7 @@ { "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]", "MetricExpr": "(1e9 * (UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS) / imc_0@event\\=0x0@ if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryLat;Server;SoC", + "MetricGroup": "MemOffcore;MemoryLat;Server;SoC", "MetricName": "tma_info_system_mem_pmm_read_latency", "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. 
Accounts for demand loads and L1/L2 data-read prefetches" }, @@ -1233,13 +1418,13 @@ { "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]", "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryBW;Server;SoC", + "MetricGroup": "MemOffcore;MemoryBW;Server;SoC", "MetricName": "tma_info_system_pmm_read_bw" }, { "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]", "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryBW;Server;SoC", + "MetricGroup": "MemOffcore;MemoryBW;Server;SoC", "MetricName": "tma_info_system_pmm_write_bw" }, { @@ -1284,6 +1469,12 @@ "MetricName": "tma_info_system_turbo_utilization" }, { + "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]", + "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_uncore_frequency" + }, + { "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", "MetricGroup": "Pipeline", @@ -1330,8 +1521,8 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", - "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1340,7 +1531,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. 
Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1350,7 +1541,7 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1359,24 +1550,24 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "17 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "17 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. 
Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", - "MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks", + "MetricExpr": "DECODE.LCP / tma_info_thread_clks", "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", "MetricName": "tma_lcp", "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", @@ -1390,7 +1581,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1421,10 +1612,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory", - "MetricExpr": "59.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "59.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_local_dram", - "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_local_mem", + "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. 
Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS", "ScaleUnit": "100%" }, @@ -1449,21 +1640,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). 
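The tma_mem_bandwidth / tma_mem_latency pair above splits DRAM stall cycles by occupancy: cycles with four or more outstanding data reads (the cmask=4 form of the event) are charged to bandwidth, and the remaining cycles with any outstanding read to latency. A sketch of that split in Python, with hypothetical counts:

  # Cycles with >=4 outstanding demand data reads count toward bandwidth;
  # latency gets cycles with any outstanding read, minus the bandwidth share.
  thread_clks = 1_000_000
  cycles_ge4_data_rd = 180_000   # OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD, cmask=4
  cycles_any_data_rd = 420_000   # OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD

  tma_mem_bandwidth = min(thread_clks, cycles_ge4_data_rd) / thread_clks
  tma_mem_latency = min(thread_clks, cycles_any_data_rd) / thread_clks - tma_mem_bandwidth
  print(tma_mem_bandwidth, tma_mem_latency)   # -> 0.18 0.24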
Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1491,7 +1682,7 @@ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1508,17 +1699,17 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%" }, { - "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued", + "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY", "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", "MetricName": "tma_mixing_vectors", "MetricThreshold": "tma_mixing_vectors > 0.05", - "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. 
Related metrics: tma_ms_switches", + "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1527,13 +1718,13 @@ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", "MetricName": "tma_ms_switches", "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", - "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused", "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_non_fused_branches", "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. 
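The tma_non_fused_branches expression above is a straightforward slot fraction: branches that did not macro-fuse, normalized to retirement slots and scaled by the light-operations share. A sketch with hypothetical event counts:

  # tma_light_operations * (ALL_BRANCHES - MACRO_FUSED) / RETIRE_SLOTS,
  # as in the MetricExpr above; all counts below are invented.
  tma_light_operations = 0.55
  all_branches = 200_000     # BR_INST_RETIRED.ALL_BRANCHES
  macro_fused = 150_000      # UOPS_RETIRED.MACRO_FUSED
  retire_slots = 4_000_000   # UOPS_RETIRED.RETIRE_SLOTS

  tma_non_fused_branches = tma_light_operations * (all_branches - macro_fused) / retire_slots
  print(f"{tma_non_fused_branches:.4f}")   # -> 0.0069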
Can be used to examine fusible conditional jumps that were not fused.", @@ -1542,15 +1733,15 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -1558,9 +1749,25 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + 
MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 0)) * (CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)", + "MetricExpr": "(((1 - (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))))) * (CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)", "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_pmm_bound", "MetricThreshold": "tma_pmm_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", @@ -1622,12 +1829,12 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -1641,7 +1848,7 @@ }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -1650,7 +1857,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_NONE / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_core_core_clks", + "MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -1680,13 +1887,13 @@ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues", "MetricConstraint": "NO_GROUP_EVENTS_NMI", - "MetricExpr": "(89.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 89.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(89.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 89.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group", "MetricName": "tma_remote_cache", 
"MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", @@ -1695,10 +1902,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory", - "MetricExpr": "127 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "127 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_remote_dram", - "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_remote_mem", + "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS", "ScaleUnit": "100%" }, @@ -1715,18 +1922,18 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", "MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", "MetricExpr": "40 * ROB_MISC_EVENTS.PAUSE_INST / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_slow_pause", - "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. 
Sample with: MISC_RETIRED.PAUSE_INST", "ScaleUnit": "100%" }, @@ -1755,7 +1962,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%" }, { @@ -1813,10 +2020,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json index 095904c77001..d6f543471b24 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json @@ -19,7 +19,7 @@ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches", "EventCode": "0xAB", "EventName": "DSB2MITE_SWITCHES.COUNT", - "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses.\nNote: Invoking MITE requires two or three cycles delay.", + "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses. 
Note: Invoking MITE requires two or three cycles delay.", "SampleAfterValue": "2000003", "UMask": "0x1" }, @@ -267,11 +267,11 @@ "UMask": "0x4" }, { - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.DSB_CYCLES_OK]", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.DSB_CYCLES_OK]", "CounterMask": "4", "EventCode": "0x79", "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS", - "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_OK]", + "PublicDescription": "Counts the number of cycles 4 or more uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_OK]", "SampleAfterValue": "2000003", "UMask": "0x18" }, @@ -321,11 +321,11 @@ "UMask": "0x18" }, { - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", "CounterMask": "4", "EventCode": "0x79", "EventName": "IDQ.DSB_CYCLES_OK", - "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", + "PublicDescription": "Counts the number of cycles 4 or more uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. 
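The rewording to "4 or more" matches how the hardware counter mask works: with "CounterMask": "4", the per-cycle uop count is compared against the mask, so the event counts cycles in which at least four uops were delivered, not exactly four. A small Python model of that thresholding, on an invented uop trace:

  # cmask=N counts cycles where the underlying per-cycle count is >= N.
  uops_per_cycle = [0, 4, 5, 4, 2, 6, 0, 4]   # hypothetical DSB deliveries

  dsb_cycles_ok = sum(1 for u in uops_per_cycle if u >= 4)   # cmask=4
  print(dsb_cycles_ok)   # -> 5 cycles delivered 4 or more uops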
[This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", "SampleAfterValue": "2000003", "UMask": "0x18" }, diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json index a00ad0aaf1ba..c69b2c33334b 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json @@ -6866,7 +6866,7 @@ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).", "EventCode": "0xC9", "EventName": "RTM_RETIRED.ABORTED", - "PEBS": "1", + "PEBS": "2", "PublicDescription": "Number of times RTM abort was triggered.", "SampleAfterValue": "2000003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json b/tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json index bc6a9a4d27a9..904d299c95a3 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -26,7 +26,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -64,8 +66,10 @@ "tma_L5_group": "Metrics for top-down breakdown at level 5", "tma_L6_group": "Metrics for top-down breakdown at level 6", "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category", + "tma_assists_group": "Metrics contributing to tma_assists category", "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", 
"tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category", @@ -78,9 +82,9 @@ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category", "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue $issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -100,10 +104,12 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json index 3ab5e91a4c1c..95d42ac36717 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json @@ -19,7 +19,7 @@ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.", "EventCode": "0x28", "EventName": "CORE_POWER.LVL2_TURBO_LICENSE", - "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server michroarchtecture). This includes high current AVX 512-bit instructions.", + "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). 
This includes high current AVX 512-bit instructions.", "SampleAfterValue": "200003", "UMask": "0x20" }, diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json index 66d686cc933e..c50ddf5b40dd 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json @@ -396,7 +396,7 @@ "Errata": "SKL091, SKL044", "EventCode": "0xC0", "EventName": "INST_RETIRED.NOP", - "PEBS": "1", + "PEBS": "2", "SampleAfterValue": "2000003", "UMask": "0x2" }, diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json index 1a342dff1503..3fe9ce483bbe 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json @@ -38,7 +38,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.CLFLUSH", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x80", "Unit": "IRP" }, @@ -47,7 +47,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.CRD", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x2", "Unit": "IRP" }, @@ -56,7 +56,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.DRD", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x4", "Unit": "IRP" }, @@ -65,7 +65,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x20", "Unit": "IRP" }, @@ -74,7 +74,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x1", "Unit": "IRP" }, @@ -101,7 +101,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.WBMTOI", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x40", "Unit": "IRP" }, @@ -500,7 +500,7 @@ "EventCode": "0x11", "EventName": "UNC_I_TRANSACTIONS.WRITES", "PerPkg": "1", - "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. 
For writes that are tickled and have to retry, the counter will be incremented for each retry.", + "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.", "UMask": "0x2", "Unit": "IRP" }, diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json index c6254af7a468..ceef46046488 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json @@ -144,6 +144,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0x40", "Unit": "PCU" }, { @@ -152,6 +153,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0x80", "Unit": "PCU" }, { @@ -160,6 +162,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0xc0", "Unit": "PCU" }, { diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json index f59405877ae8..73feadaf7674 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json @@ -205,7 +205,7 @@ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.", "EventCode": "0x85", "EventName": "ITLB_MISSES.WALK_PENDING", - "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.", + "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. 
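Because ITLB_MISSES.WALK_PENDING accumulates one count per cycle for each busy page-miss handler, dividing it by a completed-walks count approximates the average walk duration. The sketch below assumes ITLB_MISSES.WALK_COMPLETED as the companion event and uses invented counts:

  # Occupancy-over-throughput: cycle-accumulated pending walks divided by
  # finished walks gives mean walk latency in cycles (values are made up).
  itlb_walk_pending = 900_000     # ITLB_MISSES.WALK_PENDING
  itlb_walk_completed = 30_000    # assumed companion: ITLB_MISSES.WALK_COMPLETED

  avg_walk_cycles = itlb_walk_pending / itlb_walk_completed
  print(f"average ITLB walk ~= {avg_walk_cycles:.0f} cycles")   # -> 30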
EPT page walk duration are excluded in Skylake microarchitecture.", "SampleAfterValue": "100003", "UMask": "0x10" }, diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json index 9e53da55d0c1..93d99318a623 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json @@ -267,7 +267,7 @@ "CounterMask": "6", "EventCode": "0x79", "EventName": "IDQ.DSB_CYCLES_OK", - "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.", "SampleAfterValue": "2000003", "UMask": "0x8" }, diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json index e8bf7c9c44e1..5420f529f491 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json @@ -264,6 +264,7 @@ "BriefDescription": "Number of times an RTM execution aborted.", "EventCode": "0xc9", "EventName": "RTM_RETIRED.ABORTED", + "PEBS": "1", "PublicDescription": "Counts the number of times RTM abort was triggered.", "SampleAfterValue": "100003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json index 1f8200fb8964..e2086bedeca8 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json @@ -428,6 +428,7 @@ "BriefDescription": "INST_RETIRED.MACRO_FUSED", "EventCode": "0xc0", "EventName": "INST_RETIRED.MACRO_FUSED", + "PEBS": "1", "SampleAfterValue": "2000003", "UMask": "0x10" }, @@ -435,6 +436,7 @@ "BriefDescription": "Retired NOP instructions.", "EventCode": "0xc0", "EventName": "INST_RETIRED.NOP", + "PEBS": "1", "PublicDescription": "Counts all retired NOP or ENDBR32/64 instructions", "SampleAfterValue": "2000003", "UMask": "0x2" @@ -451,6 +453,7 @@ "BriefDescription": "Iterations of Repeat string retired instructions.", "EventCode": "0xc0", "EventName": "INST_RETIRED.REP_ITERATION", + "PEBS": "1", "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. 
Note the number of iterations is implementation-dependent.", "SampleAfterValue": "2000003", "UMask": "0x8" diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json index bf5a511b99d1..141dab46682e 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json @@ -3565,6 +3565,15 @@ "Unit": "CHA" }, { + "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_EXP_LOCAL", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_EXP_LOCAL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20c8968201", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR", @@ -3716,6 +3725,15 @@ "Unit": "CHA" }, { + "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_EXP_LOCAL", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_EXP_LOCAL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20ccd68201", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Inserts; LLCPrefRFO misses from local IA", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO", @@ -3742,6 +3760,15 @@ "Unit": "CHA" }, { + "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_EXP_LOCAL", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_EXP_LOCAL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20c8868201", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR", @@ -3892,6 +3919,15 @@ "Unit": "CHA" }, { + "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_EXP_LOCAL", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_EXP_LOCAL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20ccc68201", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Inserts; RFO prefetch misses from local IA", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL", @@ -4162,6 +4198,42 @@ "Unit": "CHA" }, { + "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on the local socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcd42ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on a remote socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcd437f04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on the local socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL", + "PerPkg": "1", + "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcc42ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on a remote socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE", + "PerPkg": "1", + "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcc437f04", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Inserts; Misses from local IO", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS", @@ -4171,7 +4243,7 @@ "Unit": "CHA" }, { - "BriefDescription": "TOR Inserts; ItoM misses from local IO", + "BriefDescription": "TOR Inserts : ItoM, indicating a full cacheline write request, from IO Devices that missed the LLC", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM", "PerPkg": "1", @@ -4189,7 +4261,7 @@ "Unit": "CHA" }, { - "BriefDescription": "TOR Inserts; RdCur and FsRdCur misses from local IO", + "BriefDescription": "TOR Inserts; RdCur and FsRdCur requests from local IO that miss LLC", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR", "PerPkg": "1", @@ -4216,6 +4288,24 @@ "Unit": "CHA" }, { + "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL", + "PerPkg": "1", + "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xc8f2ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE", + "PerPkg": "1", + "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", + "UMask": "0xc8f37f04", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Inserts; RFO from local IO", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO", @@ -4429,6 +4519,46 @@ "Unit": "CHA" }, { + "BriefDescription": "TOR Inserts for INVXTOM opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_INVXTOM_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e87e8240", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Inserts for RDCODE opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_RDCODE_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e80e8240", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Inserts for RDCUR opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_RDCUR_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e8068240", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Inserts for RDDATA opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_RDDATA_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e8168240", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Inserts for RDINVOWN_OPT opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_RDINVOWN_OPT_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e8268240", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Inserts; All Snoops from Remote", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.SNPS_FROM_REM", @@ -5012,6 +5142,15 @@ "Unit": "CHA" }, { + "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_EXP_LOCAL", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_EXP_LOCAL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20c8968201", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR", @@ -5163,6 +5302,15 @@ "Unit": "CHA" }, { + "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_EXP_LOCAL", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_EXP_LOCAL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20ccd68201", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy; LLCPrefRFO misses from local IA", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO", @@ -5189,6 +5337,15 @@ "Unit": "CHA" }, { + "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_EXP_LOCAL", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_EXP_LOCAL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20c8868201", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", "EventCode": "0x36", "EventName": 
"UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR", @@ -5339,6 +5496,15 @@ "Unit": "CHA" }, { + "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_EXP_LOCAL", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_EXP_LOCAL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20ccc68201", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL", @@ -5602,6 +5768,42 @@ "Unit": "CHA" }, { + "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC and targets local memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_LOCAL", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcd42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC and targets remote memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_REMOTE", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcd437e04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy; ITOM misses from local IO and targets local memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_LOCAL", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcc42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy; ITOM misses from local IO and targets remote memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_REMOTE", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcc437e04", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR", @@ -5611,6 +5813,24 @@ "Unit": "CHA" }, { + "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO and targets local memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_LOCAL", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", + "UMask": "0xc8f2fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO and targets remote memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_REMOTE", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xc8f37e04", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy; RFO misses from local IO", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO", @@ -5842,6 +6062,46 @@ "Unit": "CHA" }, { + "BriefDescription": "TOR Occupancy for INVXTOM opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_INVXTOM_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e87e8240", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RDCODE opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_RDCODE_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e80e8240", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RDCUR opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_RDCUR_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e8068240", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RDDATA opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_RDDATA_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e8168240", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RDINVOWN_OPT opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_RDINVOWN_OPT_CXL_EXP_LOCAL", + "PerPkg": "1", + "UMask": "0x20e8268240", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy; All Snoops from Remote", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.SNPS_FROM_REM", diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json index 65d088556bae..22bb490e9666 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json @@ -4888,7 +4888,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AD)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AD)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AD", "PerPkg": "1", @@ -4897,7 +4897,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AK)", + 
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AK)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AK", "PerPkg": "1", @@ -4906,7 +4906,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AKC)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AKC)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AKC", "PerPkg": "1", @@ -4915,7 +4915,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (BL)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (BL)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.BL", "PerPkg": "1", @@ -4924,7 +4924,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (IV)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (IV)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.IV", "PerPkg": "1", @@ -5291,7 +5291,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xe", "Unit": "UPI" }, @@ -5300,7 +5300,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10e", "Unit": "UPI" }, @@ -5309,7 +5309,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xf", "Unit": "UPI" }, @@ -5318,7 +5318,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10f", "Unit": "UPI" }, @@ -5763,7 +5763,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xe", "Unit": "UPI" }, @@ -5772,7 +5772,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10e", "Unit": "UPI" }, @@ -5781,7 +5781,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xf", "Unit": "UPI" }, @@ -5790,7 +5790,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10f", "Unit": "UPI" }, diff --git a/tools/perf/pmu-events/arch/x86/grandridge/cache.json b/tools/perf/pmu-events/arch/x86/grandridge/cache.json index 7f0dc65a55d2..f937ba0e50e1 100644 --- a/tools/perf/pmu-events/arch/x86/grandridge/cache.json +++ b/tools/perf/pmu-events/arch/x86/grandridge/cache.json @@ -16,6 +16,148 @@ "UMask": "0x4f" }, { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an instruction cache or TLB miss.", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.ALL", + "SampleAfterValue": "1000003", + "UMask": "0x7f" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.L2_HIT", + "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which hit in the LLC.", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT", + "SampleAfterValue": "1000003", + "UMask": "0x6" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches.", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x78" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.ALL", + "SampleAfterValue": "1000003", + "UMask": "0x7f" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.L2_HIT", + "PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 cache.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC.", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT", + "SampleAfterValue": "1000003", + "UMask": "0x6" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x78" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM", + "EventCode": "0xd3", + "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss in the L1 data cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS", + "PEBS": "1", + "SampleAfterValue": "200003", + 
"UMask": "0x40" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss in the L2 cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x80" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x1c" + }, + { + "BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.ALL", + "SampleAfterValue": "20003", + "UMask": "0x7" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.LD_BUF", + "SampleAfterValue": "20003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.RSV", + "SampleAfterValue": "20003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.ST_BUF", + "SampleAfterValue": "20003", + "UMask": "0x1" + }, + { "BriefDescription": "Counts the number of load ops retired.", "Data_LA": "1", "EventCode": "0xd0", @@ -144,6 +286,42 @@ "UMask": "0x5" }, { + "BriefDescription": "Counts the number of load uops retired that performed one or more locks", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x21" + }, + { + "BriefDescription": "Counts the number of memory uops retired that were splits.", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x43" + }, + { + "BriefDescription": "Counts the number of retired split load uops.", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x41" + }, + { + "BriefDescription": "Counts the number of retired split store uops.", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x42" + }, + { "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES", "Data_LA": "1", "EventCode": "0xd0", @@ -151,5 +329,12 @@ "PEBS": "2", "SampleAfterValue": "1000003", "UMask": "0x6" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ICACHE", + "SampleAfterValue": 
"1000003", + "UMask": "0x20" } ] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/floating-point.json b/tools/perf/pmu-events/arch/x86/grandridge/floating-point.json new file mode 100644 index 000000000000..00c9a8ae0f53 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/grandridge/floating-point.json @@ -0,0 +1,68 @@ +[ + { + "BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.", + "CounterMask": "1", + "EventCode": "0xcd", + "EventName": "ARITH.FPDIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of all types of floating point operations per uop with all default weighting", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.ALL", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { + "BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP64]", + "Deprecated": "1", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.DP", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of floating point operations that produce 32 bit single precision results [This event is alias to FP_FLOPS_RETIRED.SP]", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.FP32", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of floating point operations that produce 64 bit double precision results [This event is alias to FP_FLOPS_RETIRED.DP]", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.FP64", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP32]", + "Deprecated": "1", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.SP", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.FP_ASSIST", + "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.", + "SampleAfterValue": "20003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt).", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.FPDIV", + "PEBS": "1", + "SampleAfterValue": "2000003", + "UMask": "0x8" + } +] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/frontend.json b/tools/perf/pmu-events/arch/x86/grandridge/frontend.json index be8f1c7e195c..356d36aecc81 100644 --- a/tools/perf/pmu-events/arch/x86/grandridge/frontend.json +++ b/tools/perf/pmu-events/arch/x86/grandridge/frontend.json @@ -1,5 +1,21 @@ [ { + "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.", + "EventCode": "0xe6", + "EventName": "BACLEARS.ANY", + "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. 
Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.", + "SampleAfterValue": "200003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ITLB_MISS", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.", "EventCode": "0x80", "EventName": "ICACHE.ACCESSES", diff --git a/tools/perf/pmu-events/arch/x86/grandridge/memory.json b/tools/perf/pmu-events/arch/x86/grandridge/memory.json index 79d8af45100c..e0ce2decc805 100644 --- a/tools/perf/pmu-events/arch/x86/grandridge/memory.json +++ b/tools/perf/pmu-events/arch/x86/grandridge/memory.json @@ -1,5 +1,71 @@ [ { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.", + "EventCode": "0x05", + "EventName": "LD_HEAD.ANY_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xff" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.", + "EventCode": "0x05", + "EventName": "LD_HEAD.L1_BOUND_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xf4" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.", + "EventCode": "0x05", + "EventName": "LD_HEAD.L1_MISS_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x81" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.", + "EventCode": "0x05", + "EventName": "LD_HEAD.OTHER_AT_RET", + "PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.", + "SampleAfterValue": "1000003", + "UMask": "0xc0" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.", + "EventCode": "0x05", + "EventName": "LD_HEAD.PGWALK_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xa0" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.", + "EventCode": "0x05", + "EventName": "LD_HEAD.ST_ADDR_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x84" + }, + { + "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. 
Does not count internally generated machine clears such as those due to memory disambiguation.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.MEMORY_ORDERING", + "SampleAfterValue": "20003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts misaligned loads that are 4K page splits.", + "EventCode": "0x13", + "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts misaligned stores that are 4K page splits.", + "EventCode": "0x13", + "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x4" + }, + { "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.", "EventCode": "0xB7", "EventName": "OCR.DEMAND_DATA_RD.L3_MISS", diff --git a/tools/perf/pmu-events/arch/x86/grandridge/other.json b/tools/perf/pmu-events/arch/x86/grandridge/other.json index 2414f6ff53b0..70a9da7e97df 100644 --- a/tools/perf/pmu-events/arch/x86/grandridge/other.json +++ b/tools/perf/pmu-events/arch/x86/grandridge/other.json @@ -1,5 +1,14 @@ [ { + "BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]", + "Deprecated": "1", + "EventCode": "0xe4", + "EventName": "LBR_INSERTS.ANY", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { "BriefDescription": "Counts demand data reads that have any type of response.", "EventCode": "0xB7", "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE", @@ -16,5 +25,12 @@ "MSRValue": "0x10002", "SampleAfterValue": "100003", "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.", + "EventCode": "0x75", + "EventName": "SERIALIZATION.C01_MS_SCB", + "SampleAfterValue": "200003", + "UMask": "0x4" } ] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json index 41212957ef21..90292dc03d33 100644 --- a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json @@ -1,5 +1,13 @@ [ { + "BriefDescription": "Counts the number of cycles when any of the dividers are active.", + "CounterMask": "1", + "EventCode": "0xcd", + "EventName": "ARITH.DIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { "BriefDescription": "Counts the total number of branch instructions retired for all branch types.", "EventCode": "0xc4", "EventName": "BR_INST_RETIRED.ALL_BRANCHES", @@ -8,6 +16,71 @@ "SampleAfterValue": "200003" }, { + "BriefDescription": "Counts the number of JCC (Jump on Conditional Code) branch instructions retired, including both taken and not taken branches.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x7e" + }, + { + "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND_TAKEN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfe" + }, + { + "BriefDescription": "Counts the number of far branch instructions retired, including far jump, far call and return, and interrupt call and return.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.FAR_BRANCH", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xbf" + }, + { + "BriefDescription": "Counts the 
number of near indirect JMP and near indirect CALL branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xeb" + }, + { + "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfb" + }, + { + "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL", + "Deprecated": "1", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.IND_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfb" + }, + { + "BriefDescription": "Counts the number of near CALL branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xf9" + }, + { + "BriefDescription": "Counts the number of near RET branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_RETURN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xf7" + }, + { "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.", "EventCode": "0xc5", "EventName": "BR_MISP_RETIRED.ALL_BRANCHES", @@ -16,6 +89,54 @@ "SampleAfterValue": "200003" }, { + "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x7e" + }, + { + "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_TAKEN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfe" + }, + { + "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xeb" + }, + { + "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfb" + }, + { + "BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x80" + }, + { + "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.RETURN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xf7" + }, + { "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles", "EventName": "CPU_CLK_UNHALTED.CORE", "SampleAfterValue": "2000003", @@ -68,29 +189,294 @@ "SampleAfterValue": "2000003" }, { - "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.", + "BriefDescription": "Counts the number of retired loads that are blocked because they initially appear to be store forward blocked, but are subsequently shown not to be blocked based on a 4K alias check.", + "EventCode": "0x03", + "EventName": 
"LD_BLOCKS.ADDRESS_ALIAS", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of retired loads that are blocked because their address exactly matches the address of an older store whose data is not ready.", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.DATA_UNKNOWN", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of retired loads that are blocked because their address partially overlapped with an older store.", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.STORE_FORWARD", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.DISAMBIGUATION", + "SampleAfterValue": "20003", + "UMask": "0x8" + }, + { + "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.PAGE_FAULT", + "SampleAfterValue": "20003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.SLOW", + "SampleAfterValue": "20003", + "UMask": "0x6f" + }, + { + "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.SMC", + "SampleAfterValue": "20003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of Last Branch Record (LBR) entries. Requires LBRs to be enabled and configured in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]", + "EventCode": "0xe4", + "EventName": "MISC_RETIRED.LBR_INSERTS", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]", "EventCode": "0x73", "EventName": "TOPDOWN_BAD_SPECULATION.ALL", - "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. 
Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]", "SampleAfterValue": "1000003" }, { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.ALL_P", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Fast Nukes such as Memory Ordering Machine clears and MRN nukes", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.NUKE", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]", "EventCode": "0x74", "EventName": "TOPDOWN_BE_BOUND.ALL", "SampleAfterValue": "1000003" }, { - "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls", + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to backend 
stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.ALL_P", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). This could be caused by RSV full or load/store buffer block.", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC and FPC RAT stalls, which can be due to the FIQ and IEC reservation station stall (the integer, FP, and SIMD schedulers not being able to accept another uop).", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a marble (mrbl) stall. A 'marble' refers to a physical register file entry, also known as the physical destination (PDST).", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.REGISTER", + "SampleAfterValue": "1000003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to ROB full", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER", + "SampleAfterValue": "1000003", + "UMask": "0x40" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL_P]", "EventCode": "0x71", "EventName": "TOPDOWN_FE_BOUND.ALL", "SampleAfterValue": "1000003" }, { - "BriefDescription": "Counts the number of consumed retirement slots. 
Similar to UOPS_RETIRED.ALL", + "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL]", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ALL_P", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTClear", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER", + "SampleAfterValue": "1000003", + "UMask": "0x40" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ms", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.CISC", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stall", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.DECODE", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH", + "SampleAfterValue": "1000003", + "UMask": "0x8d" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY", + "SampleAfterValue": "1000003", + "UMask": "0x72" + }, + { + "BriefDescription": "This event is deprecated. [This event is alias to TOPDOWN_FE_BOUND.ITLB_MISS]", + "Deprecated": "1", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ITLB", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to itlb miss [This event is alias to TOPDOWN_FE_BOUND.ITLB]", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ITLB_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend that do not categorize into any other common frontend stall", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.OTHER", + "SampleAfterValue": "1000003", + "UMask": "0x80" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.PREDECODE", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL_P]", "EventCode": "0x72", "EventName": "TOPDOWN_RETIRING.ALL", "PEBS": "1", "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of consumed retirement slots. 
Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL]", + "EventCode": "0x72", + "EventName": "TOPDOWN_RETIRING.ALL_P", + "PEBS": "1", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of uops issued by the front end every cycle.", + "EventCode": "0x0e", + "EventName": "UOPS_ISSUED.ANY", + "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the total number of uops retired.", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.ALL", + "PEBS": "1", + "SampleAfterValue": "2000003" + }, + { + "BriefDescription": "Counts the number of integer divide uops retired.", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.IDIV", + "PEBS": "1", + "SampleAfterValue": "2000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.MS", + "PEBS": "1", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of x87 uops retired, includes those in ms flows", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.X87", + "PEBS": "1", + "SampleAfterValue": "2000003", + "UMask": "0x2" } ] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json new file mode 100644 index 000000000000..36614429dd72 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json @@ -0,0 +1,1821 @@ +[ + { + "BriefDescription": "Clockticks for CMS units attached to CHA", + "EventCode": "0x01", + "EventName": "UNC_CHACMS_CLOCKTICKS", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "CHACMS" + }, + { + "BriefDescription": "Number of CHA clock cycles while the event is enabled", + "EventCode": "0x01", + "EventName": "UNC_CHA_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "Clockticks of the uncore caching and home agent (CHA)", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR or IRQ (immediate cause for triggering).", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_ANY", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x3", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in IRQ (immediate cause for triggering).", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_IRQ", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). 
Threshold for distress signal assertion reached in TOR (immediate cause for triggering).", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_TOR", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to any of the memory controller channels.", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: CRd Requests", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1bd0ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests and Read Prefetches", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. 
Read transactions", + "UMask": "0x1bc1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Data Reads", + "UMask": "0x1fc1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches", + "UMask": "0x841ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops which miss the Cache", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Data Read Misses", + "UMask": "0x1fc101", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Transactions homed locally", + "UMask": "0xbdfff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Read Requests and Code Read Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x19d0ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests and Read Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x19c1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Read Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1850ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. 
Read transactions", + "UMask": "0x1841ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x1848ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: LLC Prefetch Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x189dff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x199dff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1910ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. 
Read transactions", + "UMask": "0x1981ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x1908ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x19c8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All RFO and RFO Prefetches", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : All RFOs - Demand and Prefetches", + "UMask": "0x1bc8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches", + "UMask": "0x9c8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Writes to Locally Homed Memory (includes writebacks from L1/L2)", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Writes", + "UMask": "0x842ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.ALL", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : All Lines Victimized", + "UMask": "0xf", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.IA", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.IO", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - All Lines", + "UMask": "0x200f", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in E State", + "UMask": "0x2002", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in F State", + "UMask": "0x2008", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in M State", + "UMask": "0x2001", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in S State", + "UMask": "0x2004", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in E state", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in M state", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in S State", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts when an RFO (the Read for Ownership issued before a write) request hits a cacheline in the S (Shared) state.", + "EventCode": "0x39", + "EventName": "UNC_CHA_MISC.RFO_HIT_S", + "PerPkg": "1", + "PublicDescription": "Cbo Misc : RFO HitS", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.LOCAL_INVITOE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.LOCAL_READ", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.INVITOE", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : InvalItoE", + "UMask": "0x30", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.READS", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : Reads", + "UMask": "0x3", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.READS_LOCAL", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.WRITES", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : Writes", + "UMask": "0xc", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR Inserts", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.ALL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All", + "UMask": "0xc001ffff", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores", + "UMask": "0xc001ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush events that are initiated from the Core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CLFlushes issued by iA Cores", + "UMask": "0xc8c7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlushOpt events that are initiated from the Core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CLFlushOpts issued by iA Cores", + "UMask": "0xc8d7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRDs issued by iA Cores", + "UMask": "0xc80fff01", + "Unit": "CHA" + }, + { + "BriefDescription": 
"Code read prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter", + "UMask": "0xc88fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores", + "UMask": "0xc827ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores", + "UMask": "0xc8a7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores which hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC", + "UMask": "0xc001fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC", + "UMask": "0xc80ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read prefetch from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc88ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC", + "UMask": "0xc827fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt prefetch from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc8a7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM requests from local IA cores that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC", + "UMask": "0xcc47fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC", + "UMask": "0xcccffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC", + "UMask": "0xccd7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO", + "PerPkg": "1", 
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC", + "UMask": "0xccc7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC", + "UMask": "0xc807fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC", + "UMask": "0xc887fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM events that are initiated from the Core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores", + "UMask": "0xcc47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear requests from local IA cores", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores", + "UMask": "0xcd47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores", + "UMask": "0xcccfff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores", + "UMask": "0xccd7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores", + "UMask": "0xccc7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC", + "UMask": "0xc001fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC", + "UMask": "0xc80ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRDs from local IA cores to locally homed memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc80efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc88ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRD Prefetches from local IA cores to locally homed memory", + "EventCode": "0x35", + 
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc88efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC", + "UMask": "0xc827fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd_Opt, and which target local memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc826fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC", + "UMask": "0xc8a7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF_OPT, and target local memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC", + "UMask": "0xc8a6fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM requests from local IA cores that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC", + "UMask": "0xcc47fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC", + "UMask": "0xcccffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC", + "UMask": "0xccd7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC", + "UMask": "0xccc7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to locally homed DDR addresses that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc8668601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to locally homed DDR addresses that miss the cache", + "EventCode": "0x35", + "EventName": 
"UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc86e8601", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC", + "UMask": "0xc807fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc806fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc887fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc886fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "UCRDF requests from local IA cores that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC", + "UMask": "0xc877de01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from a local IA core that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC", + "UMask": "0xc86ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA core that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC", + "UMask": "0xc867fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to DDR homed addresses which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc8678601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to DDR homed addresses which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc86f8601", + "Unit": "CHA" + }, + { + "BriefDescription": "WIL requests from local IA cores that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC", + "UMask": "0xc87fde01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": 
"UNC_CHA_TOR_INSERTS.IA_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores", + "UMask": "0xc807ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores", + "UMask": "0xc887ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "SpecItoM events that are initiated from the Core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores", + "UMask": "0xcc57ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbEFtoEs issued by iA Cores. (Non Modified Write Backs)", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc3fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs)", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc37ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbMtoEs issued by iA Cores . (Modified Write Backs)", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc2fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbMtoI requests from local IA cores", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbMtoIs issued by iA Cores", + "UMask": "0xcc27ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbStoIs issued by iA Cores . 
(Non Modified Write Backs)", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc67ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from a local IA core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores", + "UMask": "0xc86fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores", + "UMask": "0xc867ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from local IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices", + "UMask": "0xc001ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush requests from IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CLFlushes issued by IO Devices", + "UMask": "0xc8c3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from local IO devices which hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC", + "UMask": "0xc001fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMs from local IO devices which hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "UMask": "0xcd43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices which hit the LLC", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC", + "UMask": "0xc8f3fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "RFOs from local IO devices which hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC", + "UMask": "0xc803fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR ItoM inserts from local IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices", + "UMask": "0xcc43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices", + "UMask": "0xcd43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from 
local IO devices which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC", + "UMask": "0xc001fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR ItoM inserts from local IO devices which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices which miss the LLC", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f3fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR RFO inserts from local IO devices which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC", + "UMask": "0xc803fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices", + "UMask": "0xc8f3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "RFOs from local IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices", + "UMask": "0xc803ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "WBMtoI requests from IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbMtoIs issued by IO Devices", + "UMask": "0xcc23ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Inserts for SF or LLC Evictions", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LLC_OR_SF_EVICTIONS", + "PerPkg": "1", + "PublicDescription": "TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)", + "UMask": "0xc001ff02", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local iA and IO", + "UMask": "0xc000ff05", + "Unit": "CHA" + }, + { + "BriefDescription": "All from Local iA", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local iA", + "UMask": "0xc000ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All from Local IO", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local IO", + "UMask": "0xc000ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "Occupancy for all TOR entries", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL", + 
"PerPkg": "1", + "PublicDescription": "TOR Occupancy : All", + "UMask": "0xc001ffff", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores", + "UMask": "0xc001ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush events that are initiated from the Core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores", + "UMask": "0xc8c7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlushOpt events that are initiated from the Core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores", + "UMask": "0xc8d7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRDs issued by iA Cores", + "UMask": "0xc80fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter", + "UMask": "0xc88fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores", + "UMask": "0xc827ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores", + "UMask": "0xc8a7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC", + "UMask": "0xc001fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC", + "UMask": "0xc80ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc88ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC", + "UMask": "0xc827fd01", + "Unit": "CHA" 
+ }, + { + "BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc8a7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC", + "UMask": "0xcc47fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC", + "UMask": "0xcccffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC", + "UMask": "0xccd7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC", + "UMask": "0xccc7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC", + "UMask": "0xc807fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC", + "UMask": "0xc887fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM events that are initiated from the Core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores", + "UMask": "0xcc47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNear requests from local IA cores", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores", + "UMask": "0xcd47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores", + "UMask": "0xcccfff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA 
Cores", + "UMask": "0xccd7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores", + "UMask": "0xccc7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC", + "UMask": "0xc001fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC", + "UMask": "0xc80ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRDs from local IA cores to locally homed memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc80efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc88ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to locally homed memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc88efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC", + "UMask": "0xc827fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC", + "UMask": "0xc8a7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC", + "UMask": "0xcc47fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC", + "UMask": "0xcccffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that miss the cache", + 
"EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC", + "UMask": "0xccd7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC", + "UMask": "0xccc7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed DDR addresses that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc8668601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed DDR addresses that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc86e8601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC", + "UMask": "0xc807fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc806fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc887fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc886fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for UCRDF requests from local IA cores that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC", + "UMask": "0xc877de01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC", + "UMask": "0xc86ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA core that miss the cache", + "EventCode": "0x36", 
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC", + "UMask": "0xc867fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to DDR homed addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc8678601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to DDR homed addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc86f8601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WIL requests from local IA cores that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC", + "UMask": "0xc87fde01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores", + "UMask": "0xc807ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores", + "UMask": "0xc887ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for SpecItoM events that are initiated from the Core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores", + "UMask": "0xcc57ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WbMtoI requests from local IA cores", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores", + "UMask": "0xcc27ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores", + "UMask": "0xc86fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores", + "UMask": "0xc867ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices", + "UMask": "0xc001ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush requests from IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CLFlushes issued 
by IO Devices", + "UMask": "0xc8c3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC", + "UMask": "0xc001fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMs from local IO devices which hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "UMask": "0xcd43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which hit the LLC", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC", + "UMask": "0xc8f3fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RFOs from local IO devices which hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC", + "UMask": "0xc803fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices", + "UMask": "0xcc43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices", + "UMask": "0xcd43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC", + "UMask": "0xc001fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for 
PCIRDCURs issued by IO devices which miss the LLC", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f3fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR RFO inserts from local IO devices which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC", + "UMask": "0xc803fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices", + "UMask": "0xc8f3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RFOs from local IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices", + "UMask": "0xc803ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WBMtoI requests from IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices", + "UMask": "0xcc23ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local iA and IO", + "UMask": "0xc000ff05", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All from Local iA", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local iA", + "UMask": "0xc000ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All from Local IO", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local IO", + "UMask": "0xc000ff04", + "Unit": "CHA" + } +] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json new file mode 100644 index 000000000000..9091f8fde51f --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json @@ -0,0 +1,175 @@ +[ + { + "BriefDescription": "Clockticks of the mesh to memory (B2CMI)", + "EventCode": "0x01", + "EventName": "UNC_B2CMI_CLOCKTICKS", + "PerPkg": "1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times B2CMI egress did D2C (direct to core)", + "EventCode": "0x16", + "EventName": "UNC_B2CMI_DIRECT2CORE_TAKEN", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times D2C wasn't honoured even though the incoming request had d2c set for non cisgress txn", + "EventCode": "0x18", + "EventName": "UNC_B2CMI_DIRECT2CORE_TXN_OVERRIDE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts any read", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.ALL", + "PerPkg": "1", + "UMask": "0x104", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts normal reads issue to CMI", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.NORMAL", + 
"PerPkg": "1", + "UMask": "0x101", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts reads to 1lm non persistent memory regions", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_MEM", + "PerPkg": "1", + "UMask": "0x108", + "Unit": "B2CMI" + }, + { + "BriefDescription": "All Writes - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.ALL", + "PerPkg": "1", + "UMask": "0x110", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Full Non-ISOCH - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.FULL", + "PerPkg": "1", + "UMask": "0x101", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Partial Non-ISOCH - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.PARTIAL", + "PerPkg": "1", + "UMask": "0x102", + "Unit": "B2CMI" + }, + { + "BriefDescription": "DDR - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_MEM", + "PerPkg": "1", + "UMask": "0x120", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_XPT", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : XPT -All Channels", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.XPT_ALLCH", + "PerPkg": "1", + "PublicDescription": "Prefetch CAM Inserts : XPT - All Channels", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Occupancy : Channel 0", + "EventCode": "0x54", + "EventName": "UNC_B2CMI_PREFCAM_OCCUPANCY.CH0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Tracker Inserts : Channel 0", + "EventCode": "0x32", + "EventName": "UNC_B2CMI_TRACKER_INSERTS.CH0", + "PerPkg": "1", + "UMask": "0x104", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Tracker Occupancy : Channel 0", + "EventCode": "0x33", + "EventName": "UNC_B2CMI_TRACKER_OCCUPANCY.CH0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Write Tracker Inserts : Channel 0", + "EventCode": "0x40", + "EventName": "UNC_B2CMI_WR_TRACKER_INSERTS.CH0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Total Write Cache Occupancy : Mem", + "EventCode": "0x0F", + "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "IRP" + }, + { + "BriefDescription": "IRP Clockticks", + "EventCode": "0x01", + "EventName": "UNC_I_CLOCKTICKS", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue", + "EventCode": "0x18", + "EventName": "UNC_I_FAF_INSERTS", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": "FAF occupancy", + "EventCode": "0x19", + "EventName": "UNC_I_FAF_OCCUPANCY", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed", + "EventCode": "0x1F", + "EventName": "UNC_I_MISC1.LOST_FWD", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IRP" + }, + { + "BriefDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.", + "EventCode": "0x11", + "EventName": "UNC_I_TRANSACTIONS.WR_PREF", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "IRP" + } +] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json 
b/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json new file mode 100644 index 000000000000..c301ef95ae8d --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json @@ -0,0 +1,1187 @@ +[ + { + "BriefDescription": "IIO Clockticks", + "EventCode": "0x01", + "EventName": "UNC_IIO_CLOCKTICKS", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. 
Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff0ff", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002002", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008008", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010010", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020020", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040040", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080080", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of 
Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": 
"1", + "PortMask": "0x020", + "UMask": "0x7020001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x02", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x04", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x08", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x10", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x20", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x40", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x80", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x02", + "UMask": "0x7002001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + 
"PortMask": "0x04", + "UMask": "0x7004001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x08", + "UMask": "0x7008001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x10", + "UMask": "0x7010001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x20", + "UMask": "0x7020001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x40", + "UMask": "0x7040001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x80", + "UMask": "0x7080001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested 
of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080002", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 1G Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.1G_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 2M Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.2M_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 4K Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.4K_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Context cache hits", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "Context cache lookups", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB lookups first", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Fills (same as IOTLB miss)", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.MISSES", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "IOMMU memory access (both low and high priority)", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0xc0", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 1G page", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 256T page", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 512G page", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff080", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff040", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff020", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff002", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM", + "FCMask": "0x07", 
+ "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff008", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff001", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff004", + "Unit": "IIO" + }, + { + "BriefDescription": "All 9 bits of Page Walk Tracker Occupancy", + "EventCode": "0x42", + "EventName": "UNC_IIO_PWT_OCCUPANCY", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + 
"PortMask": "0x002", + "UMask": "0x7002001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card 
reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2", + "FCMask": 
"0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080002", + "Unit": "IIO" + } +] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json new file mode 100644 index 000000000000..a2405ed640c9 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json @@ -0,0 +1,385 @@ +[ + { + "BriefDescription": "DRAM Activate Count : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.ALL", + "PerPkg": "1", + "UMask": "0xf7", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.RD", + "PerPkg": "1", + "UMask": "0xf1", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Underfill Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.UFILL", + "PerPkg": "1", + "UMask": "0xf4", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Write transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.WR", + "PerPkg": "1", + "UMask": "0xf2", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all CAS operations", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all reads", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD", + "PerPkg": "1", + "UMask": "0xcf", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 regular reads", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_REG", + "PerPkg": "1", + "UMask": "0xc1", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 underfill reads", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc4", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all writes", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR", + "PerPkg": "1", + "UMask": "0xf0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 regular writes", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR_NONPRE", + "PerPkg": "1", + "UMask": "0xd0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 auto-precharge writes", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR_PRE", + "PerPkg": "1", + "UMask": "0xe0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all CAS operations", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all reads", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD", + "PerPkg": "1", + "UMask": "0xcf", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 regular reads", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_REG", + "PerPkg": "1", + "UMask": "0xc1", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 underfill reads", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc4", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all writes", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR", + "PerPkg": "1", + "UMask": "0xf0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 regular writes", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR_NONPRE", + "PerPkg": "1", + "UMask": "0xd0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 auto-precharge writes", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR_PRE", + "PerPkg": "1", + "UMask": "0xe0", + "Unit": "IMC" + }, + { + 
"BriefDescription": "Number of DRAM DCLK clock cycles while the event is enabled", + "EventCode": "0x01", + "EventName": "UNC_M_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "DRAM Clockticks", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "Number of DRAM HCLK clock cycles while the event is enabled", + "EventCode": "0x01", + "EventName": "UNC_M_HCLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "DRAM Clockticks", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Precharge due to (?) : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.PGT", + "PerPkg": "1", + "UMask": "0xf8", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.RD", + "PerPkg": "1", + "UMask": "0xf1", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.UFILL", + "PerPkg": "1", + "UMask": "0xf4", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.WR", + "PerPkg": "1", + "UMask": "0xf2", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer inserts on subchannel 0", + "EventCode": "0x17", + "EventName": "UNC_M_RDB_INSERTS.SCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer inserts on subchannel 1", + "EventCode": "0x17", + "EventName": "UNC_M_RDB_INSERTS.SCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer occupancy on subchannel 0", + "EventCode": "0x1a", + "EventName": "UNC_M_RDB_OCCUPANCY_SCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer occupancy on subchannel 1", + "EventCode": "0x1b", + "EventName": "UNC_M_RDB_OCCUPANCY_SCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.PCH0", + "PerPkg": "1", + "UMask": "0x50", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. 
This includes both ISOCH and non-ISOCH requests.", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.PCH1", + "PerPkg": "1", + "UMask": "0xa0", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 0", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH0", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 1", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 0", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 1", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 0", + "EventCode": "0x80", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 1", + "EventCode": "0x81", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 0", + "EventCode": "0x82", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 1", + "EventCode": "0x83", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue Allocations", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.PCH0", + "PerPkg": "1", + "UMask": "0x50", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue Allocations", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.PCH1", + "PerPkg": "1", + "UMask": "0xa0", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 0", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH0", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 1", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 0", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 1", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 0", + "EventCode": "0x84", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 1", + "EventCode": "0x85", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 1, 
pseudochannel 0", + "EventCode": "0x86", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 1", + "EventCode": "0x87", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH1", + "PerPkg": "1", + "Unit": "IMC" + } +] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-power.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-power.json new file mode 100644 index 000000000000..e3a66166e28c --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-power.json @@ -0,0 +1,10 @@ +[ + { + "BriefDescription": "PCU Clockticks", + "EventCode": "0x01", + "EventName": "UNC_P_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "PCU Clockticks: The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", + "Unit": "PCU" + } +] diff --git a/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json index bd5f2b634c98..371974c6d6c3 100644 --- a/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json @@ -1,24 +1,131 @@ [ { - "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.", + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.STLB_HIT", + "SampleAfterValue": "200003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses.", "EventCode": "0x08", "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED", - "SampleAfterValue": "1000003", + "SampleAfterValue": "200003", "UMask": "0xe" }, { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "200003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "200003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). 
Includes EPT-walk intervals.", + "SampleAfterValue": "200003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.STLB_HIT", + "SampleAfterValue": "2000003", + "UMask": "0x20" + }, + { "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.", "EventCode": "0x49", "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED", - "SampleAfterValue": "1000003", + "SampleAfterValue": "2000003", "UMask": "0xe" }, { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.", + "SampleAfterValue": "200003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of page walks initiated by an instruction fetch that missed the first and second level TLBs.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.MISS_CAUSED_WALK", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Accounts for all page sizes. Will result in an ITLB write from STLB.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.STLB_HIT", + "SampleAfterValue": "2000003", + "UMask": "0x20" + }, + { "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.", "EventCode": "0x85", "EventName": "ITLB_MISSES.WALK_COMPLETED", "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size.
Includes page walks that page fault.", "SampleAfterValue": "200003", "UMask": "0xe" + }, + { + "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of page walks outstanding for iside in PMH every cycle.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for iside in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals. Walks could be counted by edge detecting on this event, but would count restarted suspended walks.", + "SampleAfterValue": "200003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.", + "EventCode": "0x05", + "EventName": "LD_HEAD.DTLB_MISS_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x90" } ] diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json index 79d89c263677..5631018ed388 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json +++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json @@ -84,12 +84,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", + "MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -202,7 +202,7 @@ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", 
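For context on how MetricThreshold strings like the tma_dsb one above are consumed: perf computes the metric values and then evaluates the threshold expression (with '&' acting as logical AND) to decide whether to flag that node in the TopDown tree. A simplified stand-in with hypothetical values, not perf's actual expression parser:

  # Sketch of evaluating "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2"
  # (hypothetical metric values; real parsing is done by perf itself).
  metrics = {"tma_dsb": 0.22, "tma_fetch_bandwidth": 0.31}
  flagged = metrics["tma_dsb"] > 0.15 and metrics["tma_fetch_bandwidth"] > 0.2
  print("flag tma_dsb:", flagged)   # True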
"PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -257,7 +257,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -289,20 +289,20 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.", "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "ScaleUnit": "100%" }, { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" @@ -327,7 +327,7 @@ "MetricName": "tma_info_core_coreipc" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", "MetricExpr": "(UOPS_EXECUTED.CORE / 2 / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) if #SMT_on else UOPS_EXECUTED.CORE / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@))", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" @@ -397,96 +397,90 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_l1d_cache_fill_bw" }, { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", 
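The fill-bandwidth expressions being renamed here all share one shape: line-fill count times 64 bytes per cache line, scaled to GB/s over wall-clock time. A sketch of the L2 variant shown above, with made-up counter values:

  # "64 * L2_LINES_IN.ALL / 1e9 / duration_time" with hypothetical values:
  l2_lines_in = 1_250_000_000   # cache lines filled into L2 during the run
  duration_s = 2.0              # wall-clock seconds
  print(64 * l2_lines_in / 1e9 / duration_s, "GB/s")   # 40.0 GB/s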
+ "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" - }, - { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" - }, - { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "0", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": 
"L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" }, { "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", @@ -502,21 +496,27 @@ "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full" }, @@ -541,19 +541,6 @@ "MetricThreshold": "tma_info_system_kernel_utilization > 0.05" }, { - "BriefDescription": "Average number of parallel requests to external memory", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_parallel_requests", - "PublicDescription": "Average number of parallel requests to external memory. 
Accounts for all requests" - }, - { - "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_request_latency" - }, - { "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active", "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)", "MetricGroup": "SMT", @@ -612,7 +599,7 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED", @@ -621,7 +608,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -630,7 +617,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. 
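To unpack the tma_info_memory_load_miss_real_latency expression relocated above: summed miss-pending occupancy divided by the number of completed L1 misses (plus line-fill-buffer hits) gives an average per-miss latency in core cycles. With hypothetical counts:

  # L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS +
  #                          MEM_LOAD_UOPS_RETIRED.HIT_LFB)
  pending_cycles = 5_000_000    # hypothetical summed pending occupancy
  l1_miss, hit_lfb = 40_000, 10_000
  print(pending_cycles / (l1_miss + hit_lfb), "core cycles")   # 100.0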
Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS", @@ -640,20 +627,20 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_SMT", "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", "ScaleUnit": "100%" }, { @@ -672,7 +659,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. 
While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -707,21 +694,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -749,7 +736,7 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.", "ScaleUnit": "100%" }, @@ -768,7 +755,7 @@ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_0", "MetricThreshold": "tma_port_0 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -777,7 +764,7 @@ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_1", "MetricThreshold": "tma_port_1 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -813,16 +800,16 @@ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_5", "MetricThreshold": "tma_port_5 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -868,7 +855,7 @@ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_2", "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", + "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). 
Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", "ScaleUnit": "100%" }, { @@ -876,7 +863,7 @@ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { @@ -952,14 +939,5 @@ "MetricName": "tma_store_op_utilization", "MetricThreshold": "tma_store_op_utilization > 0.6", "ScaleUnit": "100%" - }, - { - "BriefDescription": "This metric serves as an approximation of legacy x87 usage", - "MetricExpr": "INST_RETIRED.X87 * tma_info_thread_uoppi / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group", - "MetricName": "tma_x87_use", - "MetricThreshold": "tma_x87_use > 0.1", - "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.", - "ScaleUnit": "100%" } ] diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json index 2fc25e22a42a..6ba0ea6e3fa6 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/memory.json +++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json @@ -371,7 +371,7 @@ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).", "EventCode": "0xc9", "EventName": "RTM_RETIRED.ABORTED", - "PEBS": "1", + "PEBS": "2", "SampleAfterValue": "2000003", "UMask": "0x4" }, diff --git a/tools/perf/pmu-events/arch/x86/haswell/metricgroups.json b/tools/perf/pmu-events/arch/x86/haswell/metricgroups.json index f6a0258e3241..8c808347f6da 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/haswell/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics 
spreadsheet", @@ -24,7 +24,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -94,6 +96,7 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json index 5f451948c893..83d50d80a148 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json @@ -286,12 +286,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", + "MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -404,7 +404,7 @@ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -459,7 +459,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -491,20 +491,20 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.", "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "ScaleUnit": "100%" }, { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" @@ -529,7 +529,7 @@ "MetricName": "tma_info_core_coreipc" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", "MetricExpr": "(UOPS_EXECUTED.CORE / 2 / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) if #SMT_on else UOPS_EXECUTED.CORE / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@))", "MetricGroup": 
"Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" @@ -599,96 +599,132 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "tma_info_memory_latency_data_l2_mlp", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_data_l2_mlp" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t" }, { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw_2t" + }, + { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw_2t" + }, + { "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": 
"tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "Average Latency for L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_miss_latency", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { - "BriefDescription": "Average Parallel L2 cache miss data reads", - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "BriefDescription": "Average Parallel L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_mlp", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "0", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + 
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricExpr": "tma_info_memory_tlb_page_walks_utilization", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_page_walks_utilization" }, { "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", @@ -704,21 +740,27 @@ "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. 
Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full" }, @@ -775,6 +817,12 @@ "MetricName": "tma_info_system_turbo_utilization" }, { + "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]", + "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_uncore_frequency" + }, + { "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", "MetricGroup": "Pipeline", @@ -815,7 +863,7 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED", @@ -824,7 +872,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -833,7 +881,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. 
Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS", @@ -843,20 +891,20 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_SMT", "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", "ScaleUnit": "100%" }, { @@ -875,7 +923,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. 
A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -890,11 +938,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory", - "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_local_dram", - "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_local_mem", + "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM_PS", "ScaleUnit": "100%" }, @@ -920,21 +967,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. 
This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -962,7 +1009,7 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. 
For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.", "ScaleUnit": "100%" }, @@ -981,7 +1028,7 @@ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_0", "MetricThreshold": "tma_port_0 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -990,7 +1037,7 @@ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_1", "MetricThreshold": "tma_port_1 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -1026,16 +1073,16 @@ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_5", "MetricThreshold": "tma_port_5 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -1081,7 +1128,7 @@ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_2", "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", + "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", "ScaleUnit": "100%" }, { @@ -1089,7 +1136,7 @@ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { @@ -1104,11 +1151,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory", - "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "310 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_remote_dram", - "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_remote_mem", + "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": 
"This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM_PS", "ScaleUnit": "100%" }, @@ -1187,15 +1233,6 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric serves as an approximation of legacy x87 usage", - "MetricExpr": "INST_RETIRED.X87 * tma_info_thread_uoppi / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group", - "MetricName": "tma_x87_use", - "MetricThreshold": "tma_x87_use > 0.1", - "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.", - "ScaleUnit": "100%" - }, - { "BriefDescription": "Uncore operating frequency in GHz", "MetricExpr": "UNC_C_CLOCKTICKS / (#num_cores / #num_packages * #num_packages) / 1e9 / duration_time", "MetricName": "uncore_frequency", diff --git a/tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json b/tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json index f6a0258e3241..8c808347f6da 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -24,7 +24,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -94,6 +96,7 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + 
"tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json index daebf1050acb..c391325ee36b 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json @@ -426,6 +426,7 @@ "BriefDescription": "Number of cores in C-State; C0 and C1", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0", + "Filter": "occ_sel=1", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" @@ -434,6 +435,7 @@ "BriefDescription": "Number of cores in C-State; C3", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3", + "Filter": "occ_sel=2", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" @@ -442,6 +444,7 @@ "BriefDescription": "Number of cores in C-State; C6 and C7", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6", + "Filter": "occ_sel=3", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json index b43a6c6d8b7f..f67cc73779f8 100644 --- a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json @@ -98,12 +98,12 @@ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * ASSISTS.ANY / tma_info_thread_slots", + "MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -113,7 +113,7 @@ { "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", "DefaultMetricgroupName": "TopdownL1", - "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=1\\,edge@ / tma_info_thread_slots", + "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / tma_info_thread_slots", "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group", "MetricName": "tma_backend_bound", "MetricThreshold": "tma_backend_bound > 0.2", @@ -135,7 +135,7 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.", "MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_branch_instructions", "MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6", "ScaleUnit": "100%" @@ -180,7 +180,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(29 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(29 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": 
"tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -200,7 +200,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "23.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -212,7 +212,7 @@ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%" }, @@ -240,7 +240,7 @@ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -259,7 +259,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. 
TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { @@ -268,12 +268,12 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", - "MetricExpr": "32.5 * tma_info_system_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", + "MetricExpr": "32.5 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -286,7 +286,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). 
Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%" }, { @@ -294,7 +294,7 @@ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -328,6 +328,15 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists", + "MetricExpr": "34 * ASSISTS.FP / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_fp_assists", + "MetricThreshold": "tma_fp_assists > 0.1", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", @@ -390,13 +399,13 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", - "MetricExpr": "ICACHE_16B.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", @@ -405,7 +414,7 @@ { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers" @@ -446,6 +455,12 @@ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio" + }, + { "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", @@ -472,67 +487,102 @@ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. 
Related metrics: " }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", - "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_bottleneck_branching_overhead" + "MetricThreshold": "tma_info_bottleneck_big_code > 20" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_bottleneck_big_code" + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
Related metrics: " }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store" + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency" + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls." + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -564,7 +614,7 @@ }, { "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core", - "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED", + "MetricExpr": "(CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else tma_info_thread_clks)", "MetricGroup": "SMT", "MetricName": "tma_info_core_core_clks" }, @@ -575,8 +625,14 @@ "MetricName": "tma_info_core_coreipc" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -588,8 +644,8 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -669,7 +725,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -677,7 +733,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -685,7 +741,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)", @@ -693,7 +749,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx512", "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -701,7 +757,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). 
May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -709,7 +765,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -727,7 +783,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -740,6 +796,12 @@ "MetricThreshold": "tma_info_inst_mix_ipload < 3" }, { + "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / MISC_RETIRED.PAUSE_INST", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_ippause" + }, + { "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", "MetricGroup": "InsType", @@ -763,136 +825,148 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw" + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t" }, { "BriefDescription": "Average per-core data fill 
bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (OFFCORE_REQUESTS.ALL_DATA_RD - OFFCORE_REQUESTS.DEMAND_DATA_RD + L2_RQSTS.ALL_DEMAND_MISS + L2_RQSTS.SWPF_MISS) / tma_info_inst_mix_instructions", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { - "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", - "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", - "MetricName": "tma_info_memory_l3mpki" + "BriefDescription": "", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", - "MetricGroup": 
"Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_l3mpki" }, { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { "BriefDescription": "Average Latency for L3 cache miss demand Loads", "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l3_miss_latency" + "MetricName": "tma_info_memory_latency_load_l3_miss_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_bus_lock_pki" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + 
"BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" }, { "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", @@ -920,42 +994,56 @@ "MetricName": "tma_info_memory_tlb_store_stlb_mpki" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, { + "BriefDescription": "Instructions per a microcode Assist invocation", + "MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", + "MetricName": "tma_info_pipeline_ipassist", + "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", + "PublicDescription": "Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)" + }, + { "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.", "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@", "MetricGroup": "Pipeline;Ret", "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -1071,8 +1159,8 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", - "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1081,7 +1169,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1091,7 +1179,7 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. 
Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1101,24 +1189,24 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_NMI", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "9 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "9 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. 
Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", - "MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks", + "MetricExpr": "DECODE.LCP / tma_info_thread_clks", "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", "MetricName": "tma_lcp", "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", @@ -1132,7 +1220,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1175,7 +1263,7 @@ "MetricExpr": "(LSD.CYCLES_ACTIVE - LSD.CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_lsd", - "MetricThreshold": "tma_lsd > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. 
However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.", "ScaleUnit": "100%" }, @@ -1190,21 +1278,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). 
Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1228,11 +1316,11 @@ }, { "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit", - "MetricExpr": "tma_retiring * tma_info_thread_slots / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots", + "MetricExpr": "UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots", "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1249,7 +1337,7 @@ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. 
Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%" }, @@ -1258,16 +1346,16 @@ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group", "MetricName": "tma_mite_4wide", - "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))", + "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "ScaleUnit": "100%" }, { - "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued", + "BriefDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY", "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", "MetricName": "tma_mixing_vectors", "MetricThreshold": "tma_mixing_vectors > 0.05", - "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", + "PublicDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1276,22 +1364,22 @@ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", "MetricName": "tma_ms_switches", "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", - "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance.
The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -1299,6 +1387,22 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)", "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_core_clks", "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", @@ -1326,17 +1430,17 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and 
simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -1345,7 +1449,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", + "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -1375,7 +1479,7 @@ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & 
(tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3", "ScaleUnit": "100%" }, @@ -1393,18 +1497,18 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", "MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_slow_pause", - "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST", "ScaleUnit": "100%" }, @@ -1433,7 +1537,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). 
Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%" }, { @@ -1501,10 +1605,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/icelake/memory.json b/tools/perf/pmu-events/arch/x86/icelake/memory.json index e8d2ec1c029b..f84763220549 100644 --- a/tools/perf/pmu-events/arch/x86/icelake/memory.json +++ b/tools/perf/pmu-events/arch/x86/icelake/memory.json @@ -259,6 +259,7 @@ "BriefDescription": "Number of times an RTM execution aborted.", "EventCode": "0xc9", "EventName": "RTM_RETIRED.ABORTED", + "PEBS": "1", "PublicDescription": "Counts the number of times RTM abort was triggered.", "SampleAfterValue": "100003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/icelake/metricgroups.json b/tools/perf/pmu-events/arch/x86/icelake/metricgroups.json index a151ba9cccb0..5452a1448ded 100644 --- a/tools/perf/pmu-events/arch/x86/icelake/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/icelake/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -25,7 +25,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from 
Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -63,8 +65,10 @@ "tma_L5_group": "Metrics for top-down breakdown at level 5", "tma_L6_group": "Metrics for top-down breakdown at level 6", "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category", + "tma_assists_group": "Metrics contributing to tma_assists category", "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", "tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category", @@ -77,9 +81,9 @@ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category", "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue $issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -99,10 +103,12 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/icelake/other.json b/tools/perf/pmu-events/arch/x86/icelake/other.json index cfb590632918..4fdc87339555 100644 --- a/tools/perf/pmu-events/arch/x86/icelake/other.json +++ b/tools/perf/pmu-events/arch/x86/icelake/other.json @@ -19,7 +19,7 @@ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.", "EventCode": "0x28", "EventName": "CORE_POWER.LVL2_TURBO_LICENSE", - "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in 
Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.", + "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.", "SampleAfterValue": "200003", "UMask": "0x20" }, diff --git a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json index 375b78044f14..c7313fd4fdf4 100644 --- a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json @@ -529,7 +529,7 @@ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread", "EventCode": "0x5e", "EventName": "RS_EVENTS.EMPTY_CYCLES", - "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)", + "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)", "SampleAfterValue": "1000003", "UMask": "0x1" }, @@ -553,14 +553,6 @@ "UMask": "0x2" }, { - "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions", - "EventCode": "0xa4", - "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS", - "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the speculative path as well as the out-of-order engine recovery past a branch misprediction.", - "SampleAfterValue": "10000003", - "UMask": "0x8" - }, - { "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event", "EventName": "TOPDOWN.SLOTS", "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).", diff --git a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json index f6edc4222f42..66669d062e68 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json @@ -282,7 +282,7 @@ "CounterMask": "5", "EventCode": "0x79", "EventName": "IDQ.DSB_CYCLES_OK", - "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. 
Count includes uops that may 'bypass' the IDQ.", "SampleAfterValue": "2000003", "UMask": "0x8" }, diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json index 71d78a7841ea..769ba12bef87 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json @@ -302,12 +302,12 @@ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * ASSISTS.ANY / tma_info_thread_slots", + "MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -317,7 +317,7 @@ { "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", "DefaultMetricgroupName": "TopdownL1", - "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=1\\,edge@ / tma_info_thread_slots", + "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / tma_info_thread_slots", "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group", "MetricName": "tma_backend_bound", "MetricThreshold": "tma_backend_bound > 0.2", @@ -339,7 +339,7 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.", "MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_branch_instructions", "MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6", "ScaleUnit": "100%" @@ -384,7 +384,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(44 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 43.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 43.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / 
tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -404,7 +404,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "43.5 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "43.5 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -416,7 +416,7 @@ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%" }, @@ -444,7 +444,7 @@ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -463,7 +463,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. 
This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { @@ -472,12 +472,12 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. 
Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", - "MetricExpr": "48 * tma_info_system_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", + "MetricExpr": "48 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -490,7 +490,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%" }, { @@ -498,7 +498,7 @@ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. 
Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -532,6 +532,15 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists", + "MetricExpr": "34 * ASSISTS.FP / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_fp_assists", + "MetricThreshold": "tma_fp_assists > 0.1", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", @@ -594,13 +603,13 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", - "MetricExpr": "ICACHE_16B.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", @@ -609,7 +618,7 @@ { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). 
Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers" @@ -644,12 +653,36 @@ }, { "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)", - "MetricExpr": "tma_info_core_ipmispredict", + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;BadSpec;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmispredict", "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio" + }, + { + "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", + "MetricExpr": "(100 * (1 - max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots - (CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * (topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots)) / (((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots - (CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * (topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots)) * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots - (CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * (topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots)) < (((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + max(0, 
topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots - (CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * (topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots)) * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", + "MetricGroup": "Cor;SMT", + "MetricName": "tma_info_botlnk_core_bound_likely" + }, + { + "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.", + "MetricExpr": "100 * (100 * ((5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / slots * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 10 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(3 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) + max(0, topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots - (5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / slots) * ((IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2) / ((IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2 + (IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2)))", + "MetricGroup": "DSBmiss;Fed", + "MetricName": "tma_info_botlnk_dsb_misses" + }, + { + "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.", + "MetricExpr": "100 * (100 * ((5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / slots * (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD) / (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 10 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(3 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD)))", + "MetricGroup": "Fed;FetchLat;IcMiss", + "MetricName": "tma_info_botlnk_ic_misses" + }, + { "BriefDescription": "Probability of Core Bound bottleneck 
hidden by SMT-profiling artifacts", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", @@ -676,67 +709,102 @@ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: " }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", - "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_bottleneck_branching_overhead" + "MetricThreshold": "tma_info_bottleneck_big_code > 20" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). 
Related metrics: tma_info_bottleneck_big_code" + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
Related metrics: " }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store" + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency" + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls." + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -768,7 +836,7 @@ }, { "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core", - "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED", + "MetricExpr": "(CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else tma_info_thread_clks)", "MetricGroup": "SMT", "MetricName": "tma_info_core_core_clks" }, @@ -779,8 +847,14 @@ "MetricName": "tma_info_core_coreipc" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -792,19 +866,12 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, { - "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", - "MetricGroup": "Bad;BadSpec;BrMispredicts;TopdownL1;tma_L1_group", - "MetricName": "tma_info_core_ipmispredict", - "MetricgroupNoGroup": "TopdownL1" - }, - { "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", "MetricExpr": "IDQ.DSB_UOPS / UOPS_ISSUED.ANY", "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB", @@ -874,7 +941,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -882,7 +949,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -890,7 +957,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)", @@ -898,7 +965,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx512", "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -906,7 +973,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -914,7 +981,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -932,7 +999,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -945,6 +1012,12 @@ "MetricThreshold": "tma_info_inst_mix_ipload < 3" }, { + "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / MISC_RETIRED.PAUSE_INST", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_ippause" + }, + { "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", "MetricGroup": "InsType", @@ -967,16 +1040,28 @@ "PublicDescription": "Instruction per taken branch. 
Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp" }, { + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "tma_info_memory_mix_bus_lock_pki", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_bus_lock_pki" + }, + { + "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki", + "MetricGroup": "Fed;MemoryTLB", + "MetricName": "tma_info_memory_code_stlb_mpki" + }, + { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction", @@ -992,124 +1077,214 @@ }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw" + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "tma_info_memory_latency_data_l2_mlp", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_data_l2_mlp" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including 
speculative)", "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw_2t" + }, + { + "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction", + "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY", + "MetricGroup": "L2Evicts;Mem;Server", + "MetricName": "tma_info_memory_l2_evictions_nonsilent_pki" + }, + { + "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)", + "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY", + "MetricGroup": "L2Evicts;Mem;Server", + "MetricName": "tma_info_memory_l2_evictions_silent_pki" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (OFFCORE_REQUESTS.ALL_DATA_RD - OFFCORE_REQUESTS.DEMAND_DATA_RD + L2_RQSTS.ALL_DEMAND_MISS + L2_RQSTS.SWPF_MISS) / tma_info_inst_mix_instructions", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw" + }, + { + "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw_2t" + }, + { "BriefDescription": "L3 cache true misses per kilo 
instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "Average Latency for L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_miss_latency", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { - "BriefDescription": "Average Parallel L2 cache miss data reads", - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "BriefDescription": "Average Parallel L2 cache miss demand Loads", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" + }, + { + "BriefDescription": "Average Latency for L3 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l3_miss_latency", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l3_miss_latency" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_load_l2_mlp" }, { "BriefDescription": "Average Latency for L3 cache miss demand Loads", "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l3_miss_latency" + "MetricName": "tma_info_memory_load_l3_miss_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": 
"tma_info_memory_thread_l1d_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_load_stlb_mpki", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_load_stlb_mpki" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_bus_lock_pki" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "tma_info_memory_uc_load_pki", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki" + }, + { + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" + }, + { + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (2 * (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD))", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_page_walks_utilization" + }, + { + "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_store_stlb_mpki" }, { "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", @@ -1137,54 +1312,76 @@ "MetricName": "tma_info_memory_tlb_store_stlb_mpki" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_uc_load_pki" + }, + { + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, { + "BriefDescription": "Instructions per a microcode Assist invocation", + "MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", + "MetricName": "tma_info_pipeline_ipassist", + "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", + "PublicDescription": "Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)" + }, + { "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.", "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@", "MetricGroup": "Pipeline;Ret", "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]", - "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_HIT_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e9 / duration_time", - "MetricGroup": "IoBW;Mem;Server;SoC", - "MetricName": "tma_info_system_io_read_bw" + "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e9 / duration_time", + "MetricGroup": "IoBW;MemOffcore;Server;SoC", + "MetricName": "tma_info_system_io_read_bw", + "PublicDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU" }, { "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]", - "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e9 / duration_time", - "MetricGroup": "IoBW;Mem;Server;SoC", - "MetricName": "tma_info_system_io_write_bw" + "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_HIT_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e9 / duration_time", + "MetricGroup": "IoBW;MemOffcore;Server;SoC", + "MetricName": "tma_info_system_io_write_bw", + "PublicDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -1209,7 +1406,7 @@ { "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]", "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / cha_0@event\\=0x0@", - "MetricGroup": "Mem;MemoryLat;Server;SoC", + "MetricGroup": "MemOffcore;MemoryLat;Server;SoC", "MetricName": "tma_info_system_mem_dram_read_latency", "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches" }, @@ -1223,7 +1420,7 @@ { "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]", "MetricExpr": "(1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM) / cha_0@event\\=0x0@ if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryLat;Server;SoC", + "MetricGroup": "MemOffcore;MemoryLat;Server;SoC", "MetricName": "tma_info_system_mem_pmm_read_latency", "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. 
Accounts for demand loads and L1/L2 data-read prefetches" }, @@ -1237,13 +1434,13 @@ { "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]", "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryBW;Server;SoC", + "MetricGroup": "MemOffcore;MemoryBW;Server;SoC", "MetricName": "tma_info_system_pmm_read_bw" }, { "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]", "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryBW;Server;SoC", + "MetricGroup": "MemOffcore;MemoryBW;Server;SoC", "MetricName": "tma_info_system_pmm_write_bw" }, { @@ -1288,6 +1485,12 @@ "MetricName": "tma_info_system_turbo_utilization" }, { + "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]", + "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_uncore_frequency" + }, + { "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", "MetricGroup": "Pipeline", @@ -1340,8 +1543,8 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", - "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1350,7 +1553,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. 
Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1360,7 +1563,7 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1370,24 +1573,24 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_NMI", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "19 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "19 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. 
Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", - "MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks", + "MetricExpr": "DECODE.LCP / tma_info_thread_clks", "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", "MetricName": "tma_lcp", "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", @@ -1401,7 +1604,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1431,10 +1634,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory", - "MetricExpr": "43.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "43.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_local_dram", - "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_local_mem", + "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. 
Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS", "ScaleUnit": "100%" }, @@ -1459,21 +1662,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). 
Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1497,11 +1700,11 @@ }, { "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit", - "MetricExpr": "tma_retiring * tma_info_thread_slots / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots", + "MetricExpr": "UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots", "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1518,7 +1721,7 @@ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. 
Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%" }, @@ -1527,16 +1730,16 @@ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group", "MetricName": "tma_mite_4wide", - "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))", + "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "ScaleUnit": "100%" }, { - "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued", + "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY", "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", "MetricName": "tma_mixing_vectors", "MetricThreshold": "tma_mixing_vectors > 0.05", - "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", + "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1545,22 +1748,22 @@ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", "MetricName": "tma_ms_switches", "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", - "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. 
The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -1568,8 +1771,24 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a", - "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / 
MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 0)) * (CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)", + "MetricExpr": "(((1 - (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))))) * (CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)", "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_pmm_bound", "MetricThreshold": "tma_pmm_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", @@ -1604,17 +1823,17 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -1623,7 +1842,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", + "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -1653,13 +1872,13 @@ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). 
Sample with: UOPS_EXECUTED.CYCLES_GE_3", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues", - "MetricExpr": "(97 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 97 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(97 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 97 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group", "MetricName": "tma_remote_cache", "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", @@ -1668,10 +1887,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory", - "MetricExpr": "108 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "108 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_remote_dram", - "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_remote_mem", + "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS", "ScaleUnit": "100%" }, @@ -1689,18 +1908,18 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. 
Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", "MetricExpr": "37 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_slow_pause", - "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST", "ScaleUnit": "100%" }, @@ -1729,7 +1948,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%" }, { @@ -1797,10 +2016,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. 
Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/icelakex/memory.json b/tools/perf/pmu-events/arch/x86/icelakex/memory.json index f36ac04f8d76..875b584b8443 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/memory.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/memory.json @@ -319,6 +319,7 @@ "BriefDescription": "Number of times an RTM execution aborted.", "EventCode": "0xc9", "EventName": "RTM_RETIRED.ABORTED", + "PEBS": "1", "PublicDescription": "Counts the number of times RTM abort was triggered.", "SampleAfterValue": "100003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json b/tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json index bc6a9a4d27a9..904d299c95a3 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -26,7 +26,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -64,8 +66,10 @@ "tma_L5_group": "Metrics for top-down breakdown at level 5", "tma_L6_group": "Metrics for top-down breakdown at level 6", "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category", + "tma_assists_group": "Metrics contributing to tma_assists category", "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", "tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category", @@ -78,9 +82,9 
+82,9
@@ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category", "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue $issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -100,10 +104,12 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json index b6ce14ebf844..a950ba3ddcb4 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json @@ -1580,7 +1580,7 @@ "Unit": "CHA" }, { - "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ", + "BriefDescription": "This event is deprecated.", "Deprecated": "1", "EventCode": "0x34", "EventName": "UNC_CHA_LLC_LOOKUP.CODE", @@ -1677,7 +1677,7 @@ "Unit": "CHA" }, { - "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ", + "BriefDescription": "This event is deprecated.", "Deprecated": "1", "EventCode": "0x34", "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL", @@ -6783,6 +6783,24 @@ "Unit": "CHA" }, { + "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices and targets local memory : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xc8f2ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices and targets remote memory : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", + "UMask": "0xc8f37f04", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Inserts : RFOs issued by IO Devices", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO", diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json index a066a009c511..6997e6f7d366 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json @@ -13523,7 +13523,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xe", "Unit": "UPI" }, @@ -13532,7 +13532,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10e", "Unit": "UPI" }, @@ -13541,7 +13541,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xf", "Unit": "UPI" }, @@ -13550,7 +13550,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10f", "Unit": "UPI" }, @@ -13559,7 +13559,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Request : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Request : Matches on Receive path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x8", "Unit": "UPI" }, @@ -13568,7 +13568,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x108", "Unit": "UPI" }, @@ -13577,7 +13577,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Response - Conflict : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Response - Conflict : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x1aa", "Unit": "UPI" }, @@ -13586,7 +13586,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Response - Invalid : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Response - Invalid : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x12a", "Unit": "UPI" }, @@ -13595,7 +13595,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Response - Data : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Response - Data : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xc", "Unit": "UPI" }, @@ -13604,7 +13604,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode : Matches on Receive path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10c", "Unit": "UPI" }, @@ -13613,7 +13613,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Response - No Data : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Response - No Data : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xa", "Unit": "UPI" }, @@ -13622,7 +13622,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10a", "Unit": "UPI" }, @@ -13631,7 +13631,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Snoop : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Snoop : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x9", "Unit": "UPI" }, @@ -13640,7 +13640,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x109", "Unit": "UPI" }, @@ -13649,7 +13649,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Writeback : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Writeback : Matches on Receive path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xd", "Unit": "UPI" }, @@ -13658,7 +13658,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10d", "Unit": "UPI" }, @@ -14038,7 +14038,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xe", "Unit": "UPI" }, @@ -14047,7 +14047,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10e", "Unit": "UPI" }, @@ -14056,7 +14056,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xf", "Unit": "UPI" }, @@ -14065,7 +14065,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10f", "Unit": "UPI" }, @@ -14074,7 +14074,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Request : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Request : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x8", "Unit": "UPI" }, @@ -14083,7 +14083,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode : Matches on Transmit path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x108", "Unit": "UPI" }, @@ -14092,7 +14092,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Conflict : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Conflict : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x1aa", "Unit": "UPI" }, @@ -14101,7 +14101,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Invalid : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Invalid : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x12a", "Unit": "UPI" }, @@ -14110,7 +14110,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Data : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Data : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xc", "Unit": "UPI" }, @@ -14119,7 +14119,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10c", "Unit": "UPI" }, @@ -14128,7 +14128,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Response - No Data : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Response - No Data : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xa", "Unit": "UPI" }, @@ -14137,7 +14137,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10a", "Unit": "UPI" }, @@ -14146,7 +14146,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Snoop : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Snoop : Matches on Transmit path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x9", "Unit": "UPI" }, @@ -14155,7 +14155,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x109", "Unit": "UPI" }, @@ -14164,7 +14164,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Writeback : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Writeback : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
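A quick way to read the UNC_UPI_*_BASIC_HDR_MATCH UMask values above: the UMask packs the match controls named Z..P in the descriptions, and each *_OPC variant is its base event's UMask with an extra 0x100 bit set (REQ 0x8 vs. REQ_OPC 0x108, NCB 0xe vs. NCB_OPC 0x10e, and so on). A minimal sketch of counting one of these events, assuming an Icelake-SP host where perf exports the alias (the uncore PMU instance it binds to may differ per system):

    # Count non-coherent-bypass header matches, with opcode matching
    # enabled, on the UPI receive path for one second, system-wide.
    perf stat -a -e UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC sleep 1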
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xd", "Unit": "UPI" }, @@ -14173,7 +14173,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10d", "Unit": "UPI" }, diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json index 9cef8862c428..1b8a719b81a5 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json @@ -2477,17 +2477,6 @@ "Unit": "IIO" }, { - "BriefDescription": "Number requests sent to PCIe from main die : From IRP", - "EventCode": "0xC2", - "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.IRP", - "FCMask": "0x07", - "PerPkg": "1", - "PortMask": "0xFF", - "PublicDescription": "Number requests sent to PCIe from main die : From IRP : Captures Posted/Non-posted allocations from IRP. i.e. either non-confined P2P traffic or from the CPU", - "UMask": "0x1", - "Unit": "IIO" - }, - { "BriefDescription": "Number requests sent to PCIe from main die : From ITC", "EventCode": "0xC2", "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.ITC", diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json index ee4dac6fc797..920cab6ffe37 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json @@ -151,6 +151,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0", "PerPkg": "1", "PublicDescription": "Number of cores in C-State : C0 and C1 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0x40", "Unit": "PCU" }, { @@ -159,6 +160,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3", "PerPkg": "1", "PublicDescription": "Number of cores in C-State : C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
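The UNC_P_POWER_STATE_OCCUPANCY.* events gaining explicit UMasks here (0x40, 0x80, 0xc0) are occupancy counters: each PCU cycle they add the number of cores currently in the selected C-state. A hedged sketch, assuming the alias is exported on the target; the raw count is normalized offline against elapsed PCU cycles to get an average core count:

    # Sum, per PCU cycle, of cores resident in C6/C7 over one second;
    # divide by elapsed PCU cycles afterwards for the average core count.
    perf stat -a -e UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 sleep 1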
It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0x80", "Unit": "PCU" }, { @@ -167,6 +169,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6", "PerPkg": "1", "PublicDescription": "Number of cores in C-State : C6 and C7 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0xc0", "Unit": "PCU" }, { diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json index 33fe555252b2..5f3f0b5aebad 100644 --- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json +++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json @@ -84,12 +84,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5) / (3 * tma_info_core_core_clks)", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", + "MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -202,7 +202,7 @@ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -257,7 +257,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. 
Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -287,7 +287,7 @@ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", "MetricName": "tma_fp_scalar", "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", - "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -296,7 +296,25 @@ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", "MetricName": "tma_fp_vector", "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", - "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors", + "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE) / UOPS_EXECUTED.THREAD", + "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P", + "MetricName": "tma_fp_vector_128b", + "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))", + "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors", + "MetricExpr": "(SIMD_FP_256.PACKED_DOUBLE + SIMD_FP_256.PACKED_SINGLE) / UOPS_EXECUTED.THREAD", + "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P", + "MetricName": "tma_fp_vector_256b", + "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))", + "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. 
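The new tma_fp_vector_128b / tma_fp_vector_256b entries are plain ratios of the width-specific FP_COMP_OPS_EXE and SIMD_FP_256 counts over UOPS_EXECUTED.THREAD, so they can be requested by name. A minimal usage sketch, assuming a perf build that carries these Ivy Bridge metrics ('./workload' is a placeholder):

    # Break retired FP vector work down by width using the metric names.
    perf stat -M tma_fp_vector_128b,tma_fp_vector_256b -a -- ./workload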
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -316,20 +334,20 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.", "MetricExpr": "ICACHE.IFETCH_STALL / tma_info_thread_clks - tma_itlb_misses", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "ScaleUnit": "100%" }, { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" @@ -360,8 +378,8 @@ "MetricName": "tma_info_core_flopc" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -398,7 +416,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." 
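The FMA double counting mentioned here is simple arithmetic, illustrated with made-up numbers: a program retiring 1e9 instructions of which 8e8 are FMAs contributes 2 * 8e8 = 1.6e9 counted FP operations, so IPArith is about 1e9 / 1.6e9, roughly 0.63, below 1 even though every instruction does useful work. The reworked per-thread ILP expression above pairs the uop count with a cmask-qualified copy of the same event, which can be reproduced on the command line (a sketch, assuming the cpu PMU accepts the alias inside the pmu/.../ syntax):

    # Uops executed, and cycles with at least one uop executed (cmask=1);
    # their ratio is the average uops per non-idle execution cycle.
    perf stat -e uops_executed.thread -e cpu/uops_executed.thread,cmask=1/ -a sleep 1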
}, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -438,96 +456,90 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_l1d_cache_fill_bw" }, { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" - }, - { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" - }, - { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "0", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
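The tma_info_system_dram_bw_use expression nearby scales as follows: each tracked request moves one 64-byte cache line, so bytes per second is 64 * requests / duration_time, and dividing by 1e6 and then by 1e3 yields GB/sec. A hedged sketch of collecting the raw inputs, assuming the client ARB events are exported on the target:

    # Raw inputs for the DRAM bandwidth estimate: cacheline-granular
    # requests tracked at the system agent arbiter over one second.
    perf stat -a -e UNC_ARB_TRK_REQUESTS.ALL,UNC_ARB_COH_TRK_REQUESTS.ALL sleep 1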
Per-Logical Processor)" }, { "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", @@ -537,8 +549,8 @@ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, @@ -549,21 +561,27 @@ "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full" }, @@ -572,7 +590,7 @@ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -595,19 +613,6 @@ "MetricThreshold": "tma_info_system_kernel_utilization > 0.05" }, { - "BriefDescription": "Average number of parallel requests to external memory", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_parallel_requests", - "PublicDescription": "Average number of parallel requests to external memory. 
Accounts for all requests" - }, - { - "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_request_latency" - }, - { "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active", "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)", "MetricGroup": "SMT", @@ -673,7 +678,7 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED", @@ -682,7 +687,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -691,7 +696,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. 
Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS", @@ -701,20 +706,20 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_SMT", "MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.LLC_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS))) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", "ScaleUnit": "100%" }, { @@ -733,7 +738,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. 
While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -768,21 +773,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -810,7 +815,7 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.", "ScaleUnit": "100%" }, @@ -829,7 +834,7 @@ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_0", "MetricThreshold": "tma_port_0 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -838,7 +843,7 @@ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_1", "MetricThreshold": "tma_port_1 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -874,7 +879,7 @@ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_5", "MetricThreshold": "tma_port_5 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -911,7 +916,7 @@ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_2", "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", + "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", "ScaleUnit": "100%" }, { @@ -919,7 +924,7 @@ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json b/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json index f6a0258e3241..8c808347f6da 100644 --- a/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -24,7 +24,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -94,6 +96,7 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", diff --git 
a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json index f5e46a768fdd..e6f5b05a71b5 100644 --- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json +++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json @@ -84,12 +84,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5) / (3 * tma_info_core_core_clks)", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", + "MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -202,7 +202,7 @@ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -257,7 +257,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -287,7 +287,7 @@ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", "MetricName": "tma_fp_scalar", "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", - "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. 
Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -296,7 +296,25 @@ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", "MetricName": "tma_fp_vector", "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", - "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors", + "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE) / UOPS_EXECUTED.THREAD", + "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P", + "MetricName": "tma_fp_vector_128b", + "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))", + "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors", + "MetricExpr": "(SIMD_FP_256.PACKED_DOUBLE + SIMD_FP_256.PACKED_SINGLE) / UOPS_EXECUTED.THREAD", + "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P", + "MetricName": "tma_fp_vector_256b", + "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))", + "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -316,20 +334,20 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.", "MetricExpr": "ICACHE.IFETCH_STALL / tma_info_thread_clks - tma_itlb_misses", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "ScaleUnit": "100%" }, { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" @@ -360,8 +378,8 @@ "MetricName": "tma_info_core_flopc" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -398,7 +416,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." 
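The "Values < 1 are possible" note above is worth a quick worked example: where FMA double counting applies, each FMA retires as one instruction but contributes two operations to the FP-arith denominator, so instructions-per-FP-arith can legitimately drop below 1. A minimal sketch with hypothetical counter values (FMA itself arrives on later parts; the note is shared metric text):

    # Why IpArith (instructions per FP arithmetic instruction) can be < 1:
    # an FMA is one retired instruction but counts as two FP operations.
    # All counter values below are hypothetical.

    inst_retired = 1_000_000        # INST_RETIRED.ANY
    fma_instructions = 800_000      # retired FMA instructions (hypothetical)
    other_fp_arith = 100_000        # remaining scalar/vector FP-arith ops

    fp_arith_count = 2 * fma_instructions + other_fp_arith  # FMA counted twice
    ip_arith = inst_retired / fp_arith_count

    print(f"IpArith = {ip_arith:.2f}")  # 1000000 / 1700000 -> 0.59, below 1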
}, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -438,96 +456,90 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_l1d_cache_fill_bw" }, { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" - }, - { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" - }, - { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" - }, - { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "0", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" }, { "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", @@ -537,8 +549,8 @@ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, @@ -549,21 +561,27 @@ "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full" }, @@ -572,7 +590,7 @@ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -627,6 +645,12 @@ "MetricName": "tma_info_system_turbo_utilization" }, { + "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]", + "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_uncore_frequency" + }, + { "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", "MetricGroup": "Pipeline", @@ -674,7 +698,7 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED", @@ -683,7 +707,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. 
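The tma_info_system_dram_bw_use expression above is simple enough to sanity-check by hand: every DRAM CAS command moves one 64-byte cache line, so bandwidth use is 64 bytes times total CAS commands over the wall-clock window. A minimal sketch with hypothetical counter values:

    # tma_info_system_dram_bw_use: 64 bytes per CAS, summed over reads and
    # writes, normalized to GB/s. Counter values are hypothetical.

    cas_reads = 50_000_000      # UNC_M_CAS_COUNT.RD (summed across channels)
    cas_writes = 20_000_000     # UNC_M_CAS_COUNT.WR
    duration_time = 1.0         # measurement window in seconds

    dram_bw_gbs = 64 * (cas_reads + cas_writes) / 1e9 / duration_time
    print(f"DRAM bandwidth use: {dram_bw_gbs:.1f} GB/s")   # -> 4.5 GB/s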
Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -692,7 +716,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS", @@ -702,20 +726,20 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_SMT", "MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.LLC_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. 
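The tma_l3_bound expression above encodes a simple cost model: stall cycles pending past the L2 are apportioned between L3 hits and memory by weighting each LLC miss as roughly seven times an LLC hit. A minimal sketch with hypothetical counts:

    # tma_l3_bound apportions post-L2 stall cycles between L3 hits and
    # DRAM using a 7x cost weight for LLC misses. Values are hypothetical.

    clks = 10_000_000               # tma_info_thread_clks
    stalls_l2_pending = 3_000_000   # CYCLE_ACTIVITY.STALLS_L2_PENDING
    llc_hit = 90_000                # MEM_LOAD_UOPS_RETIRED.LLC_HIT
    llc_miss = 10_000               # MEM_LOAD_UOPS_RETIRED.LLC_MISS

    hit_fraction = llc_hit / (llc_hit + 7 * llc_miss)   # miss weighted 7x
    tma_l3_bound = hit_fraction * stalls_l2_pending / clks
    print(f"L3-bound fraction: {tma_l3_bound:.1%}")     # -> 16.9% of clocks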
Related metrics: tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency", "ScaleUnit": "100%" }, { @@ -734,7 +758,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -749,11 +773,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory", - "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "200 * (MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_local_dram", - "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_local_mem", + "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. 
Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM_PS", "ScaleUnit": "100%" }, @@ -779,21 +802,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). 
This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -821,7 +844,7 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.", "ScaleUnit": "100%" }, @@ -840,7 +863,7 @@ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_0", "MetricThreshold": "tma_port_0 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -849,7 +872,7 @@ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_1", "MetricThreshold": "tma_port_1 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -885,7 +908,7 @@ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_5", "MetricThreshold": "tma_port_5 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. 
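The paired tma_mem_bandwidth / tma_mem_latency metrics above split DRAM-bound cycles with a cmask trick: cycles with six or more demand data reads in flight are attributed to bandwidth saturation, and the remaining read-outstanding cycles to latency. A minimal sketch with hypothetical counts, with thread_clks standing in for both CPU_CLK_UNHALTED.THREAD and tma_info_thread_clks:

    # Sketch of the tma_mem_bandwidth / tma_mem_latency split defined above.
    # Counter values are hypothetical.

    thread_clks = 10_000_000
    cycles_any_data_rd = 4_000_000   # OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD
    cycles_data_rd_ge6 = 1_500_000   # ALL_DATA_RD with cmask=6: >= 6 reads in flight

    # Cycles with >= 6 demand data reads outstanding approximate bandwidth
    # saturation; the rest of the read-outstanding cycles count as latency.
    tma_mem_bandwidth = min(thread_clks, cycles_data_rd_ge6) / thread_clks
    tma_mem_latency = min(thread_clks, cycles_any_data_rd) / thread_clks - tma_mem_bandwidth

    print(f"bandwidth-bound: {tma_mem_bandwidth:.1%}")   # 15.0%
    print(f"latency-bound:   {tma_mem_latency:.1%}")     # 25.0%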
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -922,7 +945,7 @@ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_2", "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", + "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", "ScaleUnit": "100%" }, { @@ -930,7 +953,7 @@ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { @@ -945,11 +968,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory", - "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "310 * (MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks", "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_remote_dram", - "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_remote_mem", + "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. 
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json b/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
index f6a0258e3241..8c808347f6da 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
@@ -2,10 +2,10 @@
"Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
- "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
- "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -24,7 +24,9 @@
"L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -94,6 +96,7 @@
"tma_l3_bound_group": "Metrics contributing to tma_l3_bound category",
"tma_light_operations_group": "Metrics contributing to tma_light_operations category",
"tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category",
+ "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
"tma_mem_latency_group": "Metrics contributing to tma_mem_latency category",
"tma_memory_bound_group": "Metrics contributing to tma_memory_bound category",
"tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
index 5df1ebfb89ea..ad6c531a9e38 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
@@ -514,6 +514,7 @@
"BriefDescription": "Number of cores in C-State; C0 and C1",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "Filter": "occ_sel=1",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
@@ -522,6 +523,7 @@
"BriefDescription": "Number of cores in C-State; C3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "Filter": "occ_sel=2",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
@@ -530,6 +532,7 @@
"BriefDescription": "Number of cores in C-State; C6 and C7",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "Filter": "occ_sel=3",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
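The occ_sel filters added above pin each PCU occupancy event to the C-state it is documented to count. Since an occupancy counter accumulates the number of resident cores every uncore cycle, dividing by uncore cycles over the same window gives the average core count, as the descriptions suggest. A sketch with assumed totals:

    # Illustration only, with invented totals: occupancy / uncore cycles
    # yields the average number of cores resident in the chosen C-state.
    cores_c0_occupancy = 3_200_000_000  # hypothetical CORES_C0 sum
    pcu_cycles = 800_000_000            # hypothetical PCU clocks, same window
    print(f"avg cores in C0/C1: {cores_c0_occupancy / pcu_cycles:.1f}")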
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index 35b1a3aa728d..fc8c3f785be1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -163,7 +163,7 @@
"MetricExpr": "tma_frontend_bound - tma_fetch_latency",
"MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
"MetricName": "tma_fetch_bandwidth",
- "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_lcp",
"ScaleUnit": "100%"
@@ -193,7 +193,7 @@
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -202,7 +202,25 @@
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE) / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(SIMD_FP_256.PACKED_DOUBLE + SIMD_FP_256.PACKED_SINGLE) / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -222,7 +240,7 @@
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)",
"ScaleUnit": "100%"
},
{
@@ -244,7 +262,7 @@
"MetricName": "tma_info_core_flopc"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
"MetricExpr": "UOPS_DISPATCHED.THREAD / (cpu@UOPS_DISPATCHED.CORE\,cmask\=1@ / 2 if #SMT_on else cpu@UOPS_DISPATCHED.CORE\,cmask\=1@)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
@@ -271,21 +289,27 @@
"MetricName": "tma_info_pipeline_retire"
},
{
- "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
"MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time",
"MetricGroup": "Power;Summary",
- "MetricName": "tma_info_system_average_frequency"
+ "MetricName": "tma_info_system_core_frequency"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "BriefDescription": "Average CPU Utilization (percentage)",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
+ "BriefDescription": "Average number of utilized CPUs",
+ "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_cpus_utilized"
+ },
+ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
"PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_mem_bandwidth"
},
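The tma_info_system_dram_bw_use expression above converts memory-controller CAS counts into bandwidth at 64 bytes per cache line. A sketch of that arithmetic with assumed counts and a 10-second window:

    # Sketch of the dram_bw_use arithmetic; CAS counts and duration invented.
    cas_rd, cas_wr = 2_500_000_000, 1_100_000_000  # UNC_M_CAS_COUNT.RD / .WR
    duration_time = 10.0                           # seconds
    print(f"{64 * (cas_rd + cas_wr) / 1e9 / duration_time:.1f} GB/s")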
@@ -294,7 +318,7 @@
"MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_system_gflops",
- "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
@@ -349,6 +373,12 @@
"MetricName": "tma_info_system_turbo_utilization"
},
{
+ "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]",
+ "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_uncore_frequency"
+ },
+ {
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
@@ -389,7 +419,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -399,7 +429,7 @@
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
"MetricConstraint": "NO_GROUP_EVENTS_SMT",
"MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks",
- "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
"MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
@@ -421,7 +451,7 @@
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
@@ -436,21 +466,21 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\,cmask\=6@) / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_system_dram_bw_use",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_system_dram_bw_use",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: ",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: ",
"ScaleUnit": "100%"
},
{
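The tma_info_system_gflops expression earlier in this file weights each FP event by its elements per instruction: scalar operations count once, 128-bit packed doubles twice, 128-bit packed singles and 256-bit packed doubles four times, and 256-bit packed singles eight times. A sketch with invented counts:

    # GFLOPS weighting taken from the MetricExpr above; counts are invented.
    ss, sd = 1.0e9, 5.0e8        # SSE_SCALAR_SINGLE / SSE_SCALAR_DOUBLE
    pd, ps = 2.0e8, 3.0e8        # SSE_PACKED_DOUBLE / SSE_PACKED_SINGLE
    pd256, ps256 = 1.0e8, 4.0e8  # SIMD_FP_256.PACKED_DOUBLE / .PACKED_SINGLE
    duration_time = 10.0
    flops = ss + sd + 2 * pd + 4 * (ps + pd256) + 8 * ps256
    print(f"{flops / 1e9 / duration_time:.2f} GFLOP/s")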
Related metrics: ", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: ", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json b/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json index bebb85945d62..a2c27794c0d8 100644 --- a/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -23,7 +23,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -88,6 +90,7 @@ "tma_issueTLB": "Metrics related by the issue $issueTLB", "tma_l1_bound_group": "Metrics contributing to tma_l1_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json index b3ee5d741015..6f98fc1728e6 100644 --- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json +++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json @@ -233,6 +233,7 @@ "BriefDescription": "Number of cores in C0", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0", + "Filter": "occ_sel=1", "PerPkg": "1", 
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" @@ -241,6 +242,7 @@ "BriefDescription": "Number of cores in C0", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3", + "Filter": "occ_sel=2", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" @@ -249,6 +251,7 @@ "BriefDescription": "Number of cores in C0", "EventCode": "0x80", "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6", + "Filter": "occ_sel=3", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Unit": "PCU" diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json index 1823149067b5..fb48be357c4e 100644 --- a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json +++ b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json @@ -31,7 +31,7 @@ "EventCode": "0x2e", "EventName": "LONGEST_LAT_CACHE.REFERENCE", "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. 
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
index 1823149067b5..fb48be357c4e 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
@@ -31,7 +31,7 @@
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x4f",
"Unit": "cpu_atom"
},
@@ -94,7 +94,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x400",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -106,7 +106,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -118,7 +118,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -130,7 +130,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x800",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -142,7 +142,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -154,7 +154,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -166,7 +166,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -178,7 +178,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -190,7 +190,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -202,7 +202,7 @@
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
@@ -212,7 +212,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
"PEBS": "2",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "200003",
"UMask": "0x6",
"Unit": "cpu_atom"
}
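Every change in the cache.json hunk above lowers SampleAfterValue, the event's default sampling period: one sample is taken per that many occurrences, so dropping 1000003 to 200003 yields roughly five times as many samples for the same event rate. A quick illustration with an assumed count:

    # SampleAfterValue is the default sampling period; smaller period means
    # more samples per run. The event count below is an assumption.
    event_count = 10_000_000
    for period in (1_000_003, 200_003):
        print(f"period {period}: ~{event_count // period} samples")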
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
index 5e4ef81b43d6..3a24934e8d6e 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
@@ -19,7 +19,7 @@
"BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
- "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.\nSoftware can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/memory.json b/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
index 51d70ba00bd4..9c188d80b7b9 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
@@ -155,7 +155,7 @@
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBFC00001",
+ "MSRValue": "0xFE7F8000001",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -175,7 +175,7 @@
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBFC00002",
+ "MSRValue": "0xFE7F8000002",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/other.json b/tools/perf/pmu-events/arch/x86/lunarlake/other.json
index 69adaed5686d..377f717db6cc 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/other.json
@@ -24,7 +24,7 @@
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
+ "MSRValue": "0x1FBC000001",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -34,7 +34,7 @@
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
+ "MSRValue": "0x1E780000001",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
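The OCR updates above change only MSRValue, the request/response selection mask written to the off-core response MSRs named by MSRIndex. Diffing the old and new masks as bit sets shows exactly which selection bits moved while the request-type bit stays put; a small sketch:

    # Compare the old and new OCR.DEMAND_DATA_RD.L3_MISS masks bit by bit.
    old, new = 0x3FBFC00001, 0xFE7F8000001
    print(f"added:   {new & ~old:#x}")
    print(f"removed: {old & ~new:#x}")
    print(f"kept:    {old & new:#x}")  # the low request-type bit 0x1 survives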
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
index 2bde664fdc0f..2c9f85ec8c4a 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
@@ -38,11 +38,19 @@
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
"EventName": "CPU_CLK_UNHALTED.CORE",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "2000003",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Core cycles when the core is not in a halt state.",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
@@ -50,9 +58,17 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Thread cycles when thread is not in halt state [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time. [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "2000003",
"UMask": "0x3",
"Unit": "cpu_atom"
},
@@ -65,6 +81,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of unhalted reference clock cycles",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Reference cycles when the core is not in halt state.",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
@@ -74,9 +99,16 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Core cycles when the thread is not in halt state",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in a halt state.",
"EventName": "CPU_CLK_UNHALTED.THREAD",
- "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the programmable counters available for other events.",
"SampleAfterValue": "2000003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -89,10 +121,10 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Thread cycles when thread is not in halt state",
+ "BriefDescription": "Thread cycles when thread is not in halt state [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time. [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
"SampleAfterValue": "2000003",
"Unit": "cpu_core"
},
@@ -100,7 +132,7 @@
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
@@ -149,10 +181,28 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL.",
+ "EventCode": "0xe4",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "LBR record is inserted",
+ "EventCode": "0xe4",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
- "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.\nSoftware can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
"SampleAfterValue": "10000003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -175,15 +225,21 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
+ "EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
- "PublicDescription": "Fixed Counter: Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the IQ. Also, includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"SampleAfterValue": "1000003",
- "UMask": "0x5",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL_P",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]",
"EventCode": "0xa4",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003",
@@ -191,6 +247,14 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN_BE_BOUND.ALL_P",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Fixed Counter: Counts the number of retirement slots not consumed due to front end stalls",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003",
@@ -198,7 +262,15 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL",
+ "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
+ "EventCode": "0x9c",
+ "EventName": "TOPDOWN_FE_BOUND.ALL_P",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of consumed retirement slots.",
"EventName": "TOPDOWN_RETIRING.ALL",
"PEBS": "1",
"SampleAfterValue": "1000003",
@@ -206,10 +278,19 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of consumed retirement slots.",
+ "EventCode": "0xc2",
+ "EventName": "TOPDOWN_RETIRING.ALL_P",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
- "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.\nSoftware can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric. Software can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
"SampleAfterValue": "2000003",
"UMask": "0x2",
"Unit": "cpu_core"
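The TOPDOWN_*.ALL_P events added above are general-purpose-counter twins of the cpu_atom fixed-counter slot counts. The four categories partition the pipeline's issue slots, so the level-1 top-down fractions follow directly from the raw counts; a sketch with invented values:

    # Level-1 top-down breakdown from the four atom slot counters; the
    # counts below are invented, not measured.
    fe_bound, be_bound, bad_spec, retiring = 2.0e9, 3.0e9, 5.0e8, 4.5e9
    slots = fe_bound + be_bound + bad_spec + retiring
    for name, v in (("frontend_bound", fe_bound), ("backend_bound", be_bound),
                    ("bad_speculation", bad_spec), ("retiring", retiring)):
        print(f"{name}: {100 * v / slots:.1f}%")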
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 4d1deed4437a..c9891630be10 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -1,38 +1,38 @@
Family-model,Version,Filename,EventType
-GenuineIntel-6-(97|9A|B7|BA|BF),v1.23,alderlake,core
-GenuineIntel-6-BE,v1.23,alderlaken,core
+GenuineIntel-6-(97|9A|B7|BA|BF),v1.24,alderlake,core
+GenuineIntel-6-BE,v1.24,alderlaken,core
GenuineIntel-6-(1C|26|27|35|36),v5,bonnell,core
-GenuineIntel-6-(3D|47),v28,broadwell,core
+GenuineIntel-6-(3D|47),v29,broadwell,core
GenuineIntel-6-56,v11,broadwellde,core
GenuineIntel-6-4F,v22,broadwellx,core
-GenuineIntel-6-55-[56789ABCDEF],v1.20,cascadelakex,core
+GenuineIntel-6-55-[56789ABCDEF],v1.21,cascadelakex,core
GenuineIntel-6-9[6C],v1.04,elkhartlake,core
-GenuineIntel-6-CF,v1.02,emeraldrapids,core
+GenuineIntel-6-CF,v1.06,emeraldrapids,core
GenuineIntel-6-5[CF],v13,goldmont,core
GenuineIntel-6-7A,v1.01,goldmontplus,core
-GenuineIntel-6-B6,v1.00,grandridge,core
+GenuineIntel-6-B6,v1.02,grandridge,core
GenuineIntel-6-A[DE],v1.01,graniterapids,core
-GenuineIntel-6-(3C|45|46),v33,haswell,core
+GenuineIntel-6-(3C|45|46),v35,haswell,core
GenuineIntel-6-3F,v28,haswellx,core
-GenuineIntel-6-7[DE],v1.19,icelake,core
-GenuineIntel-6-6[AC],v1.23,icelakex,core
+GenuineIntel-6-7[DE],v1.21,icelake,core
+GenuineIntel-6-6[AC],v1.24,icelakex,core
GenuineIntel-6-3A,v24,ivybridge,core
GenuineIntel-6-3E,v24,ivytown,core
GenuineIntel-6-2D,v24,jaketown,core
GenuineIntel-6-(57|85),v16,knightslanding,core
-GenuineIntel-6-BD,v1.00,lunarlake,core
-GenuineIntel-6-A[AC],v1.06,meteorlake,core
+GenuineIntel-6-BD,v1.01,lunarlake,core
+GenuineIntel-6-A[AC],v1.08,meteorlake,core
GenuineIntel-6-1[AEF],v4,nehalemep,core
GenuineIntel-6-2E,v4,nehalemex,core
-GenuineIntel-6-A7,v1.01,rocketlake,core
+GenuineIntel-6-A7,v1.02,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-8F,v1.17,sapphirerapids,core
-GenuineIntel-6-AF,v1.00,sierraforest,core
+GenuineIntel-6-8F,v1.20,sapphirerapids,core
+GenuineIntel-6-AF,v1.02,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
-GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v57,skylake,core
-GenuineIntel-6-55-[01234],v1.32,skylakex,core
-GenuineIntel-6-86,v1.21,snowridgex,core
-GenuineIntel-6-8[CD],v1.13,tigerlake,core
+GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v58,skylake,core
+GenuineIntel-6-55-[01234],v1.33,skylakex,core
+GenuineIntel-6-86,v1.22,snowridgex,core
+GenuineIntel-6-8[CD],v1.15,tigerlake,core
GenuineIntel-6-2C,v5,westmereep-dp,core
GenuineIntel-6-25,v4,westmereep-sp,core
GenuineIntel-6-2F,v4,westmereex,core
@@ -40,3 +40,4 @@
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
AuthenticAMD-25-([245][[:xdigit:]]|[[:xdigit:]]),v1,amdzen3,core
AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen4,core
+AuthenticAMD-26-[[:xdigit:]]+,v1,amdzen5,core
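mapfile.csv is how perf selects an event directory: the running CPU's vendor-family-model string is matched against the regex in the first column. A minimal sketch of that lookup, not perf's actual implementation (note the POSIX character classes need translating for Python's re module):

    import re

    # Two rows from the table above; matching is a regex test on the CPUID
    # string. This is an illustrative re-implementation, not perf's code.
    rows = [("GenuineIntel-6-(97|9A|B7|BA|BF)", "v1.24", "alderlake"),
            ("AuthenticAMD-26-[[:xdigit:]]+", "v1", "amdzen5")]
    cpuid = "GenuineIntel-6-B7"
    for pattern, version, dirname in rows:
        py_pattern = pattern.replace("[[:xdigit:]]", "[0-9A-Fa-f]")
        if re.fullmatch(py_pattern, cpuid):
            print(f"{cpuid} -> {dirname} ({version})")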
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
index 5fef87502d4b..af7acb15f661 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -319,7 +319,7 @@
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.ALL",
"SampleAfterValue": "1000003",
- "UMask": "0x6f",
+ "UMask": "0x7f",
"Unit": "cpu_atom"
},
{
@@ -344,7 +344,7 @@
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS",
"SampleAfterValue": "1000003",
- "UMask": "0x68",
+ "UMask": "0x78",
"Unit": "cpu_atom"
},
{
@@ -352,7 +352,7 @@
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.ALL",
"SampleAfterValue": "1000003",
- "UMask": "0x6f",
+ "UMask": "0x7f",
"Unit": "cpu_atom"
},
{
@@ -377,7 +377,7 @@
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS",
"SampleAfterValue": "1000003",
- "UMask": "0x68",
+ "UMask": "0x78",
"Unit": "cpu_atom"
},
{
@@ -967,6 +967,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
@@ -987,6 +997,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -1007,6 +1027,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
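The MEM_BOUND_STALLS_* fixes above only widen the UMask, which selects the sub-events summed into the count. XORing the old and new values shows the single sub-event bit the previous encodings left out:

    # UMask is a bitmask of sub-events; both fixes add the same bit.
    print(hex(0x7f ^ 0x6f), hex(0x78 ^ 0x68))  # -> 0x10 0x10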
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json b/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
index f66506ee37ef..30e604d2120f 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
@@ -1,5 +1,14 @@
[
{
+ "BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.",
+ "CounterMask": "1",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "This event counts the cycles the floating point divider is busy.",
"CounterMask": "1",
"EventCode": "0xb0",
@@ -26,7 +35,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"SampleAfterValue": "2000003",
@@ -34,7 +43,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"SampleAfterValue": "2000003",
@@ -42,7 +51,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"SampleAfterValue": "2000003",
@@ -50,6 +59,30 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
@@ -131,6 +164,53 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of all types of floating point operations per uop with all default weighting",
+ "EventCode": "0xc8",
+ "EventName": "FP_FLOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP64]",
+ "Deprecated": "1",
+ "EventCode": "0xc8",
+ "EventName": "FP_FLOPS_RETIRED.DP",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations that produce 32 bit single precision results [This event is alias to FP_FLOPS_RETIRED.SP]",
+ "EventCode": "0xc8",
+ "EventName": "FP_FLOPS_RETIRED.FP32",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations that produce 64 bit double precision results [This event is alias to FP_FLOPS_RETIRED.DP]",
+ "EventCode": "0xc8",
+ "EventName": "FP_FLOPS_RETIRED.FP64",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP32]",
+ "Deprecated": "1",
+ "EventCode": "0xc8",
+ "EventName": "FP_FLOPS_RETIRED.SP",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
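FP_FLOPS_RETIRED.DP and .SP above are retained only as deprecated aliases of the new .FP64 and .FP32 names. A tool consuming these tables might resolve such aliases before programming the event; a hypothetical helper, not a perf API:

    # Hypothetical alias resolution (not a perf API): map the deprecated
    # FP_FLOPS_RETIRED names to the replacements defined above.
    ALIASES = {"FP_FLOPS_RETIRED.DP": "FP_FLOPS_RETIRED.FP64",
               "FP_FLOPS_RETIRED.SP": "FP_FLOPS_RETIRED.FP32"}

    def resolve(name: str) -> str:
        return ALIASES.get(name, name)

    print(resolve("FP_FLOPS_RETIRED.DP"))  # -> FP_FLOPS_RETIRED.FP64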
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
index 9da8689eda81..f3b7b211afb5 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -378,7 +378,7 @@
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -455,7 +455,7 @@
"BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
- "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.\nThe count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. The count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
index a5b83293f157..617d0e255fd5 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
@@ -298,6 +298,16 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -307,6 +317,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/other.json b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
index d55e792c0c43..0bc2cb2eabb3 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
@@ -8,6 +8,26 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]",
+ "Deprecated": "1",
+ "EventCode": "0xe4",
+ "EventName": "LBR_INSERTS.ANY",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand data reads that have any type of response.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
@@ -19,6 +39,16 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -28,6 +58,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
@@ -38,6 +78,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts streaming stores that have any type of response.",
"EventCode": "0xB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
@@ -87,7 +137,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
"EventCode": "0x75",
"EventName": "SERIALIZATION.C01_MS_SCB",
"SampleAfterValue": "200003",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
index deaa7aba93f7..5ff4a7a32250 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -1,5 +1,14 @@
[
{
+ "BriefDescription": "Counts the number of cycles when any of the dividers are active.",
+ "CounterMask": "1",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
"CounterMask": "1",
"EventCode": "0xb0",
@@ -46,6 +55,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Conditional branch instructions retired.",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
@@ -66,6 +84,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Taken conditional branch instructions retired.",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
@@ -95,6 +122,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
@@ -105,6 +141,25 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated.
Refer to new event BR_INST_RETIRED.INDIRECT_CALL", + "Deprecated": "1", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.IND_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfb", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Counts the number of near CALL branch instructions retired.", "EventCode": "0xc4", "EventName": "BR_INST_RETIRED.NEAR_CALL", @@ -124,6 +179,15 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Counts the number of near RET branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_RETURN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xf7", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Return instructions retired.", "EventCode": "0xc4", "EventName": "BR_INST_RETIRED.NEAR_RETURN", @@ -671,6 +735,7 @@ "BriefDescription": "INST_RETIRED.MACRO_FUSED", "EventCode": "0xc0", "EventName": "INST_RETIRED.MACRO_FUSED", + "PEBS": "1", "SampleAfterValue": "2000003", "UMask": "0x10", "Unit": "cpu_core" @@ -679,6 +744,7 @@ "BriefDescription": "Retired NOP instructions.", "EventCode": "0xc0", "EventName": "INST_RETIRED.NOP", + "PEBS": "1", "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions", "SampleAfterValue": "2000003", "UMask": "0x2", @@ -697,6 +763,7 @@ "BriefDescription": "Iterations of Repeat string retired instructions.", "EventCode": "0xc0", "EventName": "INST_RETIRED.REP_ITERATION", + "PEBS": "1", "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.", "SampleAfterValue": "2000003", "UMask": "0x8", @@ -980,6 +1047,15 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Counts the number of Last Branch Record (LBR) entries. Requires LBRs to be enabled and configured in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]", + "EventCode": "0xe4", + "EventName": "MISC_RETIRED.LBR_INSERTS", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.", "EventCode": "0xa2", "EventName": "RESOURCE_STALLS.SCOREBOARD", @@ -991,7 +1067,7 @@ "BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.", "EventCode": "0xa4", "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS", - "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.\nThe count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. 
Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.", + "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.", "SampleAfterValue": "10000003", "UMask": "0x2", "Unit": "cpu_core" @@ -1040,10 +1116,18 @@ "Unit": "cpu_core" }, { - "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.", + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]", "EventCode": "0x73", "EventName": "TOPDOWN_BAD_SPECULATION.ALL", - "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.ALL_P", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. 
Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]", "SampleAfterValue": "1000003", "Unit": "cpu_atom" }, @@ -1080,7 +1164,7 @@ "Unit": "cpu_atom" }, { - "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls", + "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]", "EventCode": "0x74", "EventName": "TOPDOWN_BE_BOUND.ALL", "SampleAfterValue": "1000003", @@ -1095,6 +1179,13 @@ "Unit": "cpu_atom" }, { + "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.ALL_P", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). This could be caused by RSV full or load/store buffer block.", "EventCode": "0x74", "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER", @@ -1135,13 +1226,20 @@ "Unit": "cpu_atom" }, { - "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls", + "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL_P]", "EventCode": "0x71", "EventName": "TOPDOWN_FE_BOUND.ALL", "SampleAfterValue": "1000003", "Unit": "cpu_atom" }, { + "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL]", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ALL_P", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear", "EventCode": "0x71", "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT", @@ -1223,7 +1321,7 @@ "Unit": "cpu_atom" }, { - "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL", + "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL_P]", "EventCode": "0x72", "EventName": "TOPDOWN_RETIRING.ALL", "PEBS": "1", @@ -1231,6 +1329,14 @@ "Unit": "cpu_atom" }, { + "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL]", + "EventCode": "0x72", + "EventName": "TOPDOWN_RETIRING.ALL_P", + "PEBS": "1", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Number of non dec-by-all uops decoded by decoder", "EventCode": "0x76", "EventName": "UOPS_DECODED.DEC0_UOPS", @@ -1515,7 +1621,7 @@ "BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. 
Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.", "EventCode": "0xc2", "EventName": "UOPS_RETIRED.SLOTS", - "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.\nSoftware can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.", + "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric. Software can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.", "SampleAfterValue": "2000003", "UMask": "0x2", "Unit": "cpu_core" diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json index 08b5c7574cfc..901d8510f90f 100644 --- a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json @@ -1,5 +1,21 @@ [ { + "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.", + "EventCode": "0x85", + "EventName": "UNC_ARB_DAT_OCCUPANCY.RD", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "ARB" + }, + { + "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.", + "EventCode": "0x84", + "EventName": "UNC_HAC_ARB_COH_TRK_REQUESTS.ALL", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "HAC_ARB" + }, + { "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches", "EventCode": "0x81", "EventName": "UNC_HAC_ARB_REQ_TRK_REQUEST.DRD", @@ -9,7 +25,7 @@ }, { "BriefDescription": "Number of all CMI transactions", - "EventCode": "0x8a", + "EventCode": "0x8A", "EventName": "UNC_HAC_ARB_TRANSACTIONS.ALL", "PerPkg": "1", "UMask": "0x1", @@ -17,7 +33,7 @@ }, { "BriefDescription": "Number of all CMI reads", - "EventCode": "0x8a", + "EventCode": "0x8A", "EventName": "UNC_HAC_ARB_TRANSACTIONS.READS", "PerPkg": "1", "UMask": "0x2", @@ -25,7 +41,7 @@ }, { "BriefDescription": "Number of all CMI writes not including Mflush", - "EventCode": "0x8a", + "EventCode": "0x8A", "EventName": "UNC_HAC_ARB_TRANSACTIONS.WRITES", "PerPkg": "1", "UMask": "0x4", diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json index 056c2a885a32..55798e64c58a 100644 --- a/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json @@ -71,6 +71,15 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. 
Includes page walks that page fault.", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Page walks completed due to a demand data load to a 4K page.", "EventCode": "0x12", "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K", @@ -151,6 +160,15 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.", "EventCode": "0x13", "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M", @@ -160,6 +178,15 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Page walks completed due to a demand data store to a 4K page.", "EventCode": "0x13", "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K", @@ -258,6 +285,15 @@ "Unit": "cpu_core" }, { + "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. 
(4K)", "EventCode": "0x11", "EventName": "ITLB_MISSES.WALK_COMPLETED_4K", diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/memory.json b/tools/perf/pmu-events/arch/x86/rocketlake/memory.json index e8d2ec1c029b..f84763220549 100644 --- a/tools/perf/pmu-events/arch/x86/rocketlake/memory.json +++ b/tools/perf/pmu-events/arch/x86/rocketlake/memory.json @@ -259,6 +259,7 @@ "BriefDescription": "Number of times an RTM execution aborted.", "EventCode": "0xc9", "EventName": "RTM_RETIRED.ABORTED", + "PEBS": "1", "PublicDescription": "Counts the number of times RTM abort was triggered.", "SampleAfterValue": "100003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json b/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json index a151ba9cccb0..5452a1448ded 100644 --- a/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -25,7 +25,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -63,8 +65,10 @@ "tma_L5_group": "Metrics for top-down breakdown at level 5", "tma_L6_group": "Metrics for top-down breakdown at level 6", "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category", + "tma_assists_group": "Metrics contributing to tma_assists category", "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", "tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to 
tma_dram_bound category", @@ -77,9 +81,9 @@ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category", "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue $issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -99,10 +103,12 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/other.json b/tools/perf/pmu-events/arch/x86/rocketlake/other.json index cfb590632918..4fdc87339555 100644 --- a/tools/perf/pmu-events/arch/x86/rocketlake/other.json +++ b/tools/perf/pmu-events/arch/x86/rocketlake/other.json @@ -19,7 +19,7 @@ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.", "EventCode": "0x28", "EventName": "CORE_POWER.LVL2_TURBO_LICENSE", - "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.", + "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.", "SampleAfterValue": "200003", "UMask": "0x20" }, diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json b/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json index 375b78044f14..c7313fd4fdf4 100644 --- a/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json @@ -529,7 +529,7 @@ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread", "EventCode": "0x5e", "EventName": "RS_EVENTS.EMPTY_CYCLES", - "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)", + "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. 
branch mispredictions or i-cache misses)", "SampleAfterValue": "1000003", "UMask": "0x1" }, @@ -553,14 +553,6 @@ "UMask": "0x2" }, { - "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions", - "EventCode": "0xa4", - "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS", - "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the speculative path as well as the out-of-order engine recovery past a branch misprediction.", - "SampleAfterValue": "10000003", - "UMask": "0x8" - }, - { "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event", "EventName": "TOPDOWN.SLOTS", "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).", diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json index 27433fc15ede..1dad462e58b1 100644 --- a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json @@ -98,12 +98,12 @@ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * ASSISTS.ANY / tma_info_thread_slots", + "MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -113,7 +113,7 @@ { "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", "DefaultMetricgroupName": "TopdownL1", - "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=1\\,edge@ / tma_info_thread_slots", + "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / tma_info_thread_slots", "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group", "MetricName": "tma_backend_bound", "MetricThreshold": "tma_backend_bound > 0.2", @@ -135,7 +135,7 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.", "MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": 
"Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_branch_instructions", "MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6", "ScaleUnit": "100%" @@ -180,7 +180,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(29 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(29 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -200,7 +200,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "23.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -212,7 +212,7 @@ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%" }, @@ -240,7 +240,7 @@ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -259,7 +259,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { @@ -268,12 +268,12 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. 
Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", - "MetricExpr": "32.5 * tma_info_system_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", + "MetricExpr": "32.5 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -286,7 +286,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%" }, { @@ -294,7 +294,7 @@ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. 
Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -328,6 +328,15 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists", + "MetricExpr": "34 * ASSISTS.FP / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_fp_assists", + "MetricThreshold": "tma_fp_assists > 0.1", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", @@ -390,13 +399,13 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", - "MetricExpr": "ICACHE_16B.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", @@ -405,7 +414,7 @@ { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). 
Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers" @@ -446,6 +455,12 @@ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio" + }, + { "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", @@ -472,67 +487,102 @@ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: " }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", - "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_bottleneck_branching_overhead" + "MetricThreshold": "tma_info_bottleneck_big_code > 20" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). 
Related metrics: tma_info_bottleneck_big_code" + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
Related metrics: " }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store" + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency" + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls." + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -564,7 +614,7 @@ }, { "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core", - "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED", + "MetricExpr": "(CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else tma_info_thread_clks)", "MetricGroup": "SMT", "MetricName": "tma_info_core_core_clks" }, @@ -575,8 +625,14 @@ "MetricName": "tma_info_core_coreipc" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -588,8 +644,8 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -669,7 +725,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -677,7 +733,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -685,7 +741,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)", @@ -693,7 +749,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx512", "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -701,7 +757,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). 
May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -709,7 +765,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -727,7 +783,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -740,6 +796,12 @@ "MetricThreshold": "tma_info_inst_mix_ipload < 3" }, { + "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / MISC_RETIRED.PAUSE_INST", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_ippause" + }, + { "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", "MetricGroup": "InsType", @@ -763,142 +825,154 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw" + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t" }, { "BriefDescription": "Average per-core data fill 
bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { - "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", - "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", - "MetricName": "tma_info_memory_l3mpki" + "BriefDescription": "", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw" }, { - "BriefDescription": "Actual Average 
Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_l3mpki" }, { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { "BriefDescription": "Average Latency for L3 cache miss demand Loads", "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l3_miss_latency" + "MetricName": "tma_info_memory_latency_load_l3_miss_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_bus_lock_pki" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / 
sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" }, { "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", @@ -926,42 +1000,56 @@ "MetricName": "tma_info_memory_tlb_store_stlb_mpki" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, { + "BriefDescription": "Instructions per a microcode Assist invocation", + "MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", + "MetricName": "tma_info_pipeline_ipassist", + "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", + "PublicDescription": "Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)" + }, + { "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.", "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@", "MetricGroup": "Pipeline;Ret", "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -998,12 +1086,6 @@ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. 
([RKL+]memory-controller only)" }, { - "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", - "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.ALL", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_request_latency" - }, - { "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0", "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks", "MetricGroup": "Power", @@ -1097,8 +1179,8 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", - "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1107,7 +1189,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. 
Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1117,7 +1199,7 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1127,24 +1209,24 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_NMI", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "9 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "9 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. 
Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", - "MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks", + "MetricExpr": "DECODE.LCP / tma_info_thread_clks", "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", "MetricName": "tma_lcp", "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", @@ -1158,7 +1240,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1201,7 +1283,7 @@ "MetricExpr": "(LSD.CYCLES_ACTIVE - LSD.CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_lsd", - "MetricThreshold": "tma_lsd > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. 
However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.", "ScaleUnit": "100%" }, @@ -1216,21 +1298,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). 
Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1254,11 +1336,11 @@ }, { "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit", - "MetricExpr": "tma_retiring * tma_info_thread_slots / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots", + "MetricExpr": "UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots", "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1275,7 +1357,7 @@ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. 
Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%" }, @@ -1284,16 +1366,16 @@ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group", "MetricName": "tma_mite_4wide", - "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))", + "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "ScaleUnit": "100%" }, { - "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued", + "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY", "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", "MetricName": "tma_mixing_vectors", "MetricThreshold": "tma_mixing_vectors > 0.05", - "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", + "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1302,22 +1384,22 @@ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", "MetricName": "tma_ms_switches", "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", - "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. 
The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -1325,6 +1407,22 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)", "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_core_clks", "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", @@ -1352,17 +1450,17 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and 
simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -1371,7 +1469,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", + "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -1401,7 +1499,7 @@ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & 
(tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3", "ScaleUnit": "100%" }, @@ -1419,18 +1517,18 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", "MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_slow_pause", - "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST", "ScaleUnit": "100%" }, @@ -1459,7 +1557,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). 
Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%" }, { @@ -1527,10 +1625,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json b/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json index bebb85945d62..a2c27794c0d8 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -23,7 +23,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -88,6 +90,7 @@ "tma_issueTLB": "Metrics related by the 
issue $issueTLB", "tma_l1_bound_group": "Metrics contributing to tma_l1_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json index 8898b6fd0dea..ce836ebda542 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json @@ -163,7 +163,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_lcp", "ScaleUnit": "100%" @@ -193,7 +193,7 @@ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", "MetricName": "tma_fp_scalar", "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", - "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -202,7 +202,25 @@ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", "MetricName": "tma_fp_vector", "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", - "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. 
Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors", + "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE) / UOPS_DISPATCHED.THREAD", + "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P", + "MetricName": "tma_fp_vector_128b", + "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))", + "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors", + "MetricExpr": "(SIMD_FP_256.PACKED_DOUBLE + SIMD_FP_256.PACKED_SINGLE) / UOPS_DISPATCHED.THREAD", + "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P", + "MetricName": "tma_fp_vector_256b", + "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))", + "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -222,7 +240,7 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
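
An aside for readers following the MetricExpr strings: the two vector-width metrics added above are plain ratios over event counts, reported as percentages because of ScaleUnit "100%". A minimal Python sketch of that arithmetic, using the event names from the expressions with invented counts (real values come from a perf stat run):

  # Hypothetical counts; in practice they come from perf stat.
  counts = {
      "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE": 1_200_000,
      "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE": 3_400_000,
      "SIMD_FP_256.PACKED_DOUBLE": 5_600_000,
      "SIMD_FP_256.PACKED_SINGLE": 700_000,
      "UOPS_DISPATCHED.THREAD": 48_000_000,
  }
  # tma_fp_vector_128b, transcribed from the MetricExpr above.
  fp_128b = (counts["FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE"]
             + counts["FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE"]) / counts["UOPS_DISPATCHED.THREAD"]
  # tma_fp_vector_256b, likewise.
  fp_256b = (counts["SIMD_FP_256.PACKED_DOUBLE"]
             + counts["SIMD_FP_256.PACKED_SINGLE"]) / counts["UOPS_DISPATCHED.THREAD"]
  # ScaleUnit "100%" means the fraction is reported as a percentage.
  print(f"tma_fp_vector_128b: {fp_128b:.1%}")
  print(f"tma_fp_vector_256b: {fp_256b:.1%}")
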
([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { @@ -244,7 +262,7 @@ "MetricName": "tma_info_core_flopc" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", "MetricExpr": "UOPS_DISPATCHED.THREAD / (cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" @@ -271,21 +289,27 @@ "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_mem_bandwidth" }, @@ -294,7 +318,7 @@ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -317,19 +341,6 @@ "MetricThreshold": "tma_info_system_kernel_utilization > 0.05" }, { - "BriefDescription": "Average number of parallel requests to external memory", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_parallel_requests", - "PublicDescription": "Average number of parallel requests to external memory. 
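
The renamed tma_info_system_core_frequency and the new tma_info_system_cpus_utilized above are simple derived values. A sketch with invented totals (REF_TSC, TSC and the online-CPU count are the inputs named in the expressions; #num_cpus_online is resolved by perf at runtime):

  # Invented whole-run totals; real values come from perf stat.
  ref_tsc = 2_500_000_000        # CPU_CLK_UNHALTED.REF_TSC
  tsc = 10_000_000_000           # TSC ticks over the same window
  num_cpus_online = 4            # corresponds to #num_cpus_online in the JSON

  # "Average CPU Utilization (percentage)": unhalted reference cycles / TSC.
  cpu_utilization = ref_tsc / tsc
  # "Average number of utilized CPUs": utilization scaled by online CPUs.
  cpus_utilized = num_cpus_online * cpu_utilization
  print(f"tma_info_system_cpu_utilization: {cpu_utilization:.2%}")
  print(f"tma_info_system_cpus_utilized:   {cpus_utilized:.2f}")
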
Accounts for all requests" - }, - { - "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_request_latency" - }, - { "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active", "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)", "MetricGroup": "SMT", @@ -388,7 +399,7 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED", @@ -398,7 +409,7 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_SMT", "MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS", @@ -420,7 +431,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. 
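
The tma_itlb_misses expression above encodes a small cost model: an ITLB miss that hits the second-level TLB is charged a fixed 12 cycles, while misses that walk the page tables are charged their measured walk duration. A sketch with invented counts:

  stlb_hits = 10_000_000          # ITLB_MISSES.STLB_HIT
  walk_duration = 90_000_000      # ITLB_MISSES.WALK_DURATION, in cycles
  clks = 8_000_000_000            # tma_info_thread_clks

  # Fixed 12-cycle charge per STLB hit plus the measured walk cycles.
  tma_itlb_misses = (12 * stlb_hits + walk_duration) / clks
  print(f"tma_itlb_misses: {tma_itlb_misses:.2%}")
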
([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -435,21 +446,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_system_dram_bw_use", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_system_dram_bw_use", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: ", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). 
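
The two DRAM nodes above split one stall pool: tma_mem_bandwidth takes the cycles with many demand data reads in flight (the cmask of 6, i.e. six or more outstanding), and tma_mem_latency takes the remaining cycles with at least one read in flight. A sketch with invented counts showing why the subtraction avoids double counting:

  clks = 1_000_000_000              # tma_info_thread_clks
  rd_outstanding_ge6 = 180_000_000  # OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD, cmask=6
  cycles_with_rd = 420_000_000      # OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD

  tma_mem_bandwidth = min(clks, rd_outstanding_ge6) / clks
  # Latency node: any-read cycles minus the bandwidth-saturated share.
  tma_mem_latency = min(clks, cycles_with_rd) / clks - tma_mem_bandwidth
  print(f"tma_mem_bandwidth: {tma_mem_bandwidth:.1%}")
  print(f"tma_mem_latency:   {tma_mem_latency:.1%}")
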
This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json index 9606e76b98d6..b0447aad0dfc 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json @@ -432,6 +432,7 @@ "BriefDescription": "Retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.", "EventCode": "0xd3", "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", + "PEBS": "1", "PublicDescription": "Counts retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3.", "SampleAfterValue": "100007", "UMask": "0x10" diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json index 9e53da55d0c1..93d99318a623 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json @@ -267,7 +267,7 @@ "CounterMask": "6", "EventCode": "0x79", "EventName": "IDQ.DSB_CYCLES_OK", - "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "PublicDescription": "Counts the number of cycles where the optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. 
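
To make the corrected description concrete: with CounterMask "6", this event counts only cycles in which the DSB delivered at least six uops (the optimal rate) to the IDQ. A sketch of a derived coverage ratio one might compute from it, with invented counts (IDQ.DSB_CYCLES_ANY is the companion any-delivery event used by the tma_dsb metric later in this patch):

  dsb_cycles_any = 600_000_000    # IDQ.DSB_CYCLES_ANY: any DSB delivery
  dsb_cycles_ok = 450_000_000     # IDQ.DSB_CYCLES_OK: >= 6 uops delivered

  # Share of DSB-active cycles at full delivery bandwidth; the complement
  # feeds the DSB fetch-bandwidth node.
  ok_fraction = dsb_cycles_ok / dsb_cycles_any
  print(f"optimal-delivery share of DSB cycles: {ok_fraction:.1%}")
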
Count includes uops that may 'bypass' the IDQ.", "SampleAfterValue": "2000003", "UMask": "0x8" }, diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json index e8bf7c9c44e1..5420f529f491 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json @@ -264,6 +264,7 @@ "BriefDescription": "Number of times an RTM execution aborted.", "EventCode": "0xc9", "EventName": "RTM_RETIRED.ABORTED", + "PEBS": "1", "PublicDescription": "Counts the number of times RTM abort was triggered.", "SampleAfterValue": "100003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json index e6f7934320bf..81e5ca1c3078 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json @@ -2,10 +2,11 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "C0Wait": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -27,7 +28,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -68,6 +71,7 @@ "tma_assists_group": "Metrics contributing to tma_assists category", "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", "tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category", @@ -81,9 +85,9 @@ "tma_heavy_operations_group": 
"Metrics contributing to tma_heavy_operations category", "tma_int_operations_group": "Metrics contributing to tma_int_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue $issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -103,11 +107,13 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_bandwidth_group": "Metrics contributing to tma_mem_bandwidth category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json index 2cfe814d2015..e2086bedeca8 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json @@ -1,21 +1,5 @@ [ { - "BriefDescription": "AMX retired arithmetic BF16 operations.", - "EventCode": "0xce", - "EventName": "AMX_OPS_RETIRED.BF16", - "PublicDescription": "Number of AMX-based retired arithmetic bfloat16 (BF16) floating-point operations. Counts TDPBF16PS FP instructions. SW to use operation multiplier of 4", - "SampleAfterValue": "1000003", - "UMask": "0x2" - }, - { - "BriefDescription": "AMX retired arithmetic integer 8-bit operations.", - "EventCode": "0xce", - "EventName": "AMX_OPS_RETIRED.INT8", - "PublicDescription": "Number of AMX-based retired arithmetic integer operations of 8-bit width source operands. Counts TDPB[SS,UU,US,SU]D instructions. SW should use operation multiplier of 8.", - "SampleAfterValue": "1000003", - "UMask": "0x1" - }, - { "BriefDescription": "This event is deprecated. 
Refer to new event ARITH.DIV_ACTIVE", "CounterMask": "1", "Deprecated": "1", @@ -444,6 +428,7 @@ "BriefDescription": "INST_RETIRED.MACRO_FUSED", "EventCode": "0xc0", "EventName": "INST_RETIRED.MACRO_FUSED", + "PEBS": "1", "SampleAfterValue": "2000003", "UMask": "0x10" }, @@ -451,6 +436,7 @@ "BriefDescription": "Retired NOP instructions.", "EventCode": "0xc0", "EventName": "INST_RETIRED.NOP", + "PEBS": "1", "PublicDescription": "Counts all retired NOP or ENDBR32/64 instructions", "SampleAfterValue": "2000003", "UMask": "0x2" @@ -467,6 +453,7 @@ "BriefDescription": "Iterations of Repeat string retired instructions.", "EventCode": "0xc0", "EventName": "INST_RETIRED.REP_ITERATION", + "PEBS": "1", "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.", "SampleAfterValue": "2000003", "UMask": "0x8" diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json index 56e54babcc26..f8c0eac8b828 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json @@ -85,6 +85,24 @@ "ScaleUnit": "1MB/s" }, { + "BriefDescription": "Percentage of inbound full cacheline writes initiated by end device controllers that miss the L3 cache.", + "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM", + "MetricName": "io_percent_of_inbound_full_writes_that_miss_l3", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "Percentage of inbound partial cacheline writes initiated by end device controllers that miss the L3 cache.", + "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_RFO) / (UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_RFO)", + "MetricName": "io_percent_of_inbound_partial_writes_that_miss_l3", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "Percentage of inbound reads initiated by end device controllers that miss the L3 cache.", + "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR / UNC_CHA_TOR_INSERTS.IO_PCIRDCUR", + "MetricName": "io_percent_of_inbound_reads_that_miss_l3", + "ScaleUnit": "100%" + }, + { "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions", "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY", "MetricName": "itlb_2nd_level_large_page_mpi", @@ -310,20 +328,20 @@ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5_11 + UOPS_DISPATCHED.PORT_6) / (5 * tma_info_core_core_clks)", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the Advanced Matrix Extensions (AMX) execution engine was busy with tile (arithmetic) operations", + "BriefDescription": "This metric estimates fraction of cycles where the Advanced Matrix eXtensions (AMX) execution engine was busy with tile (arithmetic) 
operations", "MetricExpr": "EXE.AMX_BUSY / tma_info_core_core_clks", - "MetricGroup": "Compute;HPC;Server;TopdownL5;tma_L5_group;tma_ports_utilized_0_group", + "MetricGroup": "Compute;HPC;Server;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_amx_busy", - "MetricThreshold": "tma_amx_busy > 0.5 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_amx_busy > 0.5 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * cpu@ASSISTS.ANY\\,umask\\=0x1B@ / tma_info_thread_slots", + "MetricExpr": "78 * ASSISTS.ANY / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -381,6 +399,22 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).", + "MetricExpr": "CPU_CLK_UNHALTED.C01 / tma_info_thread_clks", + "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group", + "MetricName": "tma_c01_wait", + "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).", + "MetricExpr": "CPU_CLK_UNHALTED.C02 / tma_info_thread_clks", + "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group", + "MetricName": "tma_c02_wait", + "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction", "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", @@ -400,7 +434,7 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", - "MetricExpr": "(76 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 75.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(76 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 75.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 
0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -420,7 +454,7 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", - "MetricExpr": "75.5 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "75.5 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -432,7 +466,7 @@ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%" }, @@ -459,7 +493,7 @@ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -478,7 +512,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. 
Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { @@ -487,12 +521,12 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", - "MetricExpr": "80 * tma_info_system_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", + "MetricExpr": "80 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -505,7 +539,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). 
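
One reading of the tma_false_sharing expression above (and of the similar 76/75.5 terms in tma_contested_accesses): the leading constant is an approximate penalty in nanoseconds, converted to core cycles by multiplying with the measured core frequency in GHz, which would also explain the switch from tma_info_system_average_frequency to tma_info_system_core_frequency in these expressions. A sketch under that interpretation, with invented numbers:

  core_freq_ghz = 3.0             # tma_info_system_core_frequency
  hitm_snoops = 2_000_000         # OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM
  clks = 4_000_000_000            # tma_info_thread_clks

  # Roughly 80 ns per HITM snoop, scaled to cycles by the core frequency.
  cycles_per_hitm = 80 * core_freq_ghz
  tma_false_sharing = cycles_per_hitm * hitm_snoops / clks
  print(f"tma_false_sharing: {tma_false_sharing:.1%}")
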
Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%" }, { @@ -514,7 +548,7 @@ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)", "MetricGroup": "Default;FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2;Default", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -540,17 +574,8 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric approximates arithmetic floating-point (FP) matrix uops fraction the CPU has retired (aggregated across all supported FP datatypes in AMX engine)", - "MetricExpr": "cpu@AMX_OPS_RETIRED.BF16\\,cmask\\=1@ / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Compute;Flops;HPC;Pipeline;Server;TopdownL4;tma_L4_group;tma_fp_arith_group", - "MetricName": "tma_fp_amx", - "MetricThreshold": "tma_fp_amx > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", - "PublicDescription": "This metric approximates arithmetic floating-point (FP) matrix uops fraction the CPU has retired (aggregated across all supported FP datatypes in AMX engine). 
Refer to AMX_Busy and GFLOPs metrics for actual AMX utilization and FP performance, resp.", - "ScaleUnit": "100%" - }, - { "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)", - "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector + tma_fp_amx", + "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector", "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_fp_arith", "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6", @@ -568,7 +593,7 @@ }, { "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + FP_ARITH_INST_RETIRED2.SCALAR) / (tma_retiring * tma_info_thread_slots)", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR) / (tma_retiring * tma_info_thread_slots)", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", "MetricName": "tma_fp_scalar", "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", @@ -577,7 +602,7 @@ }, { "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_thread_slots)", + "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_thread_slots)", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", "MetricName": "tma_fp_vector", "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", @@ -625,10 +650,10 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.MACRO_FUSED / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_fused_instructions", "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}", "ScaleUnit": "100%" }, { @@ -639,13 +664,13 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2;Default", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
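
The umask change in tma_fp_vector above (0x3c to 0xfc) is easiest to see in binary: the umask ORs together the per-width sub-events of FP_ARITH_INST_RETIRED. The bit assignments below follow the commonly documented layout for this event (an assumption worth verifying against the platform's event list):

  VEC_128B = 0x04 | 0x08   # 128-bit packed double | packed single
  VEC_256B = 0x10 | 0x20   # 256-bit packed double | packed single
  VEC_512B = 0x40 | 0x80   # 512-bit packed double | packed single

  assert VEC_128B | VEC_256B == 0x3C              # old umask: 128b and 256b only
  assert VEC_128B | VEC_256B | VEC_512B == 0xFC   # new umask: all vector widths
  print("0xfc extends the vector count to 512-bit operations")
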
Sample with: UOPS_RETIRED.HEAVY", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .). Sample with: UOPS_RETIRED.HEAVY", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", @@ -653,7 +678,7 @@ }, { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). 
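
The rewritten expression above reuses the tma_info_bottleneck_mispredictions percentage rather than re-deriving the tree walk. With invented numbers, the arithmetic is simply percent of slots, to a fraction, to wasted slots per retired misprediction:

  mispredictions_pct = 24.0       # tma_info_bottleneck_mispredictions, in percent
  slots = 40_000_000_000          # tma_info_thread_slots
  mispredicts = 60_000_000        # BR_MISP_RETIRED.ALL_BRANCHES

  cost = mispredictions_pct * slots / mispredicts / 100
  print(f"~{cost:.0f} slots wasted per misprediction")
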
Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers" @@ -694,6 +719,30 @@ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio" + }, + { + "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", + "MetricExpr": "(100 * (1 - max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)) / (((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + cpu@RS.EMPTY\\,umask\\=0x1@) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / CPU_CLK_UNHALTED.THREAD) if max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)) < (((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + cpu@RS.EMPTY\\,umask\\=0x1@) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / CPU_CLK_UNHALTED.THREAD) else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0) + 0 * slots", + "MetricGroup": "Cor;SMT", + "MetricName": "tma_info_botlnk_core_bound_likely" + }, + { + "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.", + "MetricExpr": "100 * (100 * ((topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + INT_MISC.UNKNOWN_BRANCH_CYCLES / CPU_CLK_UNHALTED.THREAD) + min(3 * cpu@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) + max(0, 
topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots - (topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots)) * ((IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2) / ((IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2 + (IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2)))", + "MetricGroup": "DSBmiss;Fed", + "MetricName": "tma_info_botlnk_dsb_misses" + }, + { + "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.", + "MetricExpr": "100 * (100 * ((topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots) * (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD) / (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + INT_MISC.UNKNOWN_BRANCH_CYCLES / CPU_CLK_UNHALTED.THREAD) + min(3 * cpu@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD)))", + "MetricGroup": "Fed;FetchLat;IcMiss", + "MetricName": "tma_info_botlnk_ic_misses" + }, + { "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", "MetricGroup": "Cor;SMT", @@ -717,61 +766,98 @@ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: " }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", - "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). 
Related metrics: tma_info_bottleneck_branching_overhead" + "MetricThreshold": "tma_info_bottleneck_big_code > 20" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_bottleneck_big_code" + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. 
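
All of the new tma_info_bottleneck_cache_memory_* aggregates above follow one pattern: walk down the TMA tree, weighting each node by its share among its siblings, and sum the paths of interest. A condensed sketch of the DRAM-bandwidth path only, with invented node values (the real expressions also carry tma_pmm_bound and further terms):

  def share(node, siblings):
      # A node's weight among its sibling nodes at the same tree level.
      return node / sum(siblings)

  tma_memory_bound = 0.30
  l1, l2, l3, dram, store = 0.05, 0.02, 0.06, 0.12, 0.03
  tma_mem_bandwidth, tma_mem_latency = 0.15, 0.05

  # Memory_Bound * DRAM_Bound share * Mem_Bandwidth share under DRAM_Bound.
  bw_cost = (tma_memory_bound
             * share(dram, (l1, l2, l3, dram, store))
             * share(tma_mem_bandwidth, (tma_mem_bandwidth, tma_mem_latency)))
  print(f"DRAM-bandwidth path: {bw_cost:.1%} of slots")
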
Related metrics: tma_l3_hit_latency, tma_mem_latency" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * tma_amx_busy / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: " }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu@RS.EMPTY\\,umask\\=1@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store" + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency" + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls." + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -803,7 +889,7 @@ }, { "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core", - "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED", + "MetricExpr": "(CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else tma_info_thread_clks)", "MetricGroup": "SMT", "MetricName": "tma_info_core_core_clks" }, @@ -814,8 +900,14 @@ "MetricName": "tma_info_core_coreipc" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -827,8 +919,8 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
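
The reworked flopc expression above weights each FP_ARITH_INST_RETIRED umask by the number of floating-point operations a single retired instruction of that class performs: 1 for scalar, 2 for 128-bit packed double, 4 and 8 for the 4_FLOPS/8_FLOPS groupings, and 16 for 512-bit packed single. A minimal Python sketch of that weighting, with invented counter values standing in for real perf readings:

    # Sketch of the tma_info_core_flopc weighting; all counts are hypothetical.
    counts = {
        "FP_ARITH_INST_RETIRED.SCALAR": 1_000_000,            # 1 FLOP each
        "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 250_000,  # 2 FLOPs each
        "FP_ARITH_INST_RETIRED.4_FLOPS": 125_000,             # 4 FLOPs each
        "FP_ARITH_INST_RETIRED.8_FLOPS": 60_000,              # 8 FLOPs each
        "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": 30_000,   # 16 FLOPs each
    }
    core_clks = 2_000_000  # stand-in for tma_info_core_core_clks
    flops = (counts["FP_ARITH_INST_RETIRED.SCALAR"]
             + 2 * counts["FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE"]
             + 4 * counts["FP_ARITH_INST_RETIRED.4_FLOPS"]
             + 8 * counts["FP_ARITH_INST_RETIRED.8_FLOPS"]
             + 16 * counts["FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE"])
    print(f"flopc = {flops / core_clks:.2f} FLOPs per core clock")

The same weight vector reappears in tma_info_inst_mix_ipflop and tma_info_system_gflops further down, which keeps the three FLOP-derived metrics mutually consistent.
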
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -884,6 +976,13 @@ "MetricName": "tma_info_frontend_l2mpki_code_all" }, { + "BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection", + "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / cpu@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@", + "MetricGroup": "Fed", + "MetricName": "tma_info_frontend_unknown_branch_cost", + "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node." + }, + { "BriefDescription": "Branch instructions per taken branch.", "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", "MetricGroup": "Branches;Fed;PGO", @@ -898,27 +997,11 @@ }, { "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + FP_ARITH_INST_RETIRED2.SCALAR + (cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ + FP_ARITH_INST_RETIRED2.VECTOR))", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR + (cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ + FP_ARITH_INST_RETIRED2.VECTOR))", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." - }, - { - "BriefDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.BF16", - "MetricGroup": "Flops;FpVector;InsType;Server", - "MetricName": "tma_info_inst_mix_iparith_amx_f16", - "MetricThreshold": "tma_info_inst_mix_iparith_amx_f16 < 10", - "PublicDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions." - }, - { - "BriefDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.INT8", - "MetricGroup": "InsType;IntVector;Server", - "MetricName": "tma_info_inst_mix_iparith_amx_int8", - "MetricThreshold": "tma_info_inst_mix_iparith_amx_int8 < 10", - "PublicDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." 
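
The tma_info_core_ilp change above redefines ILP per logical processor: uops executed divided by the cycles in which at least one uop executed. The cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@ form is, if I read perf's metric-expression escaping correctly, the JSON encoding of the command-line event cpu/UOPS_EXECUTED.THREAD,cmask=1/, where cmask=1 makes the counter tick only on cycles with a non-zero event count. A sketch of the resulting division, with invented counts:

    # tma_info_core_ilp sketch: average uops per cycle, measured only over
    # cycles where the logical processor executed at least one uop.
    uops_executed_thread = 5_600_000   # UOPS_EXECUTED.THREAD (hypothetical)
    cycles_with_execution = 1_400_000  # same event counted with cmask=1
    ilp = uops_executed_thread / cycles_with_execution
    print(f"tma_info_core_ilp = {ilp:.2f} uops per executing cycle")
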
}, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -926,7 +1009,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -934,7 +1017,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)", @@ -942,7 +1025,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx512", "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -950,7 +1033,15 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." + }, + { + "BriefDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate)", + "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED2.SCALAR", + "MetricGroup": "Flops;FpScalar;InsType;Server", + "MetricName": "tma_info_inst_mix_iparith_scalar_hp", + "MetricThreshold": "tma_info_inst_mix_iparith_scalar_hp < 10", + "PublicDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." 
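
Each iparith_* metric in this block is an inverse occurrence rate: retired instructions divided by retired instructions of one FP class, with the MetricThreshold flagging values below 10 as FP-dense code, and values below 1 possible because of the intentional FMA double counting noted above. A sketch of evaluating the newly added scalar half-precision variant, with invented counts:

    # Hypothetical evaluation of tma_info_inst_mix_iparith_scalar_hp against
    # its "< 10" MetricThreshold; both event values are made up.
    inst_retired_any = 80_000_000  # INST_RETIRED.ANY
    fp2_scalar = 12_000_000        # FP_ARITH_INST_RETIRED2.SCALAR (half precision)
    iparith_scalar_hp = inst_retired_any / fp2_scalar
    if iparith_scalar_hp < 10:  # low ratio means high FP occurrence rate
        print(f"FP-dense: one scalar-HP op every {iparith_scalar_hp:.1f} instructions")
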
}, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -958,7 +1049,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -976,7 +1067,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / tma_info_core_flopc", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -989,6 +1080,12 @@ "MetricThreshold": "tma_info_inst_mix_ipload < 3" }, { + "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / CPU_CLK_UNHALTED.PAUSE_INST", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_ippause" + }, + { "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", "MetricGroup": "InsType", @@ -1011,16 +1108,28 @@ "PublicDescription": "Instruction per taken branch. 
Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp" }, { + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "tma_info_memory_mix_bus_lock_pki", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_bus_lock_pki" + }, + { + "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki", + "MetricGroup": "Fed;MemoryTLB", + "MetricName": "tma_info_memory_code_stlb_mpki" + }, + { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction", @@ -1036,130 +1145,298 @@ }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw" + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "tma_info_memory_latency_data_l2_mlp", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_data_l2_mlp" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including 
speculative)", "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw_2t" + }, + { + "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction", + "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY", + "MetricGroup": "L2Evicts;Mem;Server", + "MetricName": "tma_info_memory_l2_evictions_nonsilent_pki" + }, + { + "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)", + "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY", + "MetricGroup": "L2Evicts;Mem;Server", + "MetricName": "tma_info_memory_l2_evictions_silent_pki" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw" + }, + { + "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * 
LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw_2t" + }, + { "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "Average Latency for L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_miss_latency", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { - "BriefDescription": "Average Parallel L2 cache miss data reads", - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "BriefDescription": "Average Parallel L2 cache miss demand Loads", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" + }, + { + "BriefDescription": "Average Latency for L3 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l3_miss_latency", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l3_miss_latency" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_load_l2_mlp" }, { "BriefDescription": "Average Latency for L3 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l3_miss_latency" + "MetricName": "tma_info_memory_load_l3_miss_latency" }, { - "BriefDescription": "Average 
per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_load_stlb_mpki", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_load_stlb_mpki" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_bus_lock_pki" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Off-core accesses per kilo instruction for modified write requests", + "MetricExpr": "1e3 * OCR.MODIFIED_WRITE.ANY_RESPONSE / tma_info_inst_mix_instructions", + "MetricGroup": "Offcore", + "MetricName": "tma_info_memory_mix_offcore_mwrite_any_pki" + }, + { + "BriefDescription": "Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)", + "MetricExpr": "1e3 * OCR.READS_TO_CORE.ANY_RESPONSE / tma_info_inst_mix_instructions", + "MetricGroup": "CacheHits;Offcore", + "MetricName": "tma_info_memory_mix_offcore_read_any_pki" + }, + { + "BriefDescription": "L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)", + "MetricExpr": "1e3 * OCR.READS_TO_CORE.L3_MISS / tma_info_inst_mix_instructions", + "MetricGroup": "Offcore", + "MetricName": "tma_info_memory_mix_offcore_read_l3m_pki" + }, + { + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "tma_info_memory_uc_load_pki", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki" + }, + { + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" + }, + { + "BriefDescription": "Off-core accesses per kilo instruction for modified write requests", + "MetricExpr": "1e3 * OCR.MODIFIED_WRITE.ANY_RESPONSE / INST_RETIRED.ANY", + "MetricGroup": "Offcore", + "MetricName": "tma_info_memory_offcore_mwrite_any_pki" + }, + { + "BriefDescription": "Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)", + "MetricExpr": "1e3 * OCR.READS_TO_CORE.ANY_RESPONSE / INST_RETIRED.ANY", + "MetricGroup": "CacheHits;Offcore", + "MetricName": "tma_info_memory_offcore_read_any_pki" + }, + { + "BriefDescription": "L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)", + "MetricExpr": "1e3 * OCR.READS_TO_CORE.L3_MISS / INST_RETIRED.ANY", + "MetricGroup": "Offcore", + "MetricName": "tma_info_memory_offcore_read_l3m_pki" + }, + { + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (4 * (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD))", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_page_walks_utilization" + }, + { + "BriefDescription": "Average DRAM BW for Reads-to-Core (R2C), covering memory attached to local and remote sockets", + "MetricExpr": "64 * OCR.READS_TO_CORE.DRAM / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "HPC;Mem;MemoryBW;SoC", + "MetricName": "tma_info_memory_r2c_dram_bw", + "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C), covering memory attached to local and remote sockets. See R2C_Offcore_BW." + }, + { + "BriefDescription": "Average L3-cache miss BW for Reads-to-Core (R2C)", + "MetricExpr": "64 * OCR.READS_TO_CORE.L3_MISS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "HPC;Mem;MemoryBW;SoC", + "MetricName": "tma_info_memory_r2c_l3m_bw", + "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covers traffic going to DRAM or other off-chip memory tiers. See R2C_Offcore_BW." + }, + { + "BriefDescription": "Average Off-core access BW for Reads-to-Core (R2C)", + "MetricExpr": "64 * OCR.READS_TO_CORE.ANY_RESPONSE / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "HPC;Mem;MemoryBW;SoC", + "MetricName": "tma_info_memory_r2c_offcore_bw", + "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C accounts for demand or prefetch load/RFO/code accesses that fill data into the Core caches." + }, + { + "BriefDescription": "Average DRAM BW for Reads-to-Core (R2C), covering memory attached to local and remote sockets", + "MetricExpr": "64 * OCR.READS_TO_CORE.DRAM / 1e9 / duration_time", + "MetricGroup": "HPC;Mem;MemoryBW;SoC", + "MetricName": "tma_info_memory_soc_r2c_dram_bw", + "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C), covering memory attached to local and remote sockets. See R2C_Offcore_BW." + }, + { + "BriefDescription": "Average L3-cache miss BW for Reads-to-Core (R2C)", + "MetricExpr": "64 * OCR.READS_TO_CORE.L3_MISS / 1e9 / duration_time", + "MetricGroup": "HPC;Mem;MemoryBW;SoC", + "MetricName": "tma_info_memory_soc_r2c_l3m_bw", + "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covers traffic going to DRAM or other off-chip memory tiers. See R2C_Offcore_BW."
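
All of the R2C bandwidth metrics above share one shape: a count of 64-byte cache lines returned to the core (an OCR.READS_TO_CORE.* event), scaled to gigabytes and divided by wall-clock time; the (duration_time * 1e3 / 1e3) factor in the tma_info_memory_r2c_* variants algebraically reduces to duration_time, so they compute the same value as their tma_info_memory_soc_r2c_* twins. A sketch of the arithmetic with invented numbers:

    # R2C DRAM bandwidth sketch (mirrors tma_info_memory_soc_r2c_dram_bw).
    # Each OCR.READS_TO_CORE.DRAM event is one 64-byte line filled from DRAM.
    reads_to_core_dram = 3_000_000_000  # hypothetical event count
    duration_time = 2.0                 # seconds of wall-clock measurement
    r2c_dram_bw = 64 * reads_to_core_dram / 1e9 / duration_time
    print(f"R2C DRAM bandwidth: {r2c_dram_bw:.1f} GB/s")
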
+ }, + { + "BriefDescription": "Average Off-core access BW for Reads-to-Core (R2C)", + "MetricExpr": "64 * OCR.READS_TO_CORE.ANY_RESPONSE / 1e9 / duration_time", + "MetricGroup": "HPC;Mem;MemoryBW;SoC", + "MetricName": "tma_info_memory_soc_r2c_offcore_bw", + "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C accounts for demand or prefetch load/RFO/code accesses that fill data into the Core caches." + }, + { + "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_store_stlb_mpki" }, { "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", @@ -1187,15 +1464,21 @@ "MetricName": "tma_info_memory_tlb_store_stlb_mpki" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_uc_load_pki" + }, + { + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", + "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", + "MetricName": "tma_info_pipeline_execute" }, { "BriefDescription": "Instructions per a microcode Assist invocation", - "MetricExpr": "INST_RETIRED.ANY / cpu@ASSISTS.ANY\\,umask\\=0x1B@", - "MetricGroup": "Pipeline;Ret;Retire", + "MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", "MetricName": "tma_info_pipeline_ipassist", "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", "PublicDescription": "Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)" @@ -1209,41 +1492,62 @@ { "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions", "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@", - "MetricGroup": "Pipeline;Ret", + "MetricGroup": "MicroSeq;Pipeline;Ret", "MetricName": "tma_info_pipeline_strings_cycles", "MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states", + "MetricExpr": "CPU_CLK_UNHALTED.C0_WAIT / tma_info_thread_clks", + "MetricGroup": "C0Wait", + "MetricName": "tma_info_system_c0_wait", + "MetricThreshold": "tma_info_system_c0_wait > 0.05" + }, + { + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "tma_info_core_flopc / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { - "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]", + "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]", "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e9 / duration_time", - "MetricGroup": "IoBW;Mem;Server;SoC", - "MetricName": "tma_info_system_io_write_bw" + "MetricGroup": "IoBW;MemOffcore;Server;SoC", + "MetricName": "tma_info_system_io_read_bw", + "PublicDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU" + }, + { + "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]", + "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR) * 64 / 1e9 / duration_time", + "MetricGroup": "IoBW;MemOffcore;Server;SoC", + "MetricName": "tma_info_system_io_write_bw", + "PublicDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -1268,7 +1572,7 @@ { "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]", "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / uncore_cha_0@event\\=0x1@", - "MetricGroup": "Mem;MemoryLat;Server;SoC", + "MetricGroup": "MemOffcore;MemoryLat;Server;SoC", "MetricName": "tma_info_system_mem_dram_read_latency", "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches" }, @@ -1282,7 +1586,7 @@ { "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]", "MetricExpr": "(1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM) / uncore_cha_0@event\\=0x1@ if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryLat;Server;SoC", + "MetricGroup": "MemOffcore;MemoryLat;Server;SoC", "MetricName": "tma_info_system_mem_pmm_read_latency", "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. 
Accounts for demand loads and L1/L2 data-read prefetches" }, @@ -1297,13 +1601,13 @@ { "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]", "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryBW;Server;SoC", + "MetricGroup": "MemOffcore;MemoryBW;Server;SoC", "MetricName": "tma_info_system_pmm_read_bw" }, { "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]", "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)", - "MetricGroup": "Mem;MemoryBW;Server;SoC", + "MetricGroup": "MemOffcore;MemoryBW;Server;SoC", "MetricName": "tma_info_system_pmm_write_bw" }, { @@ -1319,18 +1623,18 @@ "MetricName": "tma_info_system_socket_clks" }, { - "BriefDescription": "Tera Integer (matrix) Operations Per Second", - "MetricExpr": "8 * AMX_OPS_RETIRED.INT8 / 1e12 / duration_time", - "MetricGroup": "Cor;HPC;IntVector;Server", - "MetricName": "tma_info_system_tiops" - }, - { "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Power", "MetricName": "tma_info_system_turbo_utilization" }, { + "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]", + "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_uncore_frequency" + }, + { "BriefDescription": "Cross-socket Ultra Path Interconnect (UPI) data transmit bandwidth for data only [MB / sec]", "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 64 / 9 / 1e6", "MetricGroup": "Server;SoC", @@ -1388,17 +1692,8 @@ "MetricThreshold": "tma_info_thread_uptb < 9" }, { - "BriefDescription": "This metric approximates arithmetic Integer (Int) matrix uops fraction the CPU has retired (aggregated across all supported Int datatypes in AMX engine)", - "MetricExpr": "cpu@AMX_OPS_RETIRED.INT8\\,cmask\\=1@ / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Compute;HPC;IntVector;Pipeline;Server;TopdownL4;tma_L4_group;tma_int_operations_group", - "MetricName": "tma_int_amx", - "MetricThreshold": "tma_int_amx > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)", - "PublicDescription": "This metric approximates arithmetic Integer (Int) matrix uops fraction the CPU has retired (aggregated across all supported Int datatypes in AMX engine). 
Refer to AMX_Busy and TIOPs metrics for actual AMX utilization and Int performance, resp.", - "ScaleUnit": "100%" - }, - { "BriefDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired)", - "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b + tma_shuffles + tma_int_amx", + "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_int_operations", "MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6", @@ -1415,18 +1710,18 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired", + "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired", "MetricExpr": "(INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256) / (tma_retiring * tma_info_thread_slots)", "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P", "MetricName": "tma_int_vector_256b", "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)", - "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1435,7 +1730,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((EXE_ACTIVITY.BOUND_ON_LOADS - MEMORY_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. 
The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1444,7 +1739,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1453,19 +1748,19 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "33 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "33 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. 
L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%" }, { @@ -1485,7 +1780,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2;Default", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1515,10 +1810,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory", - "MetricExpr": "71 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "71 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_local_dram", - "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_local_mem", + "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. 
Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS", "ScaleUnit": "100%" }, @@ -1551,21 +1846,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). 
This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1583,9 +1878,9 @@ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.", "MetricConstraint": "NO_GROUP_EVENTS_NMI", "MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_memory_fence", - "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { @@ -1602,7 +1897,7 @@ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1619,32 +1914,32 @@ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 6 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. 
Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
 "ScaleUnit": "100%"
 },
 {
- "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
 "MetricExpr": "160 * ASSISTS.SSE_AVX_MIX / tma_info_thread_clks",
 "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
 "MetricName": "tma_mixing_vectors",
 "MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
 "ScaleUnit": "100%"
 },
 {
 "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
- "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (tma_retiring * tma_info_thread_slots / UOPS_ISSUED.ANY) / tma_info_thread_clks",
+ "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / tma_info_thread_clks",
 "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
 "MetricName": "tma_ms_switches",
 "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. 
Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused", "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - INST_RETIRED.MACRO_FUSED) / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_non_fused_branches", "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.", @@ -1653,15 +1948,15 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. 
Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -1669,6 +1964,22 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults", "MetricExpr": "99 * ASSISTS.PAGE_FAULT / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group", @@ -1679,7 +1990,7 @@ }, { "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a", - "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 0)) * (MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + 
MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)", + "MetricExpr": "(((1 - (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))))) * (MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)", "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_pmm_bound", "MetricThreshold": "tma_pmm_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", @@ -1705,17 +2016,17 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / tma_info_thread_clks if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / tma_info_thread_clks if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -1724,7 +2035,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_thread_clks", + "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + cpu@RS.EMPTY\\,umask\\=1@) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -1756,13 +2067,13 @@ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). 
Sample with: UOPS_EXECUTED.CYCLES_GE_3", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues", - "MetricExpr": "(135.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 135.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(135.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 135.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group", "MetricName": "tma_remote_cache", "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", @@ -1771,10 +2082,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory", - "MetricExpr": "149 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "149 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_remote_dram", - "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_remote_mem", + "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS", "ScaleUnit": "100%" }, @@ -1791,28 +2102,29 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", - "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks + tma_c02_wait", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. 
Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Shuffle (cross \"vector lane\" data transfers) uops fraction the CPU has retired.", - "MetricExpr": "INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group", - "MetricName": "tma_shuffles", - "MetricThreshold": "tma_shuffles > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)", + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer)", + "MetricExpr": "tma_light_operations * INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", + "MetricName": "tma_shuffles_256b", + "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", "MetricConstraint": "NO_GROUP_EVENTS_NMI", "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_slow_pause", - "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST", "ScaleUnit": "100%" }, @@ -1840,7 +2152,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). 
Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%" }, { @@ -1907,10 +2219,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json index cf6fa70f37c1..25a2b9695135 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json @@ -4144,6 +4144,42 @@ "Unit": "CHA" }, { + "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on the local socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcd42ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on a remote socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcd437f04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on the local socket", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL", + "PerPkg": "1", + "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc42ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on a remote socket",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc437f04",
+ "Unit": "CHA"
+ },
+ {
 "BriefDescription": "TOR Inserts; Misses from local IO",
 "EventCode": "0x35",
 "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
@@ -4153,7 +4189,7 @@
 "Unit": "CHA"
 },
 {
- "BriefDescription": "TOR Inserts; ItoM misses from local IO",
+ "BriefDescription": "TOR Inserts : ItoM, indicating a full cacheline write request, from IO Devices that missed the LLC",
 "EventCode": "0x35",
 "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
 "PerPkg": "1",
@@ -4171,7 +4207,7 @@
 "Unit": "CHA"
 },
 {
- "BriefDescription": "TOR Inserts; RdCur and FsRdCur misses from local IO",
+ "BriefDescription": "TOR Inserts; RdCur and FsRdCur requests from local IO that miss LLC",
 "EventCode": "0x35",
 "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
 "PerPkg": "1",
@@ -4198,6 +4234,24 @@
 "Unit": "CHA"
 },
 {
+ "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f2ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f37f04",
+ "Unit": "CHA"
+ },
+ {
 "BriefDescription": "TOR Inserts; RFO from local IO",
 "EventCode": "0x35",
 "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
@@ -5566,6 +5620,42 @@
 "Unit": "CHA"
 },
 {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC and targets local memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd42fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC and targets remote memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcd437e04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy; ITOM misses from local IO and targets local memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_LOCAL", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcc42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy; ITOM misses from local IO and targets remote memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_REMOTE", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xcc437e04", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR", @@ -5575,6 +5665,24 @@ "Unit": "CHA" }, { + "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO and targets local memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_LOCAL", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", + "UMask": "0xc8f2fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO and targets remote memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_REMOTE", + "PerPkg": "1", + "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", + "UMask": "0xc8f37e04", + "Unit": "CHA" + }, + { "BriefDescription": "TOR Occupancy; RFO misses from local IO", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO", diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json index 65d088556bae..22bb490e9666 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json @@ -4888,7 +4888,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AD)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AD)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AD", "PerPkg": "1", @@ -4897,7 +4897,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AK)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AK)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AK", "PerPkg": "1", @@ -4906,7 +4906,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AKC)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AKC)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AKC", "PerPkg": "1", @@ -4915,7 +4915,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (BL)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (BL)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.BL", "PerPkg": "1", @@ -4924,7 +4924,7 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (IV)", + "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (IV)", "EventCode": "0x4B", "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.IV", "PerPkg": "1", @@ -5291,7 +5291,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xe", "Unit": "UPI" }, @@ -5300,7 +5300,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10e", "Unit": "UPI" }, @@ -5309,7 +5309,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xf", "Unit": "UPI" }, @@ -5318,7 +5318,7 @@ "EventCode": "0x05", "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10f", "Unit": "UPI" }, @@ -5763,7 +5763,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xe", "Unit": "UPI" }, @@ -5772,7 +5772,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10e", "Unit": "UPI" }, @@ -5781,7 +5781,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0xf", "Unit": "UPI" }, @@ -5790,7 +5790,7 @@ "EventCode": "0x04", "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC", "PerPkg": "1", - "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.", + "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.", "UMask": "0x10f", "Unit": "UPI" }, diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json index 7f0dc65a55d2..f937ba0e50e1 100644 --- a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json +++ b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json @@ -16,6 +16,148 @@ "UMask": "0x4f" }, { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an instruction cache or TLB miss.", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.ALL", + "SampleAfterValue": "1000003", + "UMask": "0x7f" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.L2_HIT", + "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which hit in the LLC.", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT", + "SampleAfterValue": "1000003", + "UMask": "0x6" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches.", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x78" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.ALL", + "SampleAfterValue": "1000003", + "UMask": "0x7f" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.L2_HIT", + "PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit 
in the L2 cache.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC.", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT", + "SampleAfterValue": "1000003", + "UMask": "0x6" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x78" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM", + "EventCode": "0xd3", + "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss in the L1 data cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x40" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss in the L2 cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x80" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x1c" + }, + { + "BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.ALL", + "SampleAfterValue": "20003", + "UMask": "0x7" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.LD_BUF", + "SampleAfterValue": "20003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.RSV", + "SampleAfterValue": "20003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.ST_BUF", + "SampleAfterValue": "20003", + "UMask": "0x1" + }, + { "BriefDescription": "Counts the number of load ops retired.", "Data_LA": "1", "EventCode": "0xd0", @@ -144,6 +286,42 @@ "UMask": "0x5" }, { + "BriefDescription": "Counts the number of load uops retired that performed one or more 
locks", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x21" + }, + { + "BriefDescription": "Counts the number of memory uops retired that were splits.", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x43" + }, + { + "BriefDescription": "Counts the number of retired split load uops.", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x41" + }, + { + "BriefDescription": "Counts the number of retired split store uops.", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x42" + }, + { "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES", "Data_LA": "1", "EventCode": "0xd0", @@ -151,5 +329,12 @@ "PEBS": "2", "SampleAfterValue": "1000003", "UMask": "0x6" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ICACHE", + "SampleAfterValue": "1000003", + "UMask": "0x20" } ] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/floating-point.json b/tools/perf/pmu-events/arch/x86/sierraforest/floating-point.json new file mode 100644 index 000000000000..00c9a8ae0f53 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/sierraforest/floating-point.json @@ -0,0 +1,68 @@ +[ + { + "BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.", + "CounterMask": "1", + "EventCode": "0xcd", + "EventName": "ARITH.FPDIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of all types of floating point operations per uop with all default weighting", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.ALL", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { + "BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP64]", + "Deprecated": "1", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.DP", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of floating point operations that produce 32 bit single precision results [This event is alias to FP_FLOPS_RETIRED.SP]", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.FP32", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of floating point operations that produce 64 bit double precision results [This event is alias to FP_FLOPS_RETIRED.DP]", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.FP64", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "This event is deprecated. 
[This event is alias to FP_FLOPS_RETIRED.FP32]", + "Deprecated": "1", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.SP", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.FP_ASSIST", + "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.", + "SampleAfterValue": "20003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt).", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.FPDIV", + "PEBS": "1", + "SampleAfterValue": "2000003", + "UMask": "0x8" + } +] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json b/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json index be8f1c7e195c..356d36aecc81 100644 --- a/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json +++ b/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json @@ -1,5 +1,21 @@ [ { + "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.", + "EventCode": "0xe6", + "EventName": "BACLEARS.ANY", + "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.", + "SampleAfterValue": "200003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ITLB_MISS", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.", "EventCode": "0x80", "EventName": "ICACHE.ACCESSES", diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json index 79d8af45100c..e0ce2decc805 100644 --- a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json +++ b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json @@ -1,5 +1,71 @@ [ { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.", + "EventCode": "0x05", + "EventName": "LD_HEAD.ANY_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xff" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.", + "EventCode": "0x05", + "EventName": "LD_HEAD.L1_BOUND_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xf4" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.", + "EventCode": "0x05", + "EventName": 
"LD_HEAD.L1_MISS_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x81" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.", + "EventCode": "0x05", + "EventName": "LD_HEAD.OTHER_AT_RET", + "PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.", + "SampleAfterValue": "1000003", + "UMask": "0xc0" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.", + "EventCode": "0x05", + "EventName": "LD_HEAD.PGWALK_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xa0" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.", + "EventCode": "0x05", + "EventName": "LD_HEAD.ST_ADDR_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x84" + }, + { + "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.MEMORY_ORDERING", + "SampleAfterValue": "20003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts misaligned loads that are 4K page splits.", + "EventCode": "0x13", + "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts misaligned stores that are 4K page splits.", + "EventCode": "0x13", + "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x4" + }, + { "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.", "EventCode": "0xB7", "EventName": "OCR.DEMAND_DATA_RD.L3_MISS", diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/other.json b/tools/perf/pmu-events/arch/x86/sierraforest/other.json index 2414f6ff53b0..70a9da7e97df 100644 --- a/tools/perf/pmu-events/arch/x86/sierraforest/other.json +++ b/tools/perf/pmu-events/arch/x86/sierraforest/other.json @@ -1,5 +1,14 @@ [ { + "BriefDescription": "This event is deprecated. 
[This event is alias to MISC_RETIRED.LBR_INSERTS]", + "Deprecated": "1", + "EventCode": "0xe4", + "EventName": "LBR_INSERTS.ANY", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { "BriefDescription": "Counts demand data reads that have any type of response.", "EventCode": "0xB7", "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE", @@ -16,5 +25,12 @@ "MSRValue": "0x10002", "SampleAfterValue": "100003", "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.", + "EventCode": "0x75", + "EventName": "SERIALIZATION.C01_MS_SCB", + "SampleAfterValue": "200003", + "UMask": "0x4" } ] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json index 41212957ef21..90292dc03d33 100644 --- a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json @@ -1,5 +1,13 @@ [ { + "BriefDescription": "Counts the number of cycles when any of the dividers are active.", + "CounterMask": "1", + "EventCode": "0xcd", + "EventName": "ARITH.DIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { "BriefDescription": "Counts the total number of branch instructions retired for all branch types.", "EventCode": "0xc4", "EventName": "BR_INST_RETIRED.ALL_BRANCHES", @@ -8,6 +16,71 @@ "SampleAfterValue": "200003" }, { + "BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x7e" + }, + { + "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND_TAKEN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfe" + }, + { + "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.FAR_BRANCH", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xbf" + }, + { + "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xeb" + }, + { + "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfb" + }, + { + "BriefDescription": "This event is deprecated. 
Refer to new event BR_INST_RETIRED.INDIRECT_CALL", + "Deprecated": "1", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.IND_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfb" + }, + { + "BriefDescription": "Counts the number of near CALL branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xf9" + }, + { + "BriefDescription": "Counts the number of near RET branch instructions retired.", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_RETURN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xf7" + }, + { "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.", "EventCode": "0xc5", "EventName": "BR_MISP_RETIRED.ALL_BRANCHES", @@ -16,6 +89,54 @@ "SampleAfterValue": "200003" }, { + "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x7e" + }, + { + "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_TAKEN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfe" + }, + { + "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xeb" + }, + { + "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_CALL", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xfb" + }, + { + "BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0x80" + }, + { + "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.RETURN", + "PEBS": "1", + "SampleAfterValue": "200003", + "UMask": "0xf7" + }, + { "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles", "EventName": "CPU_CLK_UNHALTED.CORE", "SampleAfterValue": "2000003", @@ -68,29 +189,294 @@ "SampleAfterValue": "2000003" }, { - "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.", + "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.ADDRESS_ALIAS", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.DATA_UNKNOWN", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped 
with an older store.", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.STORE_FORWARD", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.DISAMBIGUATION", + "SampleAfterValue": "20003", + "UMask": "0x8" + }, + { + "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.PAGE_FAULT", + "SampleAfterValue": "20003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.SLOW", + "SampleAfterValue": "20003", + "UMask": "0x6f" + }, + { + "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.SMC", + "SampleAfterValue": "20003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of Last Branch Record (LBR) entries. Requires LBRs to be enabled and configured in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]", + "EventCode": "0xe4", + "EventName": "MISC_RETIRED.LBR_INSERTS", + "PEBS": "1", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]", "EventCode": "0x73", "EventName": "TOPDOWN_BAD_SPECULATION.ALL", - "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. 
[This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]", "SampleAfterValue": "1000003" }, { - "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls", + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.ALL_P", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Fast Nukes such as Memory Ordering Machine clears and MRN nukes", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.NUKE", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]", "EventCode": "0x74", "EventName": "TOPDOWN_BE_BOUND.ALL", "SampleAfterValue": "1000003" }, { - "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls", + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to due to certain allocation restrictions", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.ALL_P", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). 
This could be caused by RSV full or load/store buffer block.", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC and FPC RAT stalls - which can be due to the FIQ and IEC reservation station stall (integer, FP and SIMD scheduler not being able to accept another uop. )", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to mrbl stall. A 'marble' refers to a physical register file entry, also known as the physical destination (PDST).", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.REGISTER", + "SampleAfterValue": "1000003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to ROB full", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER", + "SampleAfterValue": "1000003", + "UMask": "0x40" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL_P]", "EventCode": "0x71", "EventName": "TOPDOWN_FE_BOUND.ALL", "SampleAfterValue": "1000003" }, { - "BriefDescription": "Counts the number of consumed retirement slots. 
Similar to UOPS_RETIRED.ALL", + "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL]", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ALL_P", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTClear", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER", + "SampleAfterValue": "1000003", + "UMask": "0x40" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ms", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.CISC", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stall", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.DECODE", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH", + "SampleAfterValue": "1000003", + "UMask": "0x8d" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY", + "SampleAfterValue": "1000003", + "UMask": "0x72" + }, + { + "BriefDescription": "This event is deprecated. [This event is alias to TOPDOWN_FE_BOUND.ITLB_MISS]", + "Deprecated": "1", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ITLB", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to itlb miss [This event is alias to TOPDOWN_FE_BOUND.ITLB]", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ITLB_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend that do not categorize into any other common frontend stall", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.OTHER", + "SampleAfterValue": "1000003", + "UMask": "0x80" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.PREDECODE", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL_P]", "EventCode": "0x72", "EventName": "TOPDOWN_RETIRING.ALL", "PEBS": "1", "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of consumed retirement slots. 
Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL]", + "EventCode": "0x72", + "EventName": "TOPDOWN_RETIRING.ALL_P", + "PEBS": "1", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the number of uops issued by the front end every cycle.", + "EventCode": "0x0e", + "EventName": "UOPS_ISSUED.ANY", + "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.", + "SampleAfterValue": "1000003" + }, + { + "BriefDescription": "Counts the total number of uops retired.", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.ALL", + "PEBS": "1", + "SampleAfterValue": "2000003" + }, + { + "BriefDescription": "Counts the number of integer divide uops retired.", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.IDIV", + "PEBS": "1", + "SampleAfterValue": "2000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.MS", + "PEBS": "1", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of x87 uops retired, includes those in ms flows", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.X87", + "PEBS": "1", + "SampleAfterValue": "2000003", + "UMask": "0x2" + } ] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json new file mode 100644 index 000000000000..a3aafbbc3484 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json @@ -0,0 +1,2853 @@ +[ + { + "BriefDescription": "Clockticks for CMS units attached to CHA", + "EventCode": "0x01", + "EventName": "UNC_CHACMS_CLOCKTICKS", + "PerPkg": "1", + "PortMask": "0x000", + "PublicDescription": "UNC_CHACMS_CLOCKTICKS", + "Unit": "CHACMS" + }, + { + "BriefDescription": "Number of CHA clock cycles while the event is enabled", + "EventCode": "0x01", + "EventName": "UNC_CHA_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "Clockticks of the uncore caching and home agent (CHA)", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed.", + "EventCode": "0x53", + "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed.", + "EventCode": "0x53", + "EventName": "UNC_CHA_DIR_LOOKUP.SNP", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the HA pipe. 
This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.", + "EventCode": "0x54", + "EventName": "UNC_CHA_DIR_UPDATE.HA", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.", + "EventCode": "0x54", + "EventName": "UNC_CHA_DIR_UPDATE.TOR", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR or IRQ (immediate cause for triggering).", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_ANY", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x3", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in IRQ (immediate cause for triggering).", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_IRQ", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR (immediate cause for triggering).", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_TOR", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Requests to Remotely Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : All transactions from Remote Agents", + "UMask": "0x17e0ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: CRd Requests", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1bd0ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests and Read Prefetches", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD", + "PerPkg": "1", + 
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x1bc1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Data Reads", + "UMask": "0x1fc1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches", + "UMask": "0x841ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops which miss the Cache", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Data Read Misses", + "UMask": "0x1fc101", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Transactions homed locally", + "UMask": "0xbdfff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Read Requests and Code Read Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x19d0ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests and Read Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x19c1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Read Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1850ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. 
One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x1841ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x1848ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: LLC Prefetch Requests to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x189dff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x199dff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1910ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. 
Read transactions", + "UMask": "0x1981ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x1908ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x19c8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Requests to Remotely Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC", + "UMask": "0x15dfff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Read/Prefetch Requests from a Remote Socket", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_CODE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1a10ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Data Read/Prefetch Requests from a Remote Socket", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_DATA_RD", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. 
Read transactions", + "UMask": "0x1a01ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests/Prefetches from a Remote Socket", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x1a08ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Snoop Requests from a Remote Socket", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed", + "UMask": "0x1c19ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All RFO and RFO Prefetches", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.RFO", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : All RFOs - Demand and Prefetches", + "UMask": "0x1bc8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches", + "UMask": "0x9c8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Writes to Locally Homed Memory (includes writebacks from L1/L2)", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Writes", + "UMask": "0x842ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Writes to Remotely Homed Memory (includes writebacks from L1/L2)", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Remote Writes", + "UMask": "0x17c2ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.ALL", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : All Lines Victimized", + "UMask": "0xf", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.IA", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.IO", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - All Lines", + "UMask": "0x200f", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in E State", + "UMask": "0x2002", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in F State", + "UMask": "0x2008", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in M State", + "UMask": "0x2001", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in S State", + "UMask": "0x2004", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Remote - All Lines", + "UMask": "0x800f", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Remote - Lines in E State", + "UMask": "0x8002", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Remote - Lines in M State", + "UMask": "0x8001", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Remote - Lines in S State", + "UMask": "0x8004", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in E state", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in M state", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in S State", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.", + "EventCode": "0x39", + "EventName": "UNC_CHA_MISC.RFO_HIT_S", + "PerPkg": "1", + "PublicDescription": "Cbo Misc : RFO HitS", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.LOCAL_INVITOE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.LOCAL_READ", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.REMOTE_READ", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.ALLOC_EXCLUSIVE", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.ALLOC_EXCLUSIVE", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.ALLOC_SHARED", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.ALLOC_SHARED", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.DEALLOC_EVCTCLN", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.DEALLOC_EVCTCLN", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.DIRBACKED_ONLY", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.DIRBACKED_ONLY", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.HIT_EXCLUSIVE", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.HIT_EXCLUSIVE", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.HIT_SHARED", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.HIT_SHARED", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.INCLUSIVE_ONLY", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.INCLUSIVE_ONLY", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.MISS", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.MISS", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.UPDATE_EXCLUSIVE", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.UPDATE_EXCLUSIVE", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.UPDATE_SHARED", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.UPDATE_SHARED", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.VICTIM_EXCLUSIVE", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.VICTIM_EXCLUSIVE", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.VICTIM_SHARED", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.VICTIM_SHARED", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.INVITOE", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : InvalItoE", + "UMask": "0x30", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts read requests made into this CHA. 
Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.READS", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : Reads", + "UMask": "0x3", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.READS_LOCAL", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.READS_REMOTE", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.WRITES", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : Writes", + "UMask": "0xc", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR Inserts", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.ALL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All", + "UMask": "0xc001ffff", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_CLFLUSH", + "PerPkg": "1", + "UMask": "0x78c8c7fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "FsRdCur transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_FSRDCUR", + "PerPkg": "1", + "UMask": "0x78c8effd20", + "Unit": "CHA" + }, + { + "BriefDescription": "FsRdCurPtl transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_FSRDCURPTL", + "PerPkg": "1", + "UMask": "0x78c9effd20", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_ITOM", + "PerPkg": "1", + "UMask": "0x78cc47fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMWr transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_ITOMWR", + "PerPkg": "1", + "UMask": "0x78cc4ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "MemPushWr transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_MEMPUSHWR", + "PerPkg": "1", + "UMask": "0x78cc6ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": 
"WCiL transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WCIL", + "PerPkg": "1", + "UMask": "0x78c86ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "WcilF transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WCILF", + "PerPkg": "1", + "UMask": "0x78c867fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "WiL transactions from a CXL device which hit in the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WIL", + "PerPkg": "1", + "UMask": "0x78c87ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_CLFLUSH", + "PerPkg": "1", + "UMask": "0x78c8c7fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "FsRdCur transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_FSRDCUR", + "PerPkg": "1", + "UMask": "0x78c8effe20", + "Unit": "CHA" + }, + { + "BriefDescription": "FsRdCurPtl transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_FSRDCURPTL", + "PerPkg": "1", + "UMask": "0x78c9effe20", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_ITOM", + "PerPkg": "1", + "UMask": "0x78cc47fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMWr transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_ITOMWR", + "PerPkg": "1", + "UMask": "0x78cc4ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "MemPushWr transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_MEMPUSHWR", + "PerPkg": "1", + "UMask": "0x78cc6ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "WCiL transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WCIL", + "PerPkg": "1", + "UMask": "0x78c86ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "WcilF transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WCILF", + "PerPkg": "1", + "UMask": "0x78c867fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "WiL transactions from a CXL device which miss the L3.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WIL", + "PerPkg": "1", + "UMask": "0x78c87ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores", + "UMask": "0xc001ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush events that are initiated from the Core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CLFlushes issued by iA Cores", + "UMask": "0xc8c7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlushOpt events that are initiated from the Core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CLFlushOpts issued by iA Cores", + "UMask": "0xc8d7ff01", + "Unit": "CHA" + }, + { + 
"BriefDescription": "Code read from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRDs issued by iA Cores", + "UMask": "0xc80fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter", + "UMask": "0xc88fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores", + "UMask": "0xc827ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores", + "UMask": "0xc8a7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores which hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC", + "UMask": "0xc001fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC", + "UMask": "0xc80ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read prefetch from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc88ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c0018101", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC", + "UMask": "0xc827fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt prefetch from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc8a7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM requests from local IA cores that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC", + "UMask": "0xcc47fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC", + "UMask": "0xcccffd01", + 
"Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC", + "UMask": "0xccd7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC", + "UMask": "0xccc7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC", + "UMask": "0xc807fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC", + "UMask": "0xc887fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM events that are initiated from the Core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores", + "UMask": "0xcc47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear requests from local IA cores", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores", + "UMask": "0xcd47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores", + "UMask": "0xcccfff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores", + "UMask": "0xccd7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores", + "UMask": "0xccc7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC", + "UMask": "0xc001fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC", + "UMask": "0xc80ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRDs from local IA cores to locally homed memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL", + "PerPkg": "1", + "PublicDescription": 
"TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc80efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc88ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRD Prefetches from local IA cores to locally homed memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc88efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRD Prefetches from local IA cores to remotely homed memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc88f7e01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRDs from local IA cores to remotely homed memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc80f7e01", + "Unit": "CHA" + }, + { + "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c0018201", + "Unit": "CHA" + }, + { + "BriefDescription": "DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC", + "PerPkg": "1", + "PublicDescription": "DRds issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.", + "UMask": "0x10c8178201", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC", + "UMask": "0xc827fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd_Opt, and which target local memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc826fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read opt prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC", + "UMask": "0xc8a7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF_OPT, and target local memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC", + "UMask": "0xc8a6fe01", + 
"Unit": "CHA" + }, + { + "BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF_OPT, and target remote memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC", + "UMask": "0xc8a77e01", + "Unit": "CHA" + }, + { + "BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd_Opt, and target remote memory", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC", + "UMask": "0xc8277e01", + "Unit": "CHA" + }, + { + "BriefDescription": "L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8978201", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM requests from local IA cores that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC", + "UMask": "0xcc47fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC", + "UMask": "0xcccffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC", + "UMask": "0xccd7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10ccd78201", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC", + "UMask": "0xccc7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8878201", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to locally homed DDR addresses that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc8668601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to locally homed PMM addresses which miss the cache", + "EventCode": "0x35", + "EventName": 
"UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally", + "UMask": "0xc8668a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to locally homed DDR addresses that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc86e8601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to locally homed PMM addresses which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally", + "UMask": "0xc86e8a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to remotely homed DDR addresses that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely", + "UMask": "0xc8670601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to remotely homed PMM addresses which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely", + "UMask": "0xc8670a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to remotely homed DDR addresses that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely", + "UMask": "0xc86f0601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to remotely homed PMM addresses which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely", + "UMask": "0xc86f0a01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC", + "UMask": "0xc807fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8078201", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc806fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": 
"UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc887fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10ccc78201", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc886fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8877e01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8077e01", + "Unit": "CHA" + }, + { + "BriefDescription": "UCRDF requests from local IA cores that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC", + "UMask": "0xc877de01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from a local IA core that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC", + "UMask": "0xc86ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA core that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC", + "UMask": "0xc867fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to DDR homed addresses which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc8678601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to PMM homed addresses which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC", + "UMask": "0xc8678a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to DDR homed addresses which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc86f8601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from a local IA core to PMM homed addresses that miss the 
cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC", + "UMask": "0xc86f8a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WIL requests from local IA cores that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC", + "UMask": "0xc87fde01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores", + "UMask": "0xc807ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores", + "UMask": "0xc887ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "SpecItoM events that are initiated from the Core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores", + "UMask": "0xcc57ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbEFtoEs issued by iA Cores. (Non Modified Write Backs)", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbEFtoEs issued by iA Cores", + "UMask": "0xcc3fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbEFtoIs issued by iA Cores. (Non Modified Write Backs)", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbEFtoIs issued by iA Cores", + "UMask": "0xcc37ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbMtoEs issued by iA Cores. (Modified Write Backs)", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbMtoEs issued by iA Cores", + "UMask": "0xcc2fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbMtoI requests from local IA cores", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbMtoIs issued by iA Cores", + "UMask": "0xcc27ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbStoIs issued by iA Cores.
(Non Modified Write Backs)", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbStoIs issued by iA Cores", + "UMask": "0xcc67ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from a local IA core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores", + "UMask": "0xc86fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA core", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores", + "UMask": "0xc867ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from local IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices", + "UMask": "0xc001ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush requests from IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CLFlushes issued by IO Devices", + "UMask": "0xc8c3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from local IO devices which hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC", + "UMask": "0xc001fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMs from local IO devices which hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "UMask": "0xcd43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices which hit the LLC", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC", + "UMask": "0xc8f3fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "RFOs from local IO devices which hit the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC", + "UMask": "0xc803fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR ItoM inserts from local IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices", + "UMask": "0xcc43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices", + "UMask": "0xcd43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from
local IO devices which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC", + "UMask": "0xc001fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR ItoM inserts from local IO devices which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear transactions from an IO device on the local socket that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear transactions from an IO device on a remote socket that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd437e04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM transactions from an IO device on the local socket that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM transactions from an IO device on a remote socket that miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc437e04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices which miss the LLC", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f3fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR RFO inserts from local IO devices which miss the cache", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC", + "UMask": "0xc803fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices", + "UMask": "0xc8f3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "RFOs from local IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices", + "UMask": "0xc803ff04", + 
"Unit": "CHA" + }, + { + "BriefDescription": "WBMtoI requests from IO devices", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbMtoIs issued by IO Devices", + "UMask": "0xcc23ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Inserts for SF or LLC Evictions", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LLC_OR_SF_EVICTIONS", + "PerPkg": "1", + "PublicDescription": "TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)", + "UMask": "0xc001ff02", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local iA and IO", + "UMask": "0xc000ff05", + "Unit": "CHA" + }, + { + "BriefDescription": "All from Local iA", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local iA", + "UMask": "0xc000ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All from Local IO", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local IO", + "UMask": "0xc000ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "All remote requests (e.g. snoops, writebacks) that came from remote sockets", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All Remote Requests", + "UMask": "0xc001ffc8", + "Unit": "CHA" + }, + { + "BriefDescription": "All snoops to this LLC that came from remote sockets", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All Snoops from Remote", + "UMask": "0xc001ff08", + "Unit": "CHA" + }, + { + "BriefDescription": "Occupancy for all TOR entries", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All", + "UMask": "0xc001ffff", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_CLFLUSH", + "PerPkg": "1", + "UMask": "0x78c8c7fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for FsRdCur transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_FSRDCUR", + "PerPkg": "1", + "UMask": "0x78c8effd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for FsRdCurPtl transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_FSRDCURPTL", + "PerPkg": "1", + "UMask": "0x78c9effd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_ITOM", + "PerPkg": "1", + "UMask": "0x78cc47fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMWr transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_ITOMWR", + "PerPkg": "1", + "UMask": "0x78cc4ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for MemPushWr transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": 
"UNC_CHA_TOR_OCCUPANCY.CXL_HIT_MEMPUSHWR", + "PerPkg": "1", + "UMask": "0x78cc6ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCiL transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WCIL", + "PerPkg": "1", + "UMask": "0x78c86ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WcilF transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WCILF", + "PerPkg": "1", + "UMask": "0x78c867fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WiL transactions from a CXL device which hit in the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WIL", + "PerPkg": "1", + "UMask": "0x78c87ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_CLFLUSH", + "PerPkg": "1", + "UMask": "0x78c8c7fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for FsRdCur transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_FSRDCUR", + "PerPkg": "1", + "UMask": "0x78c8effe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for FsRdCurPtl transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_FSRDCURPTL", + "PerPkg": "1", + "UMask": "0x78c9effe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_ITOM", + "PerPkg": "1", + "UMask": "0x78cc47fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMWr transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_ITOMWR", + "PerPkg": "1", + "UMask": "0x78cc4ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for MemPushWr transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_MEMPUSHWR", + "PerPkg": "1", + "UMask": "0x78cc6ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCiL transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WCIL", + "PerPkg": "1", + "UMask": "0x78c86ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WcilF transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WCILF", + "PerPkg": "1", + "UMask": "0x78c867fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WiL transactions from a CXL device which miss the L3.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WIL", + "PerPkg": "1", + "UMask": "0x78c87ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores", + "UMask": "0xc001ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush events that are initiated from the Core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH", + "PerPkg": "1", + 
"PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores", + "UMask": "0xc8c7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlushOpt events that are initiated from the Core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores", + "UMask": "0xc8d7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRDs issued by iA Cores", + "UMask": "0xc80fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter", + "UMask": "0xc88fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores", + "UMask": "0xc827ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores", + "UMask": "0xc8a7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC", + "UMask": "0xc001fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC", + "UMask": "0xc80ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc88ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c0018101", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC", + "UMask": "0xc827fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC", + 
"UMask": "0xc8a7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC", + "UMask": "0xcc47fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC", + "UMask": "0xcccffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC", + "UMask": "0xccd7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC", + "UMask": "0xccc7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC", + "UMask": "0xc807fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC", + "UMask": "0xc887fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM events that are initiated from the Core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores", + "UMask": "0xcc47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNear requests from local IA cores", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores", + "UMask": "0xcd47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores", + "UMask": "0xcccfff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores", + "UMask": "0xccd7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR 
Occupancy : LLCPrefRFO issued by iA Cores", + "UMask": "0xccc7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC", + "UMask": "0xc001fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC", + "UMask": "0xc80ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRDs from local IA cores to locally homed memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc80efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc88ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to locally homed memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc88efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to remotely homed memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc88f7e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRDs from local IA cores to remotely homed memory", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc80f7e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c0018201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8178201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC", + "UMask": "0xc827fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": 
"UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC", + "UMask": "0xc8a7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8978201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC", + "UMask": "0xcc47fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC", + "UMask": "0xcccffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC", + "UMask": "0xccd7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10ccd78201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC", + "UMask": "0xccc7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8878201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed DDR addresses that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc8668601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed PMM addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally", + "UMask": "0xc8668a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed DDR addresses that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR", + 
"PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc86e8601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed PMM addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally", + "UMask": "0xc86e8a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to remotely homed DDR addresses that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely", + "UMask": "0xc8670601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to remotely homed PMM addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely", + "UMask": "0xc8670a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to remotely homed DDR addresses that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely", + "UMask": "0xc86f0601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to remotely homed PMM addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely", + "UMask": "0xc86f0a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC", + "UMask": "0xc807fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8078201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc806fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc887fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for LLC RFO prefetches 
issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10ccc78201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc886fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8877e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8077e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for UCRDF requests from local IA cores that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC", + "UMask": "0xc877de01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC", + "UMask": "0xc86ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA core that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC", + "UMask": "0xc867fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to DDR homed addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc8678601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to PMM homed addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC", + "UMask": "0xc8678a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to DDR homed addresses which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc86f8601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core to PMM homed addresses that miss the cache", + 
"EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC", + "UMask": "0xc86f8a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WIL requests from local IA cores that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC", + "UMask": "0xc87fde01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores", + "UMask": "0xc807ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores", + "UMask": "0xc887ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for SpecItoM events that are initiated from the Core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores", + "UMask": "0xcc57ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WbMtoI requests from local IA cores", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores", + "UMask": "0xcc27ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores", + "UMask": "0xc86fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA core", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores", + "UMask": "0xc867ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices", + "UMask": "0xc001ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush requests from IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices", + "UMask": "0xc8c3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC", + "UMask": "0xc001fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMs from local IO devices which hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc43fd04", + "Unit": "CHA" + 
}, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "UMask": "0xcd43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which hit the LLC", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC", + "UMask": "0xc8f3fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RFOs from local IO devices which hit the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC", + "UMask": "0xc803fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices", + "UMask": "0xcc43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices", + "UMask": "0xcd43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC", + "UMask": "0xc001fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNear transactions from an IO device on the local socket that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNear transactions from an IO device on a remote socket that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO 
Devices that missed the LLC", + "UMask": "0xcd437e04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM transactions from an IO device on the local socket that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM transactions from an IO device on a remote socket that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc437e04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which miss the LLC", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f3fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCUR transactions from an IO device on the local socket that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f2fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCUR transactions from an IO device on a remote socket that miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f37e04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR RFO inserts from local IO devices which miss the cache", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC", + "UMask": "0xc803fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices", + "UMask": "0xc8f3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RFOs from local IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices", + "UMask": "0xc803ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WBMtoI requests from IO devices", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices", + "UMask": "0xcc23ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local iA and IO", + "UMask": "0xc000ff05", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All from Local iA", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local iA", + "UMask": 
"0xc000ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All from Local IO", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local IO", + "UMask": "0xc000ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All remote requests (e.g. snoops, writebacks) that came from remote sockets", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All Remote Requests", + "UMask": "0xc001ffc8", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All snoops to this LLC that came from remote sockets", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All Snoops from Remote", + "UMask": "0xc001ff08", + "Unit": "CHA" + } +] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cxl.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cxl.json new file mode 100644 index 000000000000..dc676c7aa37f --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cxl.json @@ -0,0 +1,10 @@ +[ + { + "BriefDescription": "B2CXL Clockticks", + "EventCode": "0x01", + "EventName": "UNC_B2CXL_CLOCKTICKS", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "B2CXL" + } +] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json new file mode 100644 index 000000000000..6932b2fea3a5 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json @@ -0,0 +1,1228 @@ +[ + { + "BriefDescription": "Clockticks of the mesh to memory (B2CMI)", + "EventCode": "0x01", + "EventName": "UNC_B2CMI_CLOCKTICKS", + "PerPkg": "1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of time D2C was not honoured by egress due to directory state constraints", + "EventCode": "0x17", + "EventName": "UNC_B2CMI_DIRECT2CORE_NOT_TAKEN_DIRSTATE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times B2CMI egress did D2C (direct to core)", + "EventCode": "0x16", + "EventName": "UNC_B2CMI_DIRECT2CORE_TAKEN", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times D2C wasn't honoured even though the incoming request had d2c set for non cisgress txn", + "EventCode": "0x18", + "EventName": "UNC_B2CMI_DIRECT2CORE_TXN_OVERRIDE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of d2k wasn't done due to credit constraints", + "EventCode": "0x1B", + "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_CREDITS", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Direct to UPI Transactions - Ignored due to lack of credits : All : Counts the number of d2k wasn't done due to credit constraints", + "EventCode": "0x1B", + "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_CREDITS.EGRESS", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of time D2K was not honoured by egress due to directory state constraints", + "EventCode": "0x1A", + "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_DIRSTATE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U : Counts the number of time D2K was not honoured by 
egress due to directory state constraints", + "EventCode": "0x1A", + "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times egress did D2K (Direct to KTI)", + "EventCode": "0x19", + "EventName": "UNC_B2CMI_DIRECT2UPI_TAKEN", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times D2K wasn't honoured even though the incoming request had d2k set for non cisgress txn", + "EventCode": "0x1C", + "EventName": "UNC_B2CMI_DIRECT2UPI_TXN_OVERRIDE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit Clean", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN", + "PerPkg": "1", + "UMask": "0x38", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On NonDirty Line in A State", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_A", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On NonDirty Line in I State", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_I", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On NonDirty Line in S State", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_S", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit Dirty (modified)", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY", + "PerPkg": "1", + "UMask": "0x7", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On Dirty Line in A State", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_A", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On Dirty Line in I State", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_I", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On Dirty Line in S State", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_S", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with any directory to non persistent memory", + "EventCode": "0x20", + "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.ANY", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory A to non persistent memory", + "EventCode": "0x20", + "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_A", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory I to non persistent memory", + "EventCode": "0x20", + "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_I", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory S to non persistent memory", + "EventCode": "0x20", + "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_S", + "PerPkg": "1", + "PublicDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory S to non persistent memory", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss Clean", + "EventCode": "0x1E", + 
"EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN", + "PerPkg": "1", + "UMask": "0x38", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On NonDirty Line in A State", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_A", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On NonDirty Line in I State", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_I", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On NonDirty Line in S State", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_S", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss Dirty (modified)", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY", + "PerPkg": "1", + "UMask": "0x7", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On Dirty Line in A State", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_A", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On Dirty Line in I State", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_I", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On Dirty Line in S State", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_S", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any A2I Transition", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.A2I", + "PerPkg": "1", + "UMask": "0x320", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any A2S Transition", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.A2S", + "PerPkg": "1", + "UMask": "0x340", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts cisgress directory updates", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.ANY", + "PerPkg": "1", + "UMask": "0x301", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory (DRAM)", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_ANY", + "PerPkg": "1", + "UMask": "0x101", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in near memory to the A state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2A", + "PerPkg": "1", + "UMask": "0x114", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in near memory to the I state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2I", + "PerPkg": "1", + "UMask": "0x128", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in near memory to the S state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2S", + "PerPkg": "1", + "UMask": "0x142", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any I2A Transition", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.I2A", + "PerPkg": "1", + "UMask": "0x304", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any I2S Transition", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.I2S", + "PerPkg": "1", + "UMask": "0x302", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in far memory to the A state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2A", + "PerPkg": "1", + "UMask": "0x214", + "Unit": 
"B2CMI" + }, + { + "BriefDescription": "Directory update in far memory to the I state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2I", + "PerPkg": "1", + "UMask": "0x228", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in far memory to the S state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2S", + "PerPkg": "1", + "UMask": "0x242", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any S2A Transition", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.S2A", + "PerPkg": "1", + "UMask": "0x310", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any S2I Transition", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.S2I", + "PerPkg": "1", + "UMask": "0x308", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update to the A state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2A", + "PerPkg": "1", + "UMask": "0x314", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update to the I state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2I", + "PerPkg": "1", + "UMask": "0x328", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update to the S state", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2S", + "PerPkg": "1", + "UMask": "0x342", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts any read", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.ALL", + "PerPkg": "1", + "UMask": "0x104", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts normal reads issue to CMI", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.NORMAL", + "PerPkg": "1", + "UMask": "0x101", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Count reads to NM region", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_CACHE", + "PerPkg": "1", + "UMask": "0x110", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts reads to 1lm non persistent memory regions", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_MEM", + "PerPkg": "1", + "UMask": "0x108", + "Unit": "B2CMI" + }, + { + "BriefDescription": "All Writes - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.ALL", + "PerPkg": "1", + "UMask": "0x110", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Full Non-ISOCH - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.FULL", + "PerPkg": "1", + "UMask": "0x101", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Non-Inclusive - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.NI", + "PerPkg": "1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Non-Inclusive Miss - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.NI_MISS", + "PerPkg": "1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Partial Non-ISOCH - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.PARTIAL", + "PerPkg": "1", + "UMask": "0x102", + "Unit": "B2CMI" + }, + { + "BriefDescription": "DDR, acting as Cache - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_CACHE", + "PerPkg": "1", + "UMask": "0x140", + "Unit": "B2CMI" + }, + { + "BriefDescription": "DDR - All Channels", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_MEM", + "PerPkg": "1", + "UMask": "0x120", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0", + "EventCode": "0x56", + "EventName": 
"UNC_B2CMI_PREFCAM_INSERTS.CH0_UPI", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_XPT", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.UPI_ALLCH", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : XPT -All Channels", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.XPT_ALLCH", + "PerPkg": "1", + "PublicDescription": "Prefetch CAM Inserts : XPT - All Channels", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Occupancy : Channel 0", + "EventCode": "0x54", + "EventName": "UNC_B2CMI_PREFCAM_OCCUPANCY.CH0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads and WRNI which were a hit", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.ALL", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads which were a hit clean", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.RD_CLEAN", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads which were a hit dirty", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.RD_DIRTY", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm WRNI which were a hit clean", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.WR_CLEAN", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm WRNI which were a hit dirty", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.WR_DIRTY", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm second way read miss for a WrNI", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.CLEAN", + "PerPkg": "1", + "UMask": "0x5", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm second way read miss for a WrNI", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.DIRTY", + "PerPkg": "1", + "UMask": "0xa", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm second way read miss for a Rd", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.RD_2WAY", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads which were a miss and the cache line is unmodified", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.RD_CLEAN", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads which were a miss and the cache line is modified", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.RD_DIRTY", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm second way read miss for a WrNI", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.WR_2WAY", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm WRNI which were a miss and the cache line is unmodified", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.WR_CLEAN", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm WRNI which were a miss and the cache line is modified", + "EventCode": 
"0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.WR_DIRTY", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Tracker Inserts : Channel 0", + "EventCode": "0x32", + "EventName": "UNC_B2CMI_TRACKER_INSERTS.CH0", + "PerPkg": "1", + "UMask": "0x104", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Tracker Occupancy : Channel 0", + "EventCode": "0x33", + "EventName": "UNC_B2CMI_TRACKER_OCCUPANCY.CH0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Write Tracker Inserts : Channel 0", + "EventCode": "0x40", + "EventName": "UNC_B2CMI_WR_TRACKER_INSERTS.CH0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "UNC_B2HOT_CLOCKTICKS", + "EventCode": "0x01", + "EventName": "UNC_B2HOT_CLOCKTICKS", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2HOT" + }, + { + "BriefDescription": "Number of uclks in domain", + "EventCode": "0x01", + "EventName": "UNC_B2UPI_CLOCKTICKS", + "PerPkg": "1", + "Unit": "B2UPI" + }, + { + "BriefDescription": "Total Write Cache Occupancy : Mem", + "EventCode": "0x0F", + "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "IRP" + }, + { + "BriefDescription": "IRP Clockticks", + "EventCode": "0x01", + "EventName": "UNC_I_CLOCKTICKS", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue", + "EventCode": "0x18", + "EventName": "UNC_I_FAF_INSERTS", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": "FAF occupancy", + "EventCode": "0x19", + "EventName": "UNC_I_FAF_OCCUPANCY", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed", + "EventCode": "0x1F", + "EventName": "UNC_I_MISC1.LOST_FWD", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IRP" + }, + { + "BriefDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.", + "EventCode": "0x11", + "EventName": "UNC_I_TRANSACTIONS.WR_PREF", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "IRP" + }, + { + "BriefDescription": "MDF Clockticks", + "EventCode": "0x01", + "EventName": "UNC_MDF_CLOCKTICKS", + "PerPkg": "1", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of UPI LL clock cycles while the event is enabled", + "EventCode": "0x01", + "EventName": "UNC_UPI_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "Number of kfclks", + "Unit": "UPI" + }, + { + "BriefDescription": "Cycles in L1 : Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. 
Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.", + "EventCode": "0x21", + "EventName": "UNC_UPI_L1_POWER_CYCLES", + "PerPkg": "1", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB", + "PerPkg": "1", + "UMask": "0xe", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC", + "PerPkg": "1", + "UMask": "0x10e", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC", + "PerPkg": "1", + "UMask": "0x10f", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Request", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC", + "PerPkg": "1", + "UMask": "0x108", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - Conflict", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT", + "PerPkg": "1", + "UMask": "0x1aa", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - Invalid", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI", + "PerPkg": "1", + "UMask": "0x12a", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA", + "PerPkg": "1", + "UMask": "0xc", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC", + "PerPkg": "1", + "UMask": "0x10c", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA", + "PerPkg": "1", + "UMask": "0xa", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC", + "PerPkg": "1", + "UMask": "0x10a", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Snoop", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP", + "PerPkg": "1", + "UMask": "0x9", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC", + "PerPkg": "1", + "UMask": "0x109", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Writeback", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB", + "PerPkg": "1", + "UMask": "0xd", + "Unit": "UPI" + }, + { + 
"BriefDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC", + "PerPkg": "1", + "UMask": "0x10d", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : All Data : Shows legal flit time (hides impact of L0p and L0c).", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "UPI" + }, + { + "BriefDescription": "Null FLITs received from any slot", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL", + "PerPkg": "1", + "PublicDescription": "Valid Flits Received : Null FLITs received from any slot", + "UMask": "0x27", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.DATA", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Idle : Shows legal flit time (hides impact of L0p and L0c).", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.IDLE", + "PerPkg": "1", + "UMask": "0x47", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.LLCRD", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.LLCTRL", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : All Non Data : Shows legal flit time (hides impact of L0p and L0c).", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.NON_DATA", + "PerPkg": "1", + "UMask": "0x97", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.NULL", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.PROTHDR", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.SLOT0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). 
: Count Slot 1 - Other mask bits determine types of headers to count.", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.SLOT1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.SLOT2", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", + "EventCode": "0x30", + "EventName": "UNC_UPI_RxL_INSERTS.SLOT0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", + "EventCode": "0x30", + "EventName": "UNC_UPI_RxL_INSERTS.SLOT1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. 
This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", + "EventCode": "0x30", + "EventName": "UNC_UPI_RxL_INSERTS.SLOT2", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Occupancy - All Packets : Slot 0", + "EventCode": "0x32", + "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Occupancy - All Packets : Slot 1", + "EventCode": "0x32", + "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Occupancy - All Packets : Slot 2", + "EventCode": "0x32", + "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB", + "PerPkg": "1", + "UMask": "0xe", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC", + "PerPkg": "1", + "UMask": "0x10e", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC", + "PerPkg": "1", + "UMask": "0x10f", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Request", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC", + "PerPkg": "1", + "UMask": "0x108", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Conflict", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT", + "PerPkg": "1", + "UMask": "0x1aa", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Invalid", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI", + "PerPkg": "1", + "UMask": "0x12a", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA", + "PerPkg": "1", + "UMask": "0xc", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC", + "PerPkg": "1", + "UMask": "0x10c", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA", + "PerPkg": "1", + "UMask": "0xa", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC", + "PerPkg": "1", + "UMask": "0x10a", + "Unit": "UPI" 
+ }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP", + "PerPkg": "1", + "UMask": "0x9", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC", + "PerPkg": "1", + "UMask": "0x109", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB", + "PerPkg": "1", + "UMask": "0xd", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC", + "PerPkg": "1", + "UMask": "0x10d", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : All Data : Counts number of data flits across this UPI link.", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "UPI" + }, + { + "BriefDescription": "All Null Flits", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL", + "PerPkg": "1", + "PublicDescription": "Valid Flits Sent : Null FLITs transmitted to any slot", + "UMask": "0x27", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting.", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.DATA", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.IDLE", + "PerPkg": "1", + "UMask": "0x47", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.LLCRD", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.LLCTRL", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : All Non Data : Shows legal flit time (hides impact of L0p and L0c).", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.NON_DATA", + "PerPkg": "1", + "PublicDescription": "Valid Flits Sent : All Non Data : Shows legal flit time (hides impact of L0p and L0c).", + "UMask": "0x97", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.NULL", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c).
: Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.PROTHDR", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.SLOT0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.SLOT1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.SLOT2", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "UPI" + }, + { + "BriefDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", + "EventCode": "0x40", + "EventName": "UNC_UPI_TxL_INSERTS", + "PerPkg": "1", + "Unit": "UPI" + }, + { + "BriefDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.", + "EventCode": "0x42", + "EventName": "UNC_UPI_TxL_OCCUPANCY", + "PerPkg": "1", + "Unit": "UPI" + } +] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json new file mode 100644 index 000000000000..9495cb0f68ea --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json @@ -0,0 +1,1634 @@ +[ + { + "BriefDescription": "IIO Clockticks", + "EventCode": "0x01", + "EventName": "UNC_IIO_CLOCKTICKS", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. 
Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. 
Counts once per 64 byte read issued from this PCIE device.", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff0ff", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002002", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008008", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010010", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020020", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040040", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080080", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of 
Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": 
"1", + "PortMask": "0x020", + "UMask": "0x7020001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff008", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff002", + "Unit": "IIO" + }, + { + "BriefDescription": "Counts once for every 4 bytes read from this card to memory. This event does include reads to IO.", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x02", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x04", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x08", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x10", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x20", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x40", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + 
"EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x80", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Counts once for every 4 bytes written from this card to memory. This event does include writes to IO.", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x02", + "UMask": "0x7002001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x04", + "UMask": "0x7004001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x08", + "UMask": "0x7008001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x10", + "UMask": "0x7010001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x20", + "UMask": "0x7020001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x40", + "UMask": "0x7040001", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x80", + "UMask": "0x7080001", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001008", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002008", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004008", + 
"Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008008", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010008", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020008", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040008", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080008", + "Unit": "IIO" + }, + { + "BriefDescription": "Counts once for every 4 bytes written from this card to a peer device's IO space.", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + 
"PortMask": "0x020", + "UMask": "0x7020002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040002", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080002", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 1G Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.1G_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 2M Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.2M_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 4K Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.4K_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB lookups all", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Context cache hits", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "Context cache lookups", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB lookups first", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Fills (same as IOTLB miss)", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.MISSES", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "IOMMU memory access (both low and high priority)", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0xc0", + "Unit": "IIO" + }, + { + "BriefDescription": "IOMMU high priority memory access", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES_HIGH", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "IOMMU low priority memory access", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES_LOW", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 1G page", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 256T page", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 2M page", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_2M_HITS", + "PerPkg": "1", + 
"PortMask": "0x000", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 512G page", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache fill", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_CACHE_FILLS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache lookup", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_CACHE_LOOKUPS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Cycles PWT full", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.CYC_PWT_FULL", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Interrupt Entry cache hit", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "Interrupt Entry cache lookup", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "Context Cache invalidation events", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_CTXT_CACHE", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Interrupt Entry Cache invalidation events", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_INT_CACHE", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB invalidation events", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_IOTLB", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PASID Cache invalidation events", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PASID_CACHE", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing", + "EventCode": "0xc5", + "EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff008", + "Unit": "IIO" + }, + { + "BriefDescription": "Passing data to be written", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x700f020", + "Unit": "IIO" + }, + { + "BriefDescription": "Issuing final read or write of line", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x700f008", + "Unit": "IIO" + }, + { + "BriefDescription": "Processing response from IOMMU", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x700f002", + "Unit": "IIO" + }, + { + "BriefDescription": "Issuing to IOMMU", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x700f001", + "Unit": "IIO" + }, + { + 
"BriefDescription": "Request Ownership", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x700f004", + "Unit": "IIO" + }, + { + "BriefDescription": "Writing line", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x700f010", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff080", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff040", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff020", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff002", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff008", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff001", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff010", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff004", + "Unit": "IIO" + }, + { + "BriefDescription": "All 9 bits of Page Walk Tracker Occupancy", + "EventCode": "0x42", + "EventName": "UNC_IIO_PWT_OCCUPANCY", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + 
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : 
Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x70ff002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080004", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0", 
+ "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080001", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + 
"PortMask": "0x010", + "UMask": "0x7010008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080008", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x7001002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x7002002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x7004002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x7008002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x7010002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x7020002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x7040002", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x7080002", + "Unit": "IIO" + } +] diff --git 
a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json new file mode 100644 index 000000000000..a2405ed640c9 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json @@ -0,0 +1,385 @@ +[ + { + "BriefDescription": "DRAM Activate Count : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.ALL", + "PerPkg": "1", + "UMask": "0xf7", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.RD", + "PerPkg": "1", + "UMask": "0xf1", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Underfill Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.UFILL", + "PerPkg": "1", + "UMask": "0xf4", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Write transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.WR", + "PerPkg": "1", + "UMask": "0xf2", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all CAS operations", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all reads", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD", + "PerPkg": "1", + "UMask": "0xcf", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 regular reads", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_REG", + "PerPkg": "1", + "UMask": "0xc1", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 underfill reads", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc4", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all writes", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR", + "PerPkg": "1", + "UMask": "0xf0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 regular writes", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR_NONPRE", + "PerPkg": "1", + "UMask": "0xd0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 auto-precharge writes", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR_PRE", + "PerPkg": "1", + "UMask": "0xe0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all CAS operations", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all reads", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD", + "PerPkg": "1", + "UMask": "0xcf", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 regular reads", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_REG", + "PerPkg": "1", + "UMask": "0xc1", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 underfill reads", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc4", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all writes", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR", + "PerPkg": "1", + "UMask": "0xf0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 regular writes", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR_NONPRE", + "PerPkg": "1", + "UMask": "0xd0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 auto-precharge writes", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR_PRE", + "PerPkg": "1", + "UMask": "0xe0", + "Unit": "IMC" + }, + { + "BriefDescription": "Number of DRAM DCLK clock cycles while the event is enabled", + "EventCode": "0x01", + "EventName": "UNC_M_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "DRAM Clockticks", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "Number of DRAM HCLK clock cycles while the event is enabled", + "EventCode": "0x01", + "EventName": "UNC_M_HCLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "DRAM Clockticks", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. 
: Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Precharge due to (?) : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.PGT", + "PerPkg": "1", + "UMask": "0xf8", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.RD", + "PerPkg": "1", + "UMask": "0xf1", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.UFILL", + "PerPkg": "1", + "UMask": "0xf4", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.WR", + "PerPkg": "1", + "UMask": "0xf2", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer inserts on subchannel 0", + "EventCode": "0x17", + "EventName": "UNC_M_RDB_INSERTS.SCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer inserts on subchannel 1", + "EventCode": "0x17", + "EventName": "UNC_M_RDB_INSERTS.SCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer occupancy on subchannel 0", + "EventCode": "0x1a", + "EventName": "UNC_M_RDB_OCCUPANCY_SCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer occupancy on subchannel 1", + "EventCode": "0x1b", + "EventName": "UNC_M_RDB_OCCUPANCY_SCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.PCH0", + "PerPkg": "1", + "UMask": "0x50", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. 
This includes both ISOCH and non-ISOCH requests.", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.PCH1", + "PerPkg": "1", + "UMask": "0xa0", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 0", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH0", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 1", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 0", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 1", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 0", + "EventCode": "0x80", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 1", + "EventCode": "0x81", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 0", + "EventCode": "0x82", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 1", + "EventCode": "0x83", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue Allocations", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.PCH0", + "PerPkg": "1", + "UMask": "0x50", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue Allocations", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.PCH1", + "PerPkg": "1", + "UMask": "0xa0", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 0", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH0", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 1", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 0", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 1", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 0", + "EventCode": "0x84", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 1", + "EventCode": "0x85", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 1, 
pseudochannel 0", + "EventCode": "0x86", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 1", + "EventCode": "0x87", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH1", + "PerPkg": "1", + "Unit": "IMC" + } +] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-power.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-power.json new file mode 100644 index 000000000000..e3a66166e28c --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-power.json @@ -0,0 +1,10 @@ +[ + { + "BriefDescription": "PCU Clockticks", + "EventCode": "0x01", + "EventName": "UNC_P_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "PCU Clockticks: The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", + "Unit": "PCU" + } +] diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json index bd5f2b634c98..371974c6d6c3 100644 --- a/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json @@ -1,24 +1,131 @@ [ { - "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.", + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.STLB_HIT", + "SampleAfterValue": "200003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses.", "EventCode": "0x08", "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED", - "SampleAfterValue": "1000003", + "SampleAfterValue": "200003", "UMask": "0xe" }, { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "200003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "200003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle.", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). 
Includes EPT-walk intervals.", + "SampleAfterValue": "200003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all pages sizes. Will result in a DTLB write from STLB.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.STLB_HIT", + "SampleAfterValue": "2000003", + "UMask": "0x20" + }, + { "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.", "EventCode": "0x49", "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED", - "SampleAfterValue": "1000003", + "SampleAfterValue": "2000003", "UMask": "0xe" }, { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.", + "SampleAfterValue": "200003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.MISS_CAUSED_WALK", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.STLB_HIT", + "SampleAfterValue": "2000003", + "UMask": "0x20" + }, + { "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.", "EventCode": "0x85", "EventName": "ITLB_MISSES.WALK_COMPLETED", "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. 
Includes page walks that page fault.", "SampleAfterValue": "200003", "UMask": "0xe" + }, + { + "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts the number of page walks outstanding for iside in PMH every cycle.", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for iside in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals. Walks could be counted by edge detecting on this event, but would count restarted suspended walks.", + "SampleAfterValue": "200003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.", + "EventCode": "0x05", + "EventName": "LD_HEAD.DTLB_MISS_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x90" } ] diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json index 095904c77001..d6f543471b24 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json +++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json @@ -19,7 +19,7 @@ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches", "EventCode": "0xAB", "EventName": "DSB2MITE_SWITCHES.COUNT", - "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses.\nNote: Invoking MITE requires two or three cycles delay.", + "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses. Note: Invoking MITE requires two or three cycles delay.", "SampleAfterValue": "2000003", "UMask": "0x1" }, @@ -267,11 +267,11 @@ "UMask": "0x4" }, { - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.DSB_CYCLES_OK]", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.DSB_CYCLES_OK]", "CounterMask": "4", "EventCode": "0x79", "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS", - "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. 
[This event is alias to IDQ.DSB_CYCLES_OK]", + "PublicDescription": "Counts the number of cycles 4 or more uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_OK]", "SampleAfterValue": "2000003", "UMask": "0x18" }, @@ -321,11 +321,11 @@ "UMask": "0x18" }, { - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", "CounterMask": "4", "EventCode": "0x79", "EventName": "IDQ.DSB_CYCLES_OK", - "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", + "PublicDescription": "Counts the number of cycles 4 or more uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", "SampleAfterValue": "2000003", "UMask": "0x18" }, diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json index 588ad6059a13..f047862f9735 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/memory.json +++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json @@ -1008,7 +1008,7 @@ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).", "EventCode": "0xC9", "EventName": "RTM_RETIRED.ABORTED", - "PEBS": "1", + "PEBS": "2", "PublicDescription": "Number of times RTM abort was triggered.", "SampleAfterValue": "2000003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/skylake/metricgroups.json b/tools/perf/pmu-events/arch/x86/skylake/metricgroups.json index a151ba9cccb0..5452a1448ded 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/skylake/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -25,7 +25,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from 
Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -63,8 +65,10 @@ "tma_L5_group": "Metrics for top-down breakdown at level 5", "tma_L6_group": "Metrics for top-down breakdown at level 6", "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category", + "tma_assists_group": "Metrics contributing to tma_assists category", "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", "tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category", @@ -77,9 +81,9 @@ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category", "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue $issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -99,10 +103,12 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json index cd3e737bf4a1..fe202d1e368a 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json @@ -387,7 +387,7 @@ "Errata": "SKL091, SKL044", "EventCode": "0xC0", "EventName": "INST_RETIRED.NOP", - "PEBS": "1", + "PEBS": "2", "SampleAfterValue": "2000003", "UMask": "0x2" }, diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json index 
faa615c57893..3af71b84bb9d 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json @@ -83,12 +83,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots", + "MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -156,7 +156,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(18.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 16.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(18.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 16.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -177,7 +177,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "16.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "16.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -189,7 +189,7 @@ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. 
Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%" }, @@ -214,10 +214,10 @@ }, { "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline", - "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", + "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -237,7 +237,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { @@ -246,13 +246,13 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. 
Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "22 * tma_info_system_average_frequency * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", + "MetricExpr": "22 * tma_info_system_core_frequency * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -266,7 +266,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%" }, { @@ -274,7 +274,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. 
Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -309,6 +315,15 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists", + "MetricExpr": "34 * FP_ASSIST.ANY / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_fp_assists", + "MetricThreshold": "tma_fp_assists > 0.1", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", @@ -358,10 +367,10 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions", "MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_fused_instructions", "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}", "ScaleUnit": "100%" }, { @@ -371,13 +380,13 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", @@ -385,14 +394,14 @@ }, { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers" }, { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" @@ -405,6 +414,12 @@ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio" + }, + { "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", @@ -430,67 +445,102 @@ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. 
Related metrics: " }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", - "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_bottleneck_branching_overhead" + "MetricThreshold": "tma_info_bottleneck_big_code > 20" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((BR_INST_RETIRED.CONDITIONAL + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_bottleneck_big_code" + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
Related metrics: " }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store" + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency" + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls." + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -511,7 +561,7 @@ { "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES", + "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.COND - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", "MetricName": "tma_info_branches_jump" }, @@ -528,9 +578,15 @@ "MetricName": "tma_info_core_coreipc" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -542,8 +598,8 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -618,7 +674,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -626,7 +682,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -634,7 +690,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -642,7 +698,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." 
}, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -650,7 +706,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -669,7 +725,7 @@ { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -705,136 +761,142 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw" + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": 
"tma_info_memory_l1d_cache_fill_bw" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { - "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", - "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", - "MetricName": "tma_info_memory_l3mpki" + "BriefDescription": "", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": 
"Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_l3mpki" }, { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" - }, - { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)" }, { "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", @@ -863,43 +925,57 @@ "MetricName": "tma_info_memory_tlb_store_stlb_mpki" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, { + "BriefDescription": "Instructions per microcode Assist invocation", + "MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", + "MetricName": "tma_info_pipeline_ipassist", + "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", + "PublicDescription": "Instructions per microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)" + }, + { "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@", "MetricGroup": "Pipeline;Ret", "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full", + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. 
Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -929,13 +1005,6 @@ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches" }, { - "BriefDescription": "Average number of parallel requests to external memory", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_parallel_requests", - "PublicDescription": "Average number of parallel requests to external memory. Accounts for all requests" - }, - { "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)", "MetricExpr": "1e9 * (UNC_ARB_TRK_OCCUPANCY.DATA_READ / UNC_ARB_TRK_REQUESTS.DATA_READ) / (tma_info_system_socket_clks / duration_time)", "MetricGroup": "Mem;MemoryLat;SoC", @@ -943,12 +1012,6 @@ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. 
([RKL+]memory-controller only)" }, { - "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", - "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_request_latency" - }, - { "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active", "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)", "MetricGroup": "SMT", @@ -1013,8 +1076,8 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", - "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1023,7 +1086,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. 
Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1033,7 +1096,7 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1042,24 +1105,24 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "6.5 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "6.5 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. 
Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", - "MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks", + "MetricExpr": "DECODE.LCP / tma_info_thread_clks", "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", "MetricName": "tma_lcp", "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", @@ -1073,7 +1136,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1123,21 +1186,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. 
This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1165,7 +1228,7 @@ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. 
The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1182,17 +1245,17 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%" }, { - "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued", + "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY", "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", "MetricName": "tma_mixing_vectors", "MetricThreshold": "tma_mixing_vectors > 0.05", - "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", + "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1201,13 +1264,13 @@ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", "MetricName": "tma_ms_switches", "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", - "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. 
Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused", "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_non_fused_branches", "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.", @@ -1216,15 +1279,15 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. 
Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -1232,6 +1295,22 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_core_clks", "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", @@ -1286,12 +1365,12 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -1305,7 +1384,7 @@ }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -1314,7 +1393,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_NONE / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_core_core_clks", + "MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -1344,7 +1423,7 @@ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { @@ -1360,9 +1439,9 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", "MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. 
Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, @@ -1391,7 +1470,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%" }, { @@ -1449,10 +1528,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json index f59405877ae8..73feadaf7674 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json @@ -205,7 +205,7 @@ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.", "EventCode": "0x85", "EventName": "ITLB_MISSES.WALK_PENDING", - "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.", + "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. 
EPT page walk duration are excluded in Skylake microarchitecture.", "SampleAfterValue": "100003", "UMask": "0x10" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json index d28d8822a51a..14229f4b29d8 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json @@ -764,6 +764,15 @@ "UMask": "0x1" }, { + "BriefDescription": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD hit in the L3 and the snoop to one of the sibling cores hits the line in E/S/F state and the line is forwarded.", + "EventCode": "0xB7, 0xBB", + "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x8003C07F7", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.", "EventCode": "0xB7, 0xBB", "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE", diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json index 095904c77001..d6f543471b24 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json @@ -19,7 +19,7 @@ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches", "EventCode": "0xAB", "EventName": "DSB2MITE_SWITCHES.COUNT", - "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses.\nNote: Invoking MITE requires two or three cycles delay.", + "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses. Note: Invoking MITE requires two or three cycles delay.", "SampleAfterValue": "2000003", "UMask": "0x1" }, @@ -267,11 +267,11 @@ "UMask": "0x4" }, { - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.DSB_CYCLES_OK]", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.DSB_CYCLES_OK]", "CounterMask": "4", "EventCode": "0x79", "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS", - "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_OK]", + "PublicDescription": "Counts the number of cycles 4 or more uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_OK]", "SampleAfterValue": "2000003", "UMask": "0x18" }, @@ -321,11 +321,11 @@ "UMask": "0x18" }, { - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", "CounterMask": "4", "EventCode": "0x79", "EventName": "IDQ.DSB_CYCLES_OK", - "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. 
[This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", + "PublicDescription": "Counts the number of cycles 4 or more uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]", "SampleAfterValue": "2000003", "UMask": "0x18" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json index 2b797dbc75fe..dba3cd6b3690 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json @@ -864,7 +864,7 @@ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).", "EventCode": "0xC9", "EventName": "RTM_RETIRED.ABORTED", - "PEBS": "1", + "PEBS": "2", "PublicDescription": "Number of times RTM abort was triggered.", "SampleAfterValue": "2000003", "UMask": "0x4" diff --git a/tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json b/tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json index bc6a9a4d27a9..904d299c95a3 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -26,7 +26,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -64,8 +66,10 @@ "tma_L5_group": "Metrics for top-down breakdown at level 5", "tma_L6_group": "Metrics for top-down breakdown at level 6", "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category", + "tma_assists_group": "Metrics contributing to tma_assists category", "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to 
tma_bad_speculation category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", "tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category", @@ -78,9 +82,9 @@ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category", "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue $issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -100,10 +104,12 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/skylakex/other.json b/tools/perf/pmu-events/arch/x86/skylakex/other.json index cda8a7a45f0c..2511d722327a 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/other.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/other.json @@ -19,7 +19,7 @@ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.", "EventCode": "0x28", "EventName": "CORE_POWER.LVL2_TURBO_LICENSE", - "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server michroarchtecture). This includes high current AVX 512-bit instructions.", + "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). 
This includes high current AVX 512-bit instructions.", "SampleAfterValue": "200003", "UMask": "0x20" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json index 66d686cc933e..c50ddf5b40dd 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json @@ -396,7 +396,7 @@ "Errata": "SKL091, SKL044", "EventCode": "0xC0", "EventName": "INST_RETIRED.NOP", - "PEBS": "1", + "PEBS": "2", "SampleAfterValue": "2000003", "UMask": "0x2" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json index ec3aa5ef00a3..8126f952a30c 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json @@ -210,6 +210,12 @@ "ScaleUnit": "1MB/s" }, { + "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to remote memory.", + "MetricExpr": "UNC_CHA_REQUESTS.WRITES_REMOTE * 64 / 1e6 / duration_time", + "MetricName": "llc_miss_remote_memory_bandwidth_write", + "ScaleUnit": "1MB/s" + }, + { "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions", "MetricExpr": "MEM_INST_RETIRED.ALL_LOADS / INST_RETIRED.ANY", "MetricName": "loads_per_instr", @@ -298,12 +304,12 @@ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots", + "MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -371,7 +377,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(44 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 44 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 44 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", 
"MetricName": "tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -392,7 +398,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "44 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -404,7 +410,7 @@ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%" }, @@ -429,10 +435,10 @@ }, { "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline", - "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", + "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -452,7 +458,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. 
TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { @@ -461,13 +467,13 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. 
Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(110 * tma_info_system_average_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM + OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_system_average_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_thread_clks", + "MetricExpr": "(110 * tma_info_system_core_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM + OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_system_core_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -481,7 +487,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%" }, { @@ -489,7 +495,7 @@ "MetricExpr": "tma_frontend_bound - tma_fetch_latency", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. 
Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -524,6 +530,15 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists", + "MetricExpr": "34 * FP_ASSIST.ANY / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_fp_assists", + "MetricThreshold": "tma_fp_assists > 0.1", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", @@ -582,10 +597,10 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions", "MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_fused_instructions", "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}", "ScaleUnit": "100%" }, { @@ -595,13 +610,13 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", @@ -609,26 +624,50 @@ }, { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers" }, { "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", - "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)", + "MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)", "MetricGroup": "Bad;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmisp_indirect", "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3" }, { "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)", - "MetricExpr": "tma_info_core_ipmispredict", + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;BadSpec;BrMispredicts", "MetricName": "tma_info_bad_spec_ipmispredict", "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio" + }, + { + "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", + "MetricExpr": "(100 * (1 - tma_core_bound / (((EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - 
CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if tma_core_bound < (((EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", + "MetricGroup": "Cor;SMT", + "MetricName": "tma_info_botlnk_core_bound_likely" + }, + { + "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.", + "MetricExpr": "100 * (100 * (tma_fetch_latency * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(2 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb)))", + "MetricGroup": "DSBmiss;Fed", + "MetricName": "tma_info_botlnk_dsb_misses" + }, + { + "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.", + "MetricExpr": "100 * (100 * (tma_fetch_latency * ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD) / ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(2 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD)))", + "MetricGroup": "Fed;FetchLat;IcMiss", + "MetricName": "tma_info_botlnk_ic_misses" + }, + { "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", @@ -654,67 +693,102 @@ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. 
Related metrics: " }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", - "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_bottleneck_branching_overhead" + "MetricThreshold": "tma_info_bottleneck_big_code > 20" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((BR_INST_RETIRED.CONDITIONAL + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_bottleneck_big_code" + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
Related metrics: " }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store" + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency" + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls." + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -735,7 +809,7 @@ { "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES", + "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.COND - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", "MetricName": "tma_info_branches_jump" }, @@ -752,9 +826,15 @@ "MetricName": "tma_info_core_coreipc" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -766,19 +846,12 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, { - "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", - "MetricGroup": "Bad;BadSpec;BrMispredicts;TopdownL1;tma_L1_group", - "MetricName": "tma_info_core_ipmispredict", - "MetricgroupNoGroup": "TopdownL1" - }, - { "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)", "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB", @@ -849,7 +922,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -857,7 +930,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -865,7 +938,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)", @@ -873,7 +946,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx512", "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." 
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -881,7 +954,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -889,7 +962,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -908,7 +981,7 @@ { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -943,16 +1016,22 @@ "PublicDescription": "Instruction per taken branch. 
Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp" }, { + "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki", + "MetricGroup": "Fed;MemoryTLB", + "MetricName": "tma_info_memory_code_stlb_mpki" + }, + { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction", @@ -968,124 +1047,202 @@ }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw" + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" + }, + { + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "tma_info_memory_latency_data_l2_mlp", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_data_l2_mlp" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": 
"tma_info_memory_l1mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw_2t" + }, + { + "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction", + "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY", + "MetricGroup": "L2Evicts;Mem;Server", + "MetricName": "tma_info_memory_l2_evictions_nonsilent_pki" + }, + { + "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)", + "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY", + "MetricGroup": "L2Evicts;Mem;Server", + "MetricName": "tma_info_memory_l2_evictions_silent_pki" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw" + }, + { + "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw_2t" + }, + { + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw_2t" + }, + { "BriefDescription": "L3 cache 
true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "Mem", "MetricName": "tma_info_memory_l3mpki" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "Average Latency for L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_miss_latency", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { - "BriefDescription": "Average Parallel L2 cache miss data reads", - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "BriefDescription": "Average Parallel L2 cache miss demand Loads", + "MetricExpr": "tma_info_memory_load_l2_mlp", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_load_l2_mlp" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": 
"tma_info_memory_tlb_load_stlb_mpki", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_load_stlb_mpki" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "tma_info_memory_uc_load_pki", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + }, + { + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricExpr": "tma_info_memory_tlb_page_walks_utilization", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_page_walks_utilization" + }, + { + "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_store_stlb_mpki" }, { "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", @@ -1114,55 +1271,77 @@ "MetricName": "tma_info_memory_tlb_store_stlb_mpki" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_uc_load_pki" + }, + { + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, { + "BriefDescription": "Instructions per a microcode Assist invocation", + "MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", + "MetricName": "tma_info_pipeline_ipassist", + "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", + "PublicDescription": "Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)" + }, + { "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@", "MetricGroup": "Pipeline;Ret", "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]", "MetricExpr": "(UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3) * 4 / 1e9 / duration_time", - "MetricGroup": "IoBW;Mem;Server;SoC", - "MetricName": "tma_info_system_io_read_bw" + "MetricGroup": "IoBW;MemOffcore;Server;SoC", + "MetricName": "tma_info_system_io_read_bw", + "PublicDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU" }, { "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]", "MetricExpr": "(UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3) * 4 / 1e9 / duration_time", - "MetricGroup": "IoBW;Mem;Server;SoC", - "MetricName": "tma_info_system_io_write_bw" + "MetricGroup": "IoBW;MemOffcore;Server;SoC", + "MetricName": "tma_info_system_io_write_bw", + "PublicDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -1187,7 +1366,7 @@ { "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]", "MetricExpr": "1e9 * (UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_INSERTS) / imc_0@event\\=0x0@", - "MetricGroup": "Mem;MemoryLat;Server;SoC", + "MetricGroup": "MemOffcore;MemoryLat;Server;SoC", "MetricName": "tma_info_system_mem_dram_read_latency", "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches" }, @@ -1247,6 +1426,12 @@ "MetricName": "tma_info_system_turbo_utilization" }, { + "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]", + "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_uncore_frequency" + }, + { "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", "MetricGroup": "Pipeline", @@ -1293,8 +1478,8 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", - "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. 
Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1303,7 +1488,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1313,7 +1498,7 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1322,24 +1507,24 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. 
Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "17 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "17 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", - "MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks", + "MetricExpr": "DECODE.LCP / tma_info_thread_clks", "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", "MetricName": "tma_lcp", "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", @@ -1353,7 +1538,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. 
A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1384,10 +1569,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory", - "MetricExpr": "59.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "59.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_local_dram", - "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_local_mem", + "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS", "ScaleUnit": "100%" }, @@ -1412,21 +1597,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. 
This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1454,7 +1639,7 @@ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. 
Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1471,17 +1656,17 @@ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 4 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%" }, { - "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued", + "BriefDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY", "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", "MetricName": "tma_mixing_vectors", "MetricThreshold": "tma_mixing_vectors > 0.05", - "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", + "PublicDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1490,13 +1675,13 @@ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", "MetricName": "tma_ms_switches", "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", - "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). 
Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused", "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_non_fused_branches", "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.", @@ -1505,15 +1690,15 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. 
Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -1521,6 +1706,22 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_core_clks", "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", @@ -1575,12 +1776,12 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { @@ -1594,7 +1795,7 @@ }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -1603,7 +1804,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_NONE / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_core_core_clks", + "MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -1633,13 +1834,13 @@ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues", "MetricConstraint": "NO_GROUP_EVENTS_NMI", - "MetricExpr": "(89.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 89.5 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(89.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 89.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group", "MetricName": "tma_remote_cache", 
"MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", @@ -1648,10 +1849,10 @@ }, { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory", - "MetricExpr": "127 * tma_info_system_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "127 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group", - "MetricName": "tma_remote_dram", - "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "MetricName": "tma_remote_mem", + "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS", "ScaleUnit": "100%" }, @@ -1668,9 +1869,9 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", "MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, @@ -1699,7 +1900,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). 
Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%" }, { @@ -1757,10 +1958,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json index 3eece8a728b5..f32d4d9d283a 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json @@ -38,7 +38,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.CLFLUSH", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x80", "Unit": "IRP" }, @@ -47,7 +47,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.CRD", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x2", "Unit": "IRP" }, @@ -56,7 +56,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.DRD", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x4", "Unit": "IRP" }, @@ -65,7 +65,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x20", "Unit": "IRP" }, @@ -74,7 +74,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations serviced by the IRP", "UMask": "0x1", "Unit": "IRP" }, @@ -101,7 +101,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.WBMTOI", "PerPkg": "1", - "PublicDescription": "Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Counts the number of coherency related operations 
serviced by the IRP", "UMask": "0x40", "Unit": "IRP" }, @@ -500,7 +500,7 @@ "EventCode": "0x11", "EventName": "UNC_I_TRANSACTIONS.WRITES", "PerPkg": "1", - "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.", + "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.", "UMask": "0x2", "Unit": "IRP" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json index 2a3a709018bb..743c91f3d2f0 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json @@ -34,7 +34,7 @@ "EventCode": "0x1", "EventName": "UNC_IIO_CLOCKTICKS", "PerPkg": "1", - "PublicDescription": "Counts clockticks of the 1GHz trafiic controller clock in the IIO unit.", + "PublicDescription": "Counts clockticks of the 1GHz traffic controller clock in the IIO unit.", "Unit": "IIO" }, { diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json index c6254af7a468..ceef46046488 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json @@ -144,6 +144,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0x40", "Unit": "PCU" }, { @@ -152,6 +153,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0x80", "Unit": "PCU" }, { @@ -160,6 +162,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6", "PerPkg": "1", "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0xc0", "Unit": "PCU" }, { diff --git a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json index f59405877ae8..73feadaf7674 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json @@ -205,7 +205,7 @@ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.", "EventCode": "0x85", "EventName": "ITLB_MISSES.WALK_PENDING", - "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.", + "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake microarchitecture.", "SampleAfterValue": "100003", "UMask": "0x10" }, diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json index a68a5bb05c22..4090e4da1bd0 100644 --- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json +++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json @@ -1444,7 +1444,7 @@ "Unit": "CHA" }, { - "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL", + "BriefDescription": "This event is deprecated.", "Deprecated": "1", "EventCode": "0x34", "EventName": "UNC_CHA_LLC_LOOKUP.DMND_READ_LOCAL", @@ -1638,7 +1638,7 @@ "Unit": "CHA" }, { - "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.RFO_LOCAL", + "BriefDescription": "This event is deprecated.", "Deprecated": "1", "EventCode": "0x34", "EventName": "UNC_CHA_LLC_LOOKUP.RFO_PREF_LOCAL", diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json index 7e2895f7fe3d..7cc3635b118b 100644 --- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json @@ -38,7 +38,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.CLFLUSH", "PerPkg": "1", - "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations serviced by the IRP", "UMask": "0x80", "Unit": "IRP" }, @@ -65,7 +65,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.WBMTOI", "PerPkg": "1", - "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations serviced by the IRP", "UMask": "0x40", "Unit": "IRP" }, @@ -454,7 +454,7 @@ "EventCode": "0x11", "EventName": "UNC_I_TRANSACTIONS.WRITES", "PerPkg": "1", - "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. 
We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.", + "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.", "UMask": "0x2", "Unit": "IRP" }, diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json index ecdd6f0f8e8f..de156e499f56 100644 --- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json +++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json @@ -2506,17 +2506,6 @@ "Unit": "IIO" }, { - "BriefDescription": "Number requests sent to PCIe from main die : From IRP", - "EventCode": "0xC2", - "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.IRP", - "FCMask": "0x07", - "PerPkg": "1", - "PortMask": "0xFF", - "PublicDescription": "Number requests sent to PCIe from main die : From IRP : Captures Posted/Non-posted allocations from IRP. i.e. either non-confined P2P traffic or from the CPU", - "UMask": "0x1", - "Unit": "IIO" - }, - { "BriefDescription": "Number requests sent to PCIe from main die : From ITC", "EventCode": "0xC2", "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.ITC", diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json index a61ffca2dfea..dcf268467db9 100644 --- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json +++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json @@ -150,6 +150,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0", "PerPkg": "1", "PublicDescription": "Number of cores in C-State : C0 and C1 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0x40", "Unit": "PCU" }, { @@ -158,6 +159,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3", "PerPkg": "1", "PublicDescription": "Number of cores in C-State : C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0x80", "Unit": "PCU" }, { @@ -166,6 +168,7 @@ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6", "PerPkg": "1", "PublicDescription": "Number of cores in C-State : C6 and C7 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "UMask": "0xc0", "Unit": "PCU" }, { diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json b/tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json index a151ba9cccb0..5452a1448ded 100644 --- a/tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json +++ b/tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json @@ -2,10 +2,10 @@ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", - "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -25,7 +25,9 @@ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", + "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet", @@ -63,8 +65,10 @@ "tma_L5_group": "Metrics for top-down breakdown at level 5", "tma_L6_group": "Metrics for top-down breakdown at level 6", "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category", + "tma_assists_group": "Metrics contributing to tma_assists category", "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category", "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category", + "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category", "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category", "tma_core_bound_group": "Metrics contributing to tma_core_bound category", "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category", @@ -77,9 +81,9 @@ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category", "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category", "tma_issue2P": "Metrics related by the issue $issue2P", - "tma_issueBC": "Metrics related by the issue $issueBC", "tma_issueBM": "Metrics related by the issue $issueBM", "tma_issueBW": "Metrics 
related by the issue $issueBW", + "tma_issueComp": "Metrics related by the issue $issueComp", "tma_issueD0": "Metrics related by the issue $issueD0", "tma_issueFB": "Metrics related by the issue $issueFB", "tma_issueFL": "Metrics related by the issue $issueFL", @@ -99,10 +103,12 @@ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category", "tma_light_operations_group": "Metrics contributing to tma_light_operations category", "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category", + "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category", "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category", "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category", "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category", "tma_mite_group": "Metrics contributing to tma_mite category", + "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category", "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category", "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category", "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category", diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/other.json b/tools/perf/pmu-events/arch/x86/tigerlake/other.json index 55f3048bcfa6..117b18abcaaf 100644 --- a/tools/perf/pmu-events/arch/x86/tigerlake/other.json +++ b/tools/perf/pmu-events/arch/x86/tigerlake/other.json @@ -19,7 +19,7 @@ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.", "EventCode": "0x28", "EventName": "CORE_POWER.LVL2_TURBO_LICENSE", - "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.", + "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.", "SampleAfterValue": "200003", "UMask": "0x20" }, diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json index 541bf1dd1679..4f85d53edec2 100644 --- a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json @@ -537,7 +537,7 @@ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread", "EventCode": "0x5e", "EventName": "RS_EVENTS.EMPTY_CYCLES", - "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)", + "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)", "SampleAfterValue": "1000003", "UMask": "0x1" }, @@ -561,14 +561,6 @@ "UMask": "0x2" }, { - "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions", - "EventCode": "0xa4", - "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS", - "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. 
This event estimates number of operations that were issued but not retired from the speculative path as well as the out-of-order engine recovery past a branch misprediction.", - "SampleAfterValue": "10000003", - "UMask": "0x8" - }, - { "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event", "EventName": "TOPDOWN.SLOTS", "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).", diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json index f11860f39c18..8ae4f2474b25 100644 --- a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json @@ -98,12 +98,12 @@ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)", "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", "MetricName": "tma_alu_op_utilization", - "MetricThreshold": "tma_alu_op_utilization > 0.6", + "MetricThreshold": "tma_alu_op_utilization > 0.4", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", - "MetricExpr": "100 * ASSISTS.ANY / tma_info_thread_slots", + "MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots", "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", "MetricName": "tma_assists", "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", @@ -113,7 +113,7 @@ { "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", "DefaultMetricgroupName": "TopdownL1", - "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=1\\,edge@ / tma_info_thread_slots", + "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / tma_info_thread_slots", "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group", "MetricName": "tma_backend_bound", "MetricThreshold": "tma_backend_bound > 0.2", @@ -135,7 +135,7 @@ { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.", "MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_branch_instructions", "MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6", "ScaleUnit": "100%" @@ -180,7 +180,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", 
"MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(49 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 48 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "(49 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 48 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_contested_accesses", "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -200,7 +200,7 @@ { "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "48 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "MetricExpr": "48 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", "MetricName": "tma_data_sharing", "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -212,7 +212,7 @@ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", "MetricName": "tma_decoder0_alone", - "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", "ScaleUnit": "100%" }, @@ -240,7 +240,7 @@ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_dsb", - "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", "ScaleUnit": "100%" }, @@ -259,7 +259,7 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", "MetricName": "tma_dtlb_load", "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { @@ -268,12 +268,12 @@ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", "MetricName": "tma_dtlb_store", "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. 
Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization", "ScaleUnit": "100%" }, { "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", - "MetricExpr": "54 * tma_info_system_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", + "MetricExpr": "54 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks", "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", "MetricName": "tma_false_sharing", "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", @@ -286,7 +286,7 @@ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", "MetricName": "tma_fb_full", "MetricThreshold": "tma_fb_full > 0.3", - "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", "ScaleUnit": "100%" }, { @@ -294,7 +294,7 @@ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)", "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", "MetricName": "tma_fetch_bandwidth", - "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", "MetricgroupNoGroup": "TopdownL2", "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. 
Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", "ScaleUnit": "100%" @@ -328,6 +328,15 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists", + "MetricExpr": "34 * ASSISTS.FP / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_fp_assists", + "MetricThreshold": "tma_fp_assists > 0.1", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)", "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", @@ -390,13 +399,13 @@ "MetricName": "tma_heavy_operations", "MetricThreshold": "tma_heavy_operations > 0.1", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", - "MetricExpr": "ICACHE_16B.IFDATA_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_icache_misses", "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", "ScaleUnit": "100%" }, { "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", + "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100", "MetricGroup": "Bad;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bad_spec_branch_misprediction_cost", "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). 
Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers" @@ -446,6 +455,12 @@ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200" }, { + "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)", + "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio" + }, + { "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", @@ -472,67 +487,102 @@ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: " }, { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Ret", + "MetricName": "tma_info_bottleneck_base_non_br", + "MetricThreshold": "tma_info_bottleneck_base_non_br > 20" + }, + { "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", - "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", + "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB", "MetricName": "tma_info_bottleneck_big_code", - "MetricThreshold": "tma_info_bottleneck_big_code > 20", - "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_bottleneck_branching_overhead" + "MetricThreshold": "tma_info_bottleneck_big_code > 20" }, { "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)", - "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)", - "MetricGroup": "Ret;tma_issueBC", + "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)", + "MetricGroup": "Ret", "MetricName": "tma_info_bottleneck_branching_overhead", - "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10", - "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). 
Related metrics: tma_info_bottleneck_big_code" + "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_info_bottleneck_cache_memory_bandwidth", + "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_info_bottleneck_cache_memory_latency", + "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "Cor;tma_issueComp", + "MetricName": "tma_info_bottleneck_compute_bound_est", + "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
Related metrics: " }, { "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", "MetricGroup": "Fed;FetchBW;Frontend", "MetricName": "tma_info_bottleneck_instruction_fetch_bw", "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20" }, { - "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", - "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", - "MetricName": "tma_info_bottleneck_memory_bandwidth", - "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20", - "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full" + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;Cor;Ret;tma_issueMS", + "MetricName": "tma_info_bottleneck_irregular_overhead", + "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches" }, { "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", "MetricName": "tma_info_bottleneck_memory_data_tlbs", "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20", - "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store" + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization" }, { - "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", - "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))", - "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", - "MetricName": "tma_info_bottleneck_memory_latency", - "MetricThreshold": "tma_info_bottleneck_memory_latency > 20", - "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency" + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "Mem;Offcore;tma_issueTLB", + "MetricName": "tma_info_bottleneck_memory_synchronization", + "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs" }, { "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", "MetricName": "tma_info_bottleneck_mispredictions", "MetricThreshold": "tma_info_bottleneck_mispredictions > 20", "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers" }, { + "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)", + "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)", + "MetricGroup": "Cor;Offcore", + "MetricName": "tma_info_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls." + }, + { "BriefDescription": "Fraction of branches that are CALL or RET", "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES", "MetricGroup": "Bad;Branches", @@ -564,7 +614,7 @@ }, { "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core", - "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED", + "MetricExpr": "(CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else tma_info_thread_clks)", "MetricGroup": "SMT", "MetricName": "tma_info_core_core_clks" }, @@ -575,8 +625,14 @@ "MetricName": "tma_info_core_coreipc" }, { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc" + }, + { "BriefDescription": "Floating Point Operations Per Cycle", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks", "MetricGroup": "Flops;Ret", "MetricName": "tma_info_core_flopc" }, @@ -588,8 +644,8 @@ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)." 
}, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", - "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", "MetricName": "tma_info_core_ilp" }, @@ -669,7 +725,7 @@ "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_iparith", "MetricThreshold": "tma_info_inst_mix_iparith < 10", - "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW." + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW." }, { "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", @@ -677,7 +733,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx128", "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", @@ -685,7 +741,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx256", "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)", @@ -693,7 +749,7 @@ "MetricGroup": "Flops;FpVector;InsType", "MetricName": "tma_info_inst_mix_iparith_avx512", "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10", - "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", @@ -701,7 +757,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_dp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). 
May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", @@ -709,7 +765,7 @@ "MetricGroup": "Flops;FpScalar;InsType", "MetricName": "tma_info_inst_mix_iparith_scalar_sp", "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", - "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting." + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting." }, { "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", @@ -727,7 +783,7 @@ }, { "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", - "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", + "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)", "MetricGroup": "Flops;InsType", "MetricName": "tma_info_inst_mix_ipflop", "MetricThreshold": "tma_info_inst_mix_ipflop < 10" @@ -740,6 +796,12 @@ "MetricThreshold": "tma_info_inst_mix_ipload < 3" }, { + "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / MISC_RETIRED.PAUSE_INST", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_ippause" + }, + { "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", "MetricGroup": "InsType", @@ -763,142 +825,154 @@ }, { "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l1d_cache_fill_bw" + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l2_cache_fill_bw" + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t" }, { "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_core_l3_cache_access_bw" + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t" }, { "BriefDescription": "Average per-core data fill 
bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_core_l3_cache_fill_bw" + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t" }, { "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_fb_hpki" }, { + "BriefDescription": "", + "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw" + }, + { "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki" }, { "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l1mpki_load" }, { + "BriefDescription": "", + "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw" + }, + { "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_all" }, { "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2hpki_load" }, { "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", - "MetricGroup": "Backend;CacheMisses;Mem", + "MetricGroup": "Backend;CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem;Offcore", + "MetricGroup": "CacheHits;Mem;Offcore", "MetricName": "tma_info_memory_l2mpki_all" }, { "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", + "MetricGroup": "CacheHits;Mem", "MetricName": "tma_info_memory_l2mpki_load" }, { - "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", - "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", - "MetricGroup": "CacheMisses;Mem", - "MetricName": "tma_info_memory_l3mpki" + "BriefDescription": "", + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw" }, { - "BriefDescription": "Actual Average 
Latency for L1 data-cache miss demand load operations (in core cycles)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", - "MetricGroup": "Mem;MemoryBound;MemoryLat", - "MetricName": "tma_info_memory_load_miss_real_latency" + "BriefDescription": "", + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", - "MetricGroup": "Mem;MemoryBW;MemoryBound", - "MetricName": "tma_info_memory_mlp", - "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_l3mpki" }, { "BriefDescription": "Average Parallel L2 cache miss data reads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_data_l2_mlp" + "MetricName": "tma_info_memory_latency_data_l2_mlp" }, { "BriefDescription": "Average Latency for L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_miss_latency" + "MetricName": "tma_info_memory_latency_load_l2_miss_latency" }, { "BriefDescription": "Average Parallel L2 cache miss demand Loads", "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", "MetricGroup": "Memory_BW;Offcore", - "MetricName": "tma_info_memory_oro_load_l2_mlp" + "MetricName": "tma_info_memory_latency_load_l2_mlp" }, { "BriefDescription": "Average Latency for L3 cache miss demand Loads", "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", "MetricGroup": "Memory_Lat;Offcore", - "MetricName": "tma_info_memory_oro_load_l3_miss_latency" + "MetricName": "tma_info_memory_latency_load_l3_miss_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t" + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t" + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_bus_lock_pki" }, { - "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / 
sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_access_bw", - "MetricGroup": "Mem;MemoryBW;Offcore", - "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t" + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki" }, { - "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", - "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw", - "MetricGroup": "Mem;MemoryBW", - "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t" + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "MetricGroup": "Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)" }, { "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", @@ -926,42 +1000,56 @@ "MetricName": "tma_info_memory_tlb_store_stlb_mpki" }, { - "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "BriefDescription": "", + "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", "MetricName": "tma_info_pipeline_execute" }, { + "BriefDescription": "Instructions per a microcode Assist invocation", + "MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", + "MetricName": "tma_info_pipeline_ipassist", + "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", + "PublicDescription": "Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)" + }, + { "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.", "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@", "MetricGroup": "Pipeline;Ret", "MetricName": "tma_info_pipeline_retire" }, { - "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]", + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time", "MetricGroup": "Power;Summary", - "MetricName": "tma_info_system_average_frequency" + "MetricName": "tma_info_system_core_frequency" }, { - "BriefDescription": "Average CPU Utilization", + "BriefDescription": "Average CPU Utilization (percentage)", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", "MetricGroup": "HPC;Summary", "MetricName": "tma_info_system_cpu_utilization" }, { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized" + }, + { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3", - "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", - "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full" }, { "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", + "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time", "MetricGroup": "Cor;Flops;HPC", "MetricName": "tma_info_system_gflops", - "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine." + "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width" }, { "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", @@ -998,12 +1086,6 @@ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. 
([RKL+] memory-controller only)" }, { - "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", - "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / arb@event\\=0x81\\,umask\\=0x1@", - "MetricGroup": "Mem;SoC", - "MetricName": "tma_info_system_mem_request_latency" - }, - { "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0", "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks", "MetricGroup": "Power", @@ -1097,8 +1179,8 @@ }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", - "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", + "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", "MetricName": "tma_itlb_misses", "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", @@ -1107,7 +1189,7 @@ { "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache", "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", "MetricName": "tma_l1_bound", "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads that miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. 
Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", @@ -1117,7 +1199,7 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", "MetricConstraint": "NO_GROUP_EVENTS", "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l2_bound", "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS", @@ -1127,24 +1209,24 @@ "BriefDescription": "This metric estimates how often the CPU was stalled due to load accesses to L3 cache or contended with a sibling Core", "MetricConstraint": "NO_GROUP_EVENTS_NMI", "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", - "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", "MetricName": "tma_l3_bound", "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", "PublicDescription": "This metric estimates how often the CPU was stalled due to load accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", - "MetricExpr": "17.5 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "17.5 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks", "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", "MetricName": "tma_l3_hit_latency", "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. 
Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", - "MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks", + "MetricExpr": "DECODE.LCP / tma_info_thread_clks", "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", "MetricName": "tma_lcp", "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", @@ -1158,7 +1240,7 @@ "MetricName": "tma_light_operations", "MetricThreshold": "tma_light_operations > 0.6", "MetricgroupNoGroup": "TopdownL2", - "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", "ScaleUnit": "100%" }, { @@ -1201,7 +1283,7 @@ "MetricExpr": "(LSD.CYCLES_ACTIVE - LSD.CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_lsd", - "MetricThreshold": "tma_lsd > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. 
However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit the LSD structure well.", "ScaleUnit": "100%" }, @@ -1216,21 +1298,21 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", "MetricName": "tma_mem_bandwidth", "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full", "ScaleUnit": "100%" }, { - "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)", + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth", "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", "MetricName": "tma_mem_latency", "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). 
Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency", "ScaleUnit": "100%" }, { @@ -1254,11 +1336,11 @@ }, { "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit", - "MetricExpr": "tma_retiring * tma_info_thread_slots / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots", + "MetricExpr": "UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots", "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", "MetricName": "tma_microcode_sequencer", "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", - "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1275,7 +1357,7 @@ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_core_clks / 2", "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", "MetricName": "tma_mite", - "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. 
Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", "ScaleUnit": "100%" }, { @@ -1284,16 +1366,16 @@ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks", "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group", "MetricName": "tma_mite_4wide", - "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))", + "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", "ScaleUnit": "100%" }, { - "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued", + "BriefDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY", "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", "MetricName": "tma_mixing_vectors", "MetricThreshold": "tma_mixing_vectors > 0.05", - "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", + "PublicDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { @@ -1302,22 +1384,22 @@ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", "MetricName": "tma_ms_switches", "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", - "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. 
The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)", - "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", "MetricName": "tma_nop_instructions", - "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", "MetricConstraint": "NO_GROUP_EVENTS", - "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions + tma_nop_instructions))", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions))", "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", "MetricName": "tma_other_light_ops", "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", @@ -1325,6 +1407,22 @@ "ScaleUnit": "100%" }, { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)", + "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%" + }, + { "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)", "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_core_clks", "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", @@ -1352,17 +1450,17 @@ "ScaleUnit": "100%" }, { - "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and 
simple ALU)", + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks", "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", "MetricName": "tma_port_6", "MetricThreshold": "tma_port_6 > 0.6", - "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", "ScaleUnit": "100%" }, { "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", - "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)", "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", "MetricName": "tma_ports_utilization", "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", @@ -1371,7 +1469,7 @@ }, { "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", - "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", + "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_0", "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", @@ -1401,7 +1499,7 @@ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks", "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", "MetricName": "tma_ports_utilized_3m", - "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & 
(tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3", "ScaleUnit": "100%" }, @@ -1419,18 +1517,18 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks", - "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", "MetricName": "tma_serializing_operation", - "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches", "ScaleUnit": "100%" }, { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", "MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks", - "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", "MetricName": "tma_slow_pause", - "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", + "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST", "ScaleUnit": "100%" }, @@ -1459,7 +1557,7 @@ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", "MetricName": "tma_sq_full", "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", - "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). 
Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth", "ScaleUnit": "100%" }, { @@ -1527,10 +1625,10 @@ { "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", "MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks", - "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", "MetricName": "tma_unknown_branches", "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", - "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY", "ScaleUnit": "100%" }, { diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json index eed1b90a2779..48f23acc76c0 100644 --- a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json @@ -25,6 +25,7 @@ }, { "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD", + "Deprecated": "1", "EventCode": "0x81", "EventName": "UNC_ARB_DAT_REQUESTS.RD", "PerPkg": "1", @@ -33,6 +34,7 @@ }, { "BriefDescription": "This event is deprecated. 
Refer to new event UNC_ARB_DAT_OCCUPANCY.ALL", + "Deprecated": "1", "EventCode": "0x85", "EventName": "UNC_ARB_IFA_OCCUPANCY.ALL", "PerPkg": "1", diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index 53ab050c8fa4..e42efc16723e 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -203,7 +203,7 @@ class JsonEvent: def llx(x: int) -> str: """Convert an int to a string similar to a printf modifier of %#llx.""" - return '0' if x == 0 else hex(x) + return str(x) if x >= 0 and x < 10 else hex(x) def fixdesc(s: str) -> str: """Fix formatting issue for the desc string.""" @@ -294,6 +294,23 @@ class JsonEvent: } return table[unit] if unit in table else f'uncore_{unit.lower()}' + def is_zero(val: str) -> bool: + try: + if val.startswith('0x'): + return int(val, 16) == 0 + else: + return int(val) == 0 + except e: + return False + + def canonicalize_value(val: str) -> str: + try: + if val.startswith('0x'): + return llx(int(val, 16)) + return str(int(val)) + except e: + return val + eventcode = 0 if 'EventCode' in jd: eventcode = int(jd['EventCode'].split(',', 1)[0], 0) @@ -356,10 +373,14 @@ class JsonEvent: ('UMask', 'umask='), ('NodeType', 'type='), ('RdWrMask', 'rdwrmask='), + ('EnAllCores', 'enallcores='), + ('EnAllSlices', 'enallslices='), + ('SliceId', 'sliceid='), + ('ThreadMask', 'threadmask='), ] for key, value in event_fields: - if key in jd and jd[key] != '0': - event += ',' + value + jd[key] + if key in jd and not is_zero(jd[key]): + event += f',{value}{canonicalize_value(jd[key])}' if filter: event += f',{filter}' if msr: diff --git a/tools/perf/scripts/python/parallel-perf.py b/tools/perf/scripts/python/parallel-perf.py new file mode 100755 index 000000000000..21f32ec5ed46 --- /dev/null +++ b/tools/perf/scripts/python/parallel-perf.py @@ -0,0 +1,988 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# Run a perf script command multiple times in parallel, using perf script +# options --cpu and --time so that each job processes a different chunk +# of the data. +# +# Copyright (c) 2024, Intel Corporation. 
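+#
+# Example (an illustrative invocation; the --help epilog below shows more):
+#   parallel-perf.py --nr=4 -- perf script --ns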
+
+import subprocess
+import argparse
+import pathlib
+import shlex
+import time
+import copy
+import sys
+import os
+import re
+
+glb_prog_name = "parallel-perf.py"
+glb_min_interval = 10.0
+glb_min_samples = 64
+
+class Verbosity():
+
+	def __init__(self, quiet=False, verbose=False, debug=False):
+		self.normal = True
+		self.verbose = verbose
+		self.debug = debug
+		self.self_test = True
+		if self.debug:
+			self.verbose = True
+		if self.verbose:
+			quiet = False
+		if quiet:
+			self.normal = False
+
+# Manage work (Start/Wait/Kill), as represented by a subprocess.Popen command
+class Work():
+
+	def __init__(self, cmd, pipe_to, output_dir="."):
+		self.popen = None
+		self.consumer = None
+		self.cmd = cmd
+		self.pipe_to = pipe_to
+		self.output_dir = output_dir
+		self.cmdout_name = f"{output_dir}/cmd.txt"
+		self.stdout_name = f"{output_dir}/out.txt"
+		self.stderr_name = f"{output_dir}/err.txt"
+
+	def Command(self):
+		sh_cmd = [ shlex.quote(x) for x in self.cmd ]
+		return " ".join(sh_cmd)
+
+	def Stdout(self):
+		return open(self.stdout_name, "w")
+
+	def Stderr(self):
+		return open(self.stderr_name, "w")
+
+	def CreateOutputDir(self):
+		pathlib.Path(self.output_dir).mkdir(parents=True, exist_ok=True)
+
+	def Start(self):
+		if self.popen:
+			return
+		self.CreateOutputDir()
+		with open(self.cmdout_name, "w") as f:
+			f.write(self.Command())
+			f.write("\n")
+		stdout = self.Stdout()
+		stderr = self.Stderr()
+		if self.pipe_to:
+			self.popen = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=stderr)
+			args = shlex.split(self.pipe_to)
+			self.consumer = subprocess.Popen(args, stdin=self.popen.stdout, stdout=stdout, stderr=stderr)
+		else:
+			self.popen = subprocess.Popen(self.cmd, stdout=stdout, stderr=stderr)
+
+	def RemoveEmptyErrFile(self):
+		if os.path.exists(self.stderr_name):
+			if os.path.getsize(self.stderr_name) == 0:
+				os.unlink(self.stderr_name)
+
+	def Errors(self):
+		if os.path.exists(self.stderr_name):
+			if os.path.getsize(self.stderr_name) != 0:
+				return [ f"Non-empty error file {self.stderr_name}" ]
+		return []
+
+	def TidyUp(self):
+		self.RemoveEmptyErrFile()
+
+	def RawPollWait(self, p, wait):
+		if wait:
+			return p.wait()
+		return p.poll()
+
+	def Poll(self, wait=False):
+		if not self.popen:
+			return None
+		result = self.RawPollWait(self.popen, wait)
+		if self.consumer:
+			res = result
+			result = self.RawPollWait(self.consumer, wait)
+			if result != None and res == None:
+				self.popen.kill()
+				result = None
+			elif result == 0 and res != None and res != 0:
+				result = res
+		if result != None:
+			self.TidyUp()
+		return result
+
+	def Wait(self):
+		return self.Poll(wait=True)
+
+	def Kill(self):
+		if not self.popen:
+			return
+		self.popen.kill()
+		if self.consumer:
+			self.consumer.kill()
+
+def KillWork(worklist, verbosity):
+	for w in worklist:
+		w.Kill()
+	for w in worklist:
+		w.Wait()
+
+def NumberOfCPUs():
+	return os.sysconf("SC_NPROCESSORS_ONLN")
+
+def NanoSecsToSecsStr(x):
+	if x == None:
+		return ""
+	x = str(x)
+	if len(x) < 10:
+		x = "0" * (10 - len(x)) + x
+	return x[:len(x) - 9] + "." + x[-9:]
+
+def InsertOptionAfter(cmd, option, after):
+	try:
+		pos = cmd.index(after)
+		cmd.insert(pos + 1, option)
+	except:
+		cmd.append(option)
+
+def CreateWorkList(cmd, pipe_to, output_dir, cpus, time_ranges_by_cpu):
+	max_len = len(str(cpus[-1]))
+	cpu_dir_fmt = f"cpu-%.{max_len}u"
+	worklist = []
+	pos = 0
+	for cpu in cpus:
+		if cpu >= 0:
+			cpu_dir = os.path.join(output_dir, cpu_dir_fmt % cpu)
+			cpu_option = f"--cpu={cpu}"
+		else:
+			cpu_dir = output_dir
+			cpu_option = None
+
+		tr_dir_fmt = "time-range"
+
+		if len(time_ranges_by_cpu) > 1:
+			time_ranges = time_ranges_by_cpu[pos]
+			tr_dir_fmt += f"-{pos}"
+			pos += 1
+		else:
+			time_ranges = time_ranges_by_cpu[0]
+
+		max_len = len(str(len(time_ranges)))
+		tr_dir_fmt += f"-%.{max_len}u"
+
+		i = 0
+		for r in time_ranges:
+			if r == [None, None]:
+				time_option = None
+				work_output_dir = cpu_dir
+			else:
+				time_option = "--time=" + NanoSecsToSecsStr(r[0]) + "," + NanoSecsToSecsStr(r[1])
+				work_output_dir = os.path.join(cpu_dir, tr_dir_fmt % i)
+				i += 1
+			work_cmd = list(cmd)
+			if time_option != None:
+				InsertOptionAfter(work_cmd, time_option, "script")
+			if cpu_option != None:
+				InsertOptionAfter(work_cmd, cpu_option, "script")
+			w = Work(work_cmd, pipe_to, work_output_dir)
+			worklist.append(w)
+	return worklist
+
+def DoRunWork(worklist, nr_jobs, verbosity):
+	nr_to_do = len(worklist)
+	not_started = list(worklist)
+	running = []
+	done = []
+	chg = False
+	while True:
+		nr_done = len(done)
+		if chg and verbosity.normal:
+			nr_run = len(running)
+			print(f"\rThere are {nr_to_do} jobs: {nr_done} completed, {nr_run} running", flush=True, end=" ")
+			if verbosity.verbose:
+				print()
+			chg = False
+		if nr_done == nr_to_do:
+			break
+		while len(running) < nr_jobs and len(not_started):
+			w = not_started.pop(0)
+			running.append(w)
+			if verbosity.verbose:
+				print("Starting:", w.Command())
+			w.Start()
+			chg = True
+		if len(running):
+			time.sleep(0.1)
+		finished = []
+		not_finished = []
+		while len(running):
+			w = running.pop(0)
+			r = w.Poll()
+			if r == None:
+				not_finished.append(w)
+				continue
+			if r == 0:
+				if verbosity.verbose:
+					print("Finished:", w.Command())
+				finished.append(w)
+				chg = True
+				continue
+			if verbosity.normal and not verbosity.verbose:
+				print()
+			print("Job failed!\n    return code:", r, "\n    command:    ", w.Command())
+			if w.pipe_to:
+				print("    piped to:   ", w.pipe_to)
+			print("Killing outstanding jobs")
+			KillWork(not_finished, verbosity)
+			KillWork(running, verbosity)
+			return False
+		running = not_finished
+		done += finished
+	errorlist = []
+	for w in worklist:
+		errorlist += w.Errors()
+	if len(errorlist):
+		print("Errors:")
+		for e in errorlist:
+			print(e)
+	elif verbosity.normal:
+		print("\r", " " * 50, "\rAll jobs finished successfully", flush=True)
+	return True
+
+def RunWork(worklist, nr_jobs=NumberOfCPUs(), verbosity=Verbosity()):
+	try:
+		return DoRunWork(worklist, nr_jobs, verbosity)
+	except:
+		for w in worklist:
+			w.Kill()
+		raise
+	return True
+
+def ReadHeader(perf, file_name):
+	return subprocess.Popen([perf, "script", "--header-only", "--input", file_name], stdout=subprocess.PIPE).stdout.read().decode("utf-8")
+
+def ParseHeader(hdr):
+	result = {}
+	lines = hdr.split("\n")
+	for line in lines:
+		if ":" in line and line[0] == "#":
+			pos = line.index(":")
+			name = line[1:pos-1].strip()
+			value = line[pos+1:].strip()
+			if name in result:
+				orig_name = name
+				nr = 2
+				while True:
+					name = f"{orig_name} {nr}"
+					if name not in result:
+						break
+					nr += 1
+			result[name] = value
+	return result
+
+def HeaderField(hdr_dict, hdr_fld):
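+	# Fail loudly (raise) rather than return None, so callers can rely on the value.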
+ if hdr_fld not in hdr_dict: + raise Exception(f"'{hdr_fld}' missing from header information") + return hdr_dict[hdr_fld] + +# Represent the position of an option within a command string +# and provide the option value and/or remove the option +class OptPos(): + + def Init(self, opt_element=-1, value_element=-1, opt_pos=-1, value_pos=-1, error=None): + self.opt_element = opt_element # list element that contains option + self.value_element = value_element # list element that contains option value + self.opt_pos = opt_pos # string position of option + self.value_pos = value_pos # string position of value + self.error = error # error message string + + def __init__(self, args, short_name, long_name, default=None): + self.args = list(args) + self.default = default + n = 2 + len(long_name) + m = len(short_name) + pos = -1 + for opt in args: + pos += 1 + if m and opt[:2] == f"-{short_name}": + if len(opt) == 2: + if pos + 1 < len(args): + self.Init(pos, pos + 1, 0, 0) + else: + self.Init(error = f"-{short_name} option missing value") + else: + self.Init(pos, pos, 0, 2) + return + if opt[:n] == f"--{long_name}": + if len(opt) == n: + if pos + 1 < len(args): + self.Init(pos, pos + 1, 0, 0) + else: + self.Init(error = f"--{long_name} option missing value") + elif opt[n] == "=": + self.Init(pos, pos, 0, n + 1) + else: + self.Init(error = f"--{long_name} option expected '='") + return + if m and opt[:1] == "-" and opt[:2] != "--" and short_name in opt: + ipos = opt.index(short_name) + if "-" in opt[1:]: + hpos = opt[1:].index("-") + if hpos < ipos: + continue + if ipos + 1 == len(opt): + if pos + 1 < len(args): + self.Init(pos, pos + 1, ipos, 0) + else: + self.Init(error = f"-{short_name} option missing value") + else: + self.Init(pos, pos, ipos, ipos + 1) + return + self.Init() + + def Value(self): + if self.opt_element >= 0: + if self.opt_element != self.value_element: + return self.args[self.value_element] + else: + return self.args[self.value_element][self.value_pos:] + return self.default + + def Remove(self, args): + if self.opt_element == -1: + return + if self.opt_element != self.value_element: + del args[self.value_element] + if self.opt_pos: + args[self.opt_element] = args[self.opt_element][:self.opt_pos] + else: + del args[self.opt_element] + +def DetermineInputFileName(cmd): + p = OptPos(cmd, "i", "input", "perf.data") + if p.error: + raise Exception(f"perf command {p.error}") + file_name = p.Value() + if not os.path.exists(file_name): + raise Exception(f"perf command input file '{file_name}' not found") + return file_name + +def ReadOption(args, short_name, long_name, err_prefix, remove=False): + p = OptPos(args, short_name, long_name) + if p.error: + raise Exception(f"{err_prefix}{p.error}") + value = p.Value() + if remove: + p.Remove(args) + return value + +def ExtractOption(args, short_name, long_name, err_prefix): + return ReadOption(args, short_name, long_name, err_prefix, True) + +def ReadPerfOption(args, short_name, long_name): + return ReadOption(args, short_name, long_name, "perf command ") + +def ExtractPerfOption(args, short_name, long_name): + return ExtractOption(args, short_name, long_name, "perf command ") + +def PerfDoubleQuickCommands(cmd, file_name): + cpu_str = ReadPerfOption(cmd, "C", "cpu") + time_str = ReadPerfOption(cmd, "", "time") + # Use double-quick sampling to determine trace data density + times_cmd = ["perf", "script", "--ns", "--input", file_name, "--itrace=qqi"] + if cpu_str != None and cpu_str != "": + times_cmd.append(f"--cpu={cpu_str}") + if time_str 
!= None and time_str != "": + times_cmd.append(f"--time={time_str}") + cnts_cmd = list(times_cmd) + cnts_cmd.append("-Fcpu") + times_cmd.append("-Fcpu,time") + return cnts_cmd, times_cmd + +class CPUTimeRange(): + def __init__(self, cpu): + self.cpu = cpu + self.sample_cnt = 0 + self.time_ranges = None + self.interval = 0 + self.interval_remaining = 0 + self.remaining = 0 + self.tr_pos = 0 + +def CalcTimeRangesByCPU(line, cpu, cpu_time_ranges, max_time): + cpu_time_range = cpu_time_ranges[cpu] + cpu_time_range.remaining -= 1 + cpu_time_range.interval_remaining -= 1 + if cpu_time_range.remaining == 0: + cpu_time_range.time_ranges[cpu_time_range.tr_pos][1] = max_time + return + if cpu_time_range.interval_remaining == 0: + time = TimeVal(line[1][:-1], 0) + time_ranges = cpu_time_range.time_ranges + time_ranges[cpu_time_range.tr_pos][1] = time - 1 + time_ranges.append([time, max_time]) + cpu_time_range.tr_pos += 1 + cpu_time_range.interval_remaining = cpu_time_range.interval + +def CountSamplesByCPU(line, cpu, cpu_time_ranges): + try: + cpu_time_ranges[cpu].sample_cnt += 1 + except: + print("exception") + print("cpu", cpu) + print("len(cpu_time_ranges)", len(cpu_time_ranges)) + raise + +def ProcessCommandOutputLines(cmd, per_cpu, fn, *x): + # Assume CPU number is at beginning of line and enclosed by [] + pat = re.compile(r"\s*\[[0-9]+\]") + p = subprocess.Popen(cmd, stdout=subprocess.PIPE) + while True: + if line := p.stdout.readline(): + line = line.decode("utf-8") + if pat.match(line): + line = line.split() + if per_cpu: + # Assumes CPU number is enclosed by [] + cpu = int(line[0][1:-1]) + else: + cpu = 0 + fn(line, cpu, *x) + else: + break + p.wait() + +def IntersectTimeRanges(new_time_ranges, time_ranges): + pos = 0 + new_pos = 0 + # Can assume len(time_ranges) != 0 and len(new_time_ranges) != 0 + # Note also, there *must* be at least one intersection. 
+	while pos < len(time_ranges) and new_pos < len(new_time_ranges):
+		# new end < old start => no intersection, remove new
+		if new_time_ranges[new_pos][1] < time_ranges[pos][0]:
+			del new_time_ranges[new_pos]
+			continue
+		# new start > old end => no intersection, check next
+		if new_time_ranges[new_pos][0] > time_ranges[pos][1]:
+			pos += 1
+			if pos < len(time_ranges):
+				continue
+			# no next, so remove remaining
+			while new_pos < len(new_time_ranges):
+				del new_time_ranges[new_pos]
+			return
+		# Found an intersection
+		# new start < old start => adjust new start = old start
+		if new_time_ranges[new_pos][0] < time_ranges[pos][0]:
+			new_time_ranges[new_pos][0] = time_ranges[pos][0]
+		# new end > old end => keep the overlap, insert the remainder
+		if new_time_ranges[new_pos][1] > time_ranges[pos][1]:
+			r = [ time_ranges[pos][1] + 1, new_time_ranges[new_pos][1] ]
+			new_time_ranges[new_pos][1] = time_ranges[pos][1]
+			new_pos += 1
+			new_time_ranges.insert(new_pos, r)
+			continue
+		# new [start, end] is within old [start, end]
+		new_pos += 1
+
+def SplitTimeRangesByTraceDataDensity(time_ranges, cpus, nr, cmd, file_name, per_cpu, min_size, min_interval, verbosity):
+	if verbosity.normal:
+		print("\rAnalyzing...", flush=True, end=" ")
+		if verbosity.verbose:
+			print()
+	cnts_cmd, times_cmd = PerfDoubleQuickCommands(cmd, file_name)
+
+	nr_cpus = cpus[-1] + 1 if per_cpu else 1
+	if per_cpu:
+		cpu_time_ranges = [ CPUTimeRange(cpu) for cpu in range(nr_cpus) ]
+	else:
+		cpu_time_ranges = [ CPUTimeRange(-1) ]
+
+	if verbosity.debug:
+		print("nr_cpus", nr_cpus)
+		print("cnts_cmd", cnts_cmd)
+		print("times_cmd", times_cmd)
+
+	# Count the number of "double quick" samples per CPU
+	ProcessCommandOutputLines(cnts_cmd, per_cpu, CountSamplesByCPU, cpu_time_ranges)
+
+	tot = 0
+	mx = 0
+	for cpu_time_range in cpu_time_ranges:
+		cnt = cpu_time_range.sample_cnt
+		tot += cnt
+		if cnt > mx:
+			mx = cnt
+		if verbosity.debug:
+			print("cpu:", cpu_time_range.cpu, "sample_cnt", cnt)
+
+	if min_size < 1:
+		min_size = 1
+
+	if mx < min_size:
+		# Too little data to be worth splitting
+		if verbosity.debug:
+			print("Too little data to split by time")
+		if nr == 0:
+			nr = 1
+		return [ SplitTimeRangesIntoN(time_ranges, nr, min_interval) ]
+
+	if nr:
+		divisor = nr
+		min_size = 1
+	else:
+		divisor = NumberOfCPUs()
+
+	interval = int(round(tot / divisor, 0))
+	if interval < min_size:
+		interval = min_size
+
+	if verbosity.debug:
+		print("divisor", divisor)
+		print("min_size", min_size)
+		print("interval", interval)
+
+	min_time = time_ranges[0][0]
+	max_time = time_ranges[-1][1]
+
+	for cpu_time_range in cpu_time_ranges:
+		cnt = cpu_time_range.sample_cnt
+		if cnt == 0:
+			cpu_time_range.time_ranges = copy.deepcopy(time_ranges)
+			continue
+		# Adjust target interval for CPU to give approximately equal interval sizes
+		# Determine number of intervals, rounding to nearest integer
+		n = int(round(cnt / interval, 0))
+		if n < 1:
+			n = 1
+		# Determine interval size, rounding up
+		d, m = divmod(cnt, n)
+		if m:
+			d += 1
+		cpu_time_range.interval = d
+		cpu_time_range.interval_remaining = d
+		cpu_time_range.remaining = cnt
+		# Init. time ranges for each CPU with the start time
+		cpu_time_range.time_ranges = [ [min_time, max_time] ]
+
+	# Set time ranges so that the same number of "double quick" samples
+	# will fall into each time range.
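+	# e.g. a CPU with 1000 samples and an interval of 250 ends up with 4
+	# time ranges, each covering roughly 250 "double quick" samples.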
+ ProcessCommandOutputLines(times_cmd, per_cpu, CalcTimeRangesByCPU, cpu_time_ranges, max_time) + + for cpu_time_range in cpu_time_ranges: + if cpu_time_range.sample_cnt: + IntersectTimeRanges(cpu_time_range.time_ranges, time_ranges) + + return [cpu_time_ranges[cpu].time_ranges for cpu in cpus] + +def SplitSingleTimeRangeIntoN(time_range, n): + if n <= 1: + return [time_range] + start = time_range[0] + end = time_range[1] + duration = int((end - start + 1) / n) + if duration < 1: + return [time_range] + time_ranges = [] + for i in range(n): + time_ranges.append([start, start + duration - 1]) + start += duration + time_ranges[-1][1] = end + return time_ranges + +def TimeRangeDuration(r): + return r[1] - r[0] + 1 + +def TotalDuration(time_ranges): + duration = 0 + for r in time_ranges: + duration += TimeRangeDuration(r) + return duration + +def SplitTimeRangesByInterval(time_ranges, interval): + new_ranges = [] + for r in time_ranges: + duration = TimeRangeDuration(r) + n = duration / interval + n = int(round(n, 0)) + new_ranges += SplitSingleTimeRangeIntoN(r, n) + return new_ranges + +def SplitTimeRangesIntoN(time_ranges, n, min_interval): + if n <= len(time_ranges): + return time_ranges + duration = TotalDuration(time_ranges) + interval = duration / n + if interval < min_interval: + interval = min_interval + return SplitTimeRangesByInterval(time_ranges, interval) + +def RecombineTimeRanges(tr): + new_tr = copy.deepcopy(tr) + n = len(new_tr) + i = 1 + while i < len(new_tr): + # if prev end + 1 == cur start, combine them + if new_tr[i - 1][1] + 1 == new_tr[i][0]: + new_tr[i][0] = new_tr[i - 1][0] + del new_tr[i - 1] + else: + i += 1 + return new_tr + +def OpenTimeRangeEnds(time_ranges, min_time, max_time): + if time_ranges[0][0] <= min_time: + time_ranges[0][0] = None + if time_ranges[-1][1] >= max_time: + time_ranges[-1][1] = None + +def BadTimeStr(time_str): + raise Exception(f"perf command bad time option: '{time_str}'\nCheck also 'time of first sample' and 'time of last sample' in perf script --header-only") + +def ValidateTimeRanges(time_ranges, time_str): + n = len(time_ranges) + for i in range(n): + start = time_ranges[i][0] + end = time_ranges[i][1] + if i != 0 and start <= time_ranges[i - 1][1]: + BadTimeStr(time_str) + if start > end: + BadTimeStr(time_str) + +def TimeVal(s, dflt): + s = s.strip() + if s == "": + return dflt + a = s.split(".") + if len(a) > 2: + raise Exception(f"Bad time value'{s}'") + x = int(a[0]) + if x < 0: + raise Exception("Negative time not allowed") + x *= 1000000000 + if len(a) > 1: + x += int((a[1] + "000000000")[:9]) + return x + +def BadCPUStr(cpu_str): + raise Exception(f"perf command bad cpu option: '{cpu_str}'\nCheck also 'nrcpus avail' in perf script --header-only") + +def ParseTimeStr(time_str, min_time, max_time): + if time_str == None or time_str == "": + return [[min_time, max_time]] + time_ranges = [] + for r in time_str.split(): + a = r.split(",") + if len(a) != 2: + BadTimeStr(time_str) + try: + start = TimeVal(a[0], min_time) + end = TimeVal(a[1], max_time) + except: + BadTimeStr(time_str) + time_ranges.append([start, end]) + ValidateTimeRanges(time_ranges, time_str) + return time_ranges + +def ParseCPUStr(cpu_str, nr_cpus): + if cpu_str == None or cpu_str == "": + return [-1] + cpus = [] + for r in cpu_str.split(","): + a = r.split("-") + if len(a) < 1 or len(a) > 2: + BadCPUStr(cpu_str) + try: + start = int(a[0].strip()) + if len(a) > 1: + end = int(a[1].strip()) + else: + end = start + except: + BadCPUStr(cpu_str) + if start < 0 or end < 
0 or end < start or end >= nr_cpus: + BadCPUStr(cpu_str) + cpus.extend(range(start, end + 1)) + cpus = list(set(cpus)) # Remove duplicates + cpus.sort() + return cpus + +class ParallelPerf(): + + def __init__(self, a): + for arg_name in vars(a): + setattr(self, arg_name, getattr(a, arg_name)) + self.orig_nr = self.nr + self.orig_cmd = list(self.cmd) + self.perf = self.cmd[0] + if os.path.exists(self.output_dir): + raise Exception(f"Output '{self.output_dir}' already exists") + if self.jobs < 0 or self.nr < 0 or self.interval < 0: + raise Exception("Bad options (negative values): try -h option for help") + if self.nr != 0 and self.interval != 0: + raise Exception("Cannot specify number of time subdivisions and time interval") + if self.jobs == 0: + self.jobs = NumberOfCPUs() + if self.nr == 0 and self.interval == 0: + if self.per_cpu: + self.nr = 1 + else: + self.nr = self.jobs + + def Init(self): + if self.verbosity.debug: + print("cmd", self.cmd) + self.file_name = DetermineInputFileName(self.cmd) + self.hdr = ReadHeader(self.perf, self.file_name) + self.hdr_dict = ParseHeader(self.hdr) + self.cmd_line = HeaderField(self.hdr_dict, "cmdline") + + def ExtractTimeInfo(self): + self.min_time = TimeVal(HeaderField(self.hdr_dict, "time of first sample"), 0) + self.max_time = TimeVal(HeaderField(self.hdr_dict, "time of last sample"), 0) + self.time_str = ExtractPerfOption(self.cmd, "", "time") + self.time_ranges = ParseTimeStr(self.time_str, self.min_time, self.max_time) + if self.verbosity.debug: + print("time_ranges", self.time_ranges) + + def ExtractCPUInfo(self): + if self.per_cpu: + nr_cpus = int(HeaderField(self.hdr_dict, "nrcpus avail")) + self.cpu_str = ExtractPerfOption(self.cmd, "C", "cpu") + if self.cpu_str == None or self.cpu_str == "": + self.cpus = [ x for x in range(nr_cpus) ] + else: + self.cpus = ParseCPUStr(self.cpu_str, nr_cpus) + else: + self.cpu_str = None + self.cpus = [-1] + if self.verbosity.debug: + print("cpus", self.cpus) + + def IsIntelPT(self): + return self.cmd_line.find("intel_pt") >= 0 + + def SplitTimeRanges(self): + if self.IsIntelPT() and self.interval == 0: + self.split_time_ranges_for_each_cpu = \ + SplitTimeRangesByTraceDataDensity(self.time_ranges, self.cpus, self.orig_nr, + self.orig_cmd, self.file_name, self.per_cpu, + self.min_size, self.min_interval, self.verbosity) + elif self.nr: + self.split_time_ranges_for_each_cpu = [ SplitTimeRangesIntoN(self.time_ranges, self.nr, self.min_interval) ] + else: + self.split_time_ranges_for_each_cpu = [ SplitTimeRangesByInterval(self.time_ranges, self.interval) ] + + def CheckTimeRanges(self): + for tr in self.split_time_ranges_for_each_cpu: + # Re-combined time ranges should be the same + new_tr = RecombineTimeRanges(tr) + if new_tr != self.time_ranges: + if self.verbosity.debug: + print("tr", tr) + print("new_tr", new_tr) + raise Exception("Self test failed!") + + def OpenTimeRangeEnds(self): + for time_ranges in self.split_time_ranges_for_each_cpu: + OpenTimeRangeEnds(time_ranges, self.min_time, self.max_time) + + def CreateWorkList(self): + self.worklist = CreateWorkList(self.cmd, self.pipe_to, self.output_dir, self.cpus, self.split_time_ranges_for_each_cpu) + + def PerfDataRecordedPerCPU(self): + if "--per-thread" in self.cmd_line.split(): + return False + return True + + def DefaultToPerCPU(self): + # --no-per-cpu option takes precedence + if self.no_per_cpu: + return False + if not self.PerfDataRecordedPerCPU(): + return False + # Default to per-cpu for Intel PT data that was recorded per-cpu, + # because 
decoding can be done for each CPU separately. + if self.IsIntelPT(): + return True + return False + + def Config(self): + self.Init() + self.ExtractTimeInfo() + if not self.per_cpu: + self.per_cpu = self.DefaultToPerCPU() + if self.verbosity.debug: + print("per_cpu", self.per_cpu) + self.ExtractCPUInfo() + self.SplitTimeRanges() + if self.verbosity.self_test: + self.CheckTimeRanges() + # Prefer open-ended time range to starting / ending with min_time / max_time resp. + self.OpenTimeRangeEnds() + self.CreateWorkList() + + def Run(self): + if self.dry_run: + print(len(self.worklist),"jobs:") + for w in self.worklist: + print(w.Command()) + return True + result = RunWork(self.worklist, self.jobs, verbosity=self.verbosity) + if self.verbosity.verbose: + print(glb_prog_name, "done") + return result + +def RunParallelPerf(a): + pp = ParallelPerf(a) + pp.Config() + return pp.Run() + +def Main(args): + ap = argparse.ArgumentParser( + prog=glb_prog_name, formatter_class = argparse.RawDescriptionHelpFormatter, + description = +""" +Run a perf script command multiple times in parallel, using perf script options +--cpu and --time so that each job processes a different chunk of the data. +""", + epilog = +""" +Follow the options by '--' and then the perf script command e.g. + + $ perf record -a -- sleep 10 + $ parallel-perf.py --nr=4 -- perf script --ns + All jobs finished successfully + $ tree parallel-perf-output/ + parallel-perf-output/ + ├── time-range-0 + │  ├── cmd.txt + │  └── out.txt + ├── time-range-1 + │  ├── cmd.txt + │  └── out.txt + ├── time-range-2 + │  ├── cmd.txt + │  └── out.txt + └── time-range-3 + ├── cmd.txt + └── out.txt + $ find parallel-perf-output -name cmd.txt | sort | xargs grep -H . + parallel-perf-output/time-range-0/cmd.txt:perf script --time=,9466.504461499 --ns + parallel-perf-output/time-range-1/cmd.txt:perf script --time=9466.504461500,9469.005396999 --ns + parallel-perf-output/time-range-2/cmd.txt:perf script --time=9469.005397000,9471.506332499 --ns + parallel-perf-output/time-range-3/cmd.txt:perf script --time=9471.506332500, --ns + +Any perf script command can be used, including the use of perf script options +--dlfilter and --script, so that the benefit of running parallel jobs +naturally extends to them also. + +If option --pipe-to is used, standard output is first piped through that +command. Beware, if the command fails (e.g. grep with no matches), it will be +considered a fatal error. + +Final standard output is redirected to files named out.txt in separate +subdirectories under the output directory. Similarly, standard error is +written to files named err.txt. In addition, files named cmd.txt contain the +corresponding perf script command. After processing, err.txt files are removed +if they are empty. + +If any job exits with a non-zero exit code, then all jobs are killed and no +more are started. A message is printed if any job results in a non-empty +err.txt file. + +There is a separate output subdirectory for each time range. If the --per-cpu +option is used, these are further grouped under cpu-n subdirectories, e.g. 
+ + $ parallel-perf.py --per-cpu --nr=2 -- perf script --ns --cpu=0,1 + All jobs finished successfully + $ tree parallel-perf-output + parallel-perf-output/ + ├── cpu-0 + │  ├── time-range-0 + │  │  ├── cmd.txt + │  │  └── out.txt + │  └── time-range-1 + │  ├── cmd.txt + │  └── out.txt + └── cpu-1 + ├── time-range-0 + │  ├── cmd.txt + │  └── out.txt + └── time-range-1 + ├── cmd.txt + └── out.txt + $ find parallel-perf-output -name cmd.txt | sort | xargs grep -H . + parallel-perf-output/cpu-0/time-range-0/cmd.txt:perf script --cpu=0 --time=,9469.005396999 --ns + parallel-perf-output/cpu-0/time-range-1/cmd.txt:perf script --cpu=0 --time=9469.005397000, --ns + parallel-perf-output/cpu-1/time-range-0/cmd.txt:perf script --cpu=1 --time=,9469.005396999 --ns + parallel-perf-output/cpu-1/time-range-1/cmd.txt:perf script --cpu=1 --time=9469.005397000, --ns + +Subdivisions of time range, and cpus if the --per-cpu option is used, are +expressed by the --time and --cpu perf script options respectively. If the +supplied perf script command has a --time option, then that time range is +subdivided, otherwise the time range given by 'time of first sample' to +'time of last sample' is used (refer perf script --header-only). Similarly, the +supplied perf script command may provide a --cpu option, and only those CPUs +will be processed. + +To prevent time intervals becoming too small, the --min-interval option can +be used. + +Note there is special handling for processing Intel PT traces. If an interval is +not specified and the perf record command contained the intel_pt event, then the +time range will be subdivided in order to produce subdivisions that contain +approximately the same amount of trace data. That is accomplished by counting +double-quick (--itrace=qqi) samples, and choosing time ranges that encompass +approximately the same number of samples. In that case, time ranges may not be +the same for each CPU processed. For Intel PT, --per-cpu is the default, but +that can be overridden by --no-per-cpu. Note, for Intel PT, double-quick +decoding produces 1 sample for each PSB synchronization packet, which in turn +come after a certain number of bytes output, determined by psb_period (refer +perf Intel PT documentation). The minimum number of double-quick samples that +will define a time range can be set by the --min_size option, which defaults to +64. +""") + ap.add_argument("-o", "--output-dir", default="parallel-perf-output", help="output directory (default 'parallel-perf-output')") + ap.add_argument("-j", "--jobs", type=int, default=0, help="maximum number of jobs to run in parallel at one time (default is the number of CPUs)") + ap.add_argument("-n", "--nr", type=int, default=0, help="number of time subdivisions (default is the number of jobs)") + ap.add_argument("-i", "--interval", type=float, default=0, help="subdivide the time range using this time interval (in seconds e.g. 
0.1 for a tenth of a second)") + ap.add_argument("-c", "--per-cpu", action="store_true", help="process data for each CPU in parallel") + ap.add_argument("-m", "--min-interval", type=float, default=glb_min_interval, help=f"minimum interval (default {glb_min_interval} seconds)") + ap.add_argument("-p", "--pipe-to", help="command to pipe output to (optional)") + ap.add_argument("-N", "--no-per-cpu", action="store_true", help="do not process data for each CPU in parallel") + ap.add_argument("-b", "--min_size", type=int, default=glb_min_samples, help="minimum data size (for Intel PT in PSBs)") + ap.add_argument("-D", "--dry-run", action="store_true", help="do not run any jobs, just show the perf script commands") + ap.add_argument("-q", "--quiet", action="store_true", help="do not print any messages except errors") + ap.add_argument("-v", "--verbose", action="store_true", help="print more messages") + ap.add_argument("-d", "--debug", action="store_true", help="print debugging messages") + cmd_line = list(args) + try: + split_pos = cmd_line.index("--") + cmd = cmd_line[split_pos + 1:] + args = cmd_line[:split_pos] + except: + cmd = None + args = cmd_line + a = ap.parse_args(args=args[1:]) + a.cmd = cmd + a.verbosity = Verbosity(a.quiet, a.verbose, a.debug) + try: + if a.cmd == None: + if len(args) <= 1: + ap.print_help() + return True + raise Exception("Command line must contain '--' before perf command") + return RunParallelPerf(a) + except Exception as e: + print("Fatal error: ", str(e)) + if a.debug: + raise + return False + +if __name__ == "__main__": + if not Main(sys.argv): + sys.exit(1) diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index 53ba9c3e20e0..c7f9d9676095 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 perf-y += builtin-test.o -perf-y += builtin-test-list.o +perf-y += tests-scripts.o perf-y += parse-events.o perf-y += dso-data.o perf-y += attr.o diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c index 0173f5402a35..98956e0e0765 100644 --- a/tools/perf/tests/bitmap.c +++ b/tools/perf/tests/bitmap.c @@ -11,18 +11,19 @@ static unsigned long *get_bitmap(const char *str, int nbits) { struct perf_cpu_map *map = perf_cpu_map__new(str); - unsigned long *bm = NULL; - int i; + unsigned long *bm; bm = bitmap_zalloc(nbits); if (map && bm) { - for (i = 0; i < perf_cpu_map__nr(map); i++) - __set_bit(perf_cpu_map__cpu(map, i).cpu, bm); + int i; + struct perf_cpu cpu; + + perf_cpu_map__for_each_cpu(cpu, i, map) + __set_bit(cpu.cpu, bm); } - if (map) - perf_cpu_map__put(map); + perf_cpu_map__put(map); return bm; } diff --git a/tools/perf/tests/builtin-test-list.c b/tools/perf/tests/builtin-test-list.c deleted file mode 100644 index a65b9e547d82..000000000000 --- a/tools/perf/tests/builtin-test-list.c +++ /dev/null @@ -1,207 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include <dirent.h> -#include <errno.h> -#include <fcntl.h> -#include <linux/ctype.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/zalloc.h> -#include <string.h> -#include <stdlib.h> -#include <sys/types.h> -#include <unistd.h> -#include <subcmd/exec-cmd.h> -#include <subcmd/parse-options.h> -#include <sys/wait.h> -#include <sys/stat.h> -#include "builtin.h" -#include "builtin-test-list.h" -#include "color.h" -#include "debug.h" -#include "hist.h" -#include "intlist.h" -#include "string2.h" -#include "symbol.h" -#include "tests.h" -#include "util/rlimit.h" - - -/* - * As this is a singleton built once for 
the run of the process, there is - * no value in trying to free it and just let it stay around until process - * exits when it's cleaned up. - */ -static size_t files_num = 0; -static struct script_file *files = NULL; -static int files_max_width = 0; - -static const char *shell_tests__dir(char *path, size_t size) -{ - const char *devel_dirs[] = { "./tools/perf/tests", "./tests", }; - char *exec_path; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(devel_dirs); ++i) { - struct stat st; - - if (!lstat(devel_dirs[i], &st)) { - scnprintf(path, size, "%s/shell", devel_dirs[i]); - if (!lstat(devel_dirs[i], &st)) - return path; - } - } - - /* Then installed path. */ - exec_path = get_argv_exec_path(); - scnprintf(path, size, "%s/tests/shell", exec_path); - free(exec_path); - return path; -} - -static const char *shell_test__description(char *description, size_t size, - const char *path, const char *name) -{ - FILE *fp; - char filename[PATH_MAX]; - int ch; - - path__join(filename, sizeof(filename), path, name); - fp = fopen(filename, "r"); - if (!fp) - return NULL; - - /* Skip first line - should be #!/bin/sh Shebang */ - do { - ch = fgetc(fp); - } while (ch != EOF && ch != '\n'); - - description = fgets(description, size, fp); - fclose(fp); - - /* Assume first char on line is omment everything after that desc */ - return description ? strim(description + 1) : NULL; -} - -/* Is this full file path a shell script */ -static bool is_shell_script(const char *path) -{ - const char *ext; - - ext = strrchr(path, '.'); - if (!ext) - return false; - if (!strcmp(ext, ".sh")) { /* Has .sh extension */ - if (access(path, R_OK | X_OK) == 0) /* Is executable */ - return true; - } - return false; -} - -/* Is this file in this dir a shell script (for test purposes) */ -static bool is_test_script(const char *path, const char *name) -{ - char filename[PATH_MAX]; - - path__join(filename, sizeof(filename), path, name); - if (!is_shell_script(filename)) return false; - return true; -} - -/* Duplicate a string and fall over and die if we run out of memory */ -static char *strdup_check(const char *str) -{ - char *newstr; - - newstr = strdup(str); - if (!newstr) { - pr_err("Out of memory while duplicating test script string\n"); - abort(); - } - return newstr; -} - -static void append_script(const char *dir, const char *file, const char *desc) -{ - struct script_file *files_tmp; - size_t files_num_tmp; - int width; - - files_num_tmp = files_num + 1; - if (files_num_tmp >= SIZE_MAX) { - pr_err("Too many script files\n"); - abort(); - } - /* Realloc is good enough, though we could realloc by chunks, not that - * anyone will ever measure performance here */ - files_tmp = realloc(files, - (files_num_tmp + 1) * sizeof(struct script_file)); - if (files_tmp == NULL) { - pr_err("Out of memory while building test list\n"); - abort(); - } - /* Add file to end and NULL terminate the struct array */ - files = files_tmp; - files_num = files_num_tmp; - files[files_num - 1].dir = strdup_check(dir); - files[files_num - 1].file = strdup_check(file); - files[files_num - 1].desc = strdup_check(desc); - files[files_num].dir = NULL; - files[files_num].file = NULL; - files[files_num].desc = NULL; - - width = strlen(desc); /* Track max width of desc */ - if (width > files_max_width) - files_max_width = width; -} - -static void append_scripts_in_dir(const char *path) -{ - struct dirent **entlist; - struct dirent *ent; - int n_dirs, i; - char filename[PATH_MAX]; - - /* List files, sorted by alpha */ - n_dirs = scandir(path, &entlist, NULL, 
alphasort); - if (n_dirs == -1) - return; - for (i = 0; i < n_dirs && (ent = entlist[i]); i++) { - if (ent->d_name[0] == '.') - continue; /* Skip hidden files */ - if (is_test_script(path, ent->d_name)) { /* It's a test */ - char bf[256]; - const char *desc = shell_test__description - (bf, sizeof(bf), path, ent->d_name); - - if (desc) /* It has a desc line - valid script */ - append_script(path, ent->d_name, desc); - } else if (is_directory(path, ent)) { /* Scan the subdir */ - path__join(filename, sizeof(filename), - path, ent->d_name); - append_scripts_in_dir(filename); - } - } - for (i = 0; i < n_dirs; i++) /* Clean up */ - zfree(&entlist[i]); - free(entlist); -} - -const struct script_file *list_script_files(void) -{ - char path_dir[PATH_MAX]; - const char *path; - - if (files) - return files; /* Singleton - we already know our list */ - - path = shell_tests__dir(path_dir, sizeof(path_dir)); /* Walk dir */ - append_scripts_in_dir(path); - - return files; -} - -int list_script_max_width(void) -{ - list_script_files(); /* Ensure we have scanned all scripts */ - return files_max_width; -} diff --git a/tools/perf/tests/builtin-test-list.h b/tools/perf/tests/builtin-test-list.h deleted file mode 100644 index eb81f3aa6683..000000000000 --- a/tools/perf/tests/builtin-test-list.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -struct script_file { - char *dir; - char *file; - char *desc; -}; - -/* List available script tests to run - singleton - never freed */ -const struct script_file *list_script_files(void); -/* Get maximum width of description string */ -int list_script_max_width(void); diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 4a5973f9bb9b..c3d84b67ca8e 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -6,6 +6,7 @@ */ #include <fcntl.h> #include <errno.h> +#include <poll.h> #include <unistd.h> #include <string.h> #include <stdlib.h> @@ -21,17 +22,28 @@ #include "debug.h" #include "color.h" #include <subcmd/parse-options.h> +#include <subcmd/run-command.h> #include "string2.h" #include "symbol.h" #include "util/rlimit.h" +#include "util/strbuf.h" #include <linux/kernel.h> #include <linux/string.h> #include <subcmd/exec-cmd.h> #include <linux/zalloc.h> -#include "builtin-test-list.h" +#include "tests-scripts.h" +/* + * Command line option to not fork the test running in the same process and + * making them easier to debug. + */ static bool dont_fork; +/* Don't fork the tests in parallel and wait for their completion. */ +static bool sequential = true; +/* Do it in parallel, lacks infrastructure to avoid running tests that clash for resources, + * So leave it as the developers choice to enable while working on the needed infra */ +static bool parallel; const char *dso_to_test; const char *test_objdump_path = "objdump"; @@ -130,6 +142,7 @@ static struct test_suite *generic_tests[] = { static struct test_suite **tests[] = { generic_tests, arch_tests, + NULL, /* shell tests created at runtime. */ }; static struct test_workload *workloads[] = { @@ -208,76 +221,36 @@ static bool perf_test__matches(const char *desc, int curr, int argc, const char return false; } -static int run_test(struct test_suite *test, int subtest) -{ - int status, err = -1, child = dont_fork ? 
0 : fork(); - char sbuf[STRERR_BUFSIZE]; - - if (child < 0) { - pr_err("failed to fork test: %s\n", - str_error_r(errno, sbuf, sizeof(sbuf))); - return -1; - } - - if (!child) { - if (!dont_fork) { - pr_debug("test child forked, pid %d\n", getpid()); - - if (verbose <= 0) { - int nullfd = open("/dev/null", O_WRONLY); - - if (nullfd >= 0) { - close(STDERR_FILENO); - close(STDOUT_FILENO); - - dup2(nullfd, STDOUT_FILENO); - dup2(STDOUT_FILENO, STDERR_FILENO); - close(nullfd); - } - } else { - signal(SIGSEGV, sighandler_dump_stack); - signal(SIGFPE, sighandler_dump_stack); - } - } - - err = test_function(test, subtest)(test, subtest); - if (!dont_fork) - exit(err); - } - - if (!dont_fork) { - wait(&status); +struct child_test { + struct child_process process; + struct test_suite *test; + int test_num; + int subtest; +}; - if (WIFEXITED(status)) { - err = (signed char)WEXITSTATUS(status); - pr_debug("test child finished with %d\n", err); - } else if (WIFSIGNALED(status)) { - err = -1; - pr_debug("test child interrupted\n"); - } - } +static int run_test_child(struct child_process *process) +{ + struct child_test *child = container_of(process, struct child_test, process); + int err; - return err; + pr_debug("--- start ---\n"); + pr_debug("test child forked, pid %d\n", getpid()); + err = test_function(child->test, child->subtest)(child->test, child->subtest); + pr_debug("---- end(%d) ----\n", err); + fflush(NULL); + return -err; } -#define for_each_test(j, k, t) \ - for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0) \ - while ((t = tests[j][k++]) != NULL) - -static int test_and_print(struct test_suite *t, int subtest) +static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width) { - int err; - - pr_debug("\n--- start ---\n"); - err = run_test(t, subtest); - pr_debug("---- end ----\n"); + if (has_subtests(t)) { + int subw = width > 2 ? width - 2 : width; - if (!has_subtests(t)) - pr_debug("%s:", t->desc); - else - pr_debug("%s subtest %d:", t->desc, subtest + 1); + pr_info("%3d.%1d: %-*s:", i + 1, subtest + 1, subw, test_description(t, subtest)); + } else + pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest)); - switch (err) { + switch (result) { case TEST_OK: pr_info(" Ok\n"); break; @@ -296,99 +269,161 @@ static int test_and_print(struct test_suite *t, int subtest) break; } - return err; + return 0; } -struct shell_test { - const char *dir; - const char *file; -}; - -static int shell_test__run(struct test_suite *test, int subdir __maybe_unused) +static int finish_test(struct child_test *child_test, int width) { - int err; - char script[PATH_MAX]; - struct shell_test *st = test->priv; + struct test_suite *t = child_test->test; + int i = child_test->test_num; + int subi = child_test->subtest; + int err = child_test->process.err; + bool err_done = err <= 0; + struct strbuf err_output = STRBUF_INIT; + int ret; - path__join(script, sizeof(script) - 3, st->dir, st->file); + /* + * For test suites with subtests, display the suite name ahead of the + * sub test names. + */ + if (has_subtests(t) && subi == 0) + pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1)); - if (verbose > 0) - strncat(script, " -v", sizeof(script) - strlen(script) - 1); + /* + * Busy loop reading from the child's stdout/stderr that are set to be + * non-blocking until EOF. 
+ */ + if (!err_done) + fcntl(err, F_SETFL, O_NONBLOCK); + if (verbose > 1) { + if (has_subtests(t)) + pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi)); + else + pr_info("%3d: %s:\n", i + 1, test_description(t, -1)); + } + while (!err_done) { + struct pollfd pfds[1] = { + { .fd = err, + .events = POLLIN | POLLERR | POLLHUP | POLLNVAL, + }, + }; + char buf[512]; + ssize_t len; - err = system(script); - if (!err) - return TEST_OK; + /* Poll to avoid excessive spinning, timeout set for 100ms. */ + poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100); + if (!err_done && pfds[0].revents) { + errno = 0; + len = read(err, buf, sizeof(buf) - 1); - return WEXITSTATUS(err) == 2 ? TEST_SKIP : TEST_FAIL; + if (len <= 0) { + err_done = errno != EAGAIN; + } else { + buf[len] = '\0'; + if (verbose > 1) + fprintf(stdout, "%s", buf); + else + strbuf_addstr(&err_output, buf); + } + } + } + /* Clean up child process. */ + ret = finish_command(&child_test->process); + if (verbose == 1 && ret == TEST_FAIL) { + /* Add header for test that was skipped above. */ + if (has_subtests(t)) + pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi)); + else + pr_info("%3d: %s:\n", i + 1, test_description(t, -1)); + fprintf(stderr, "%s", err_output.buf); + } + strbuf_release(&err_output); + print_test_result(t, i, subi, ret, width); + if (err > 0) + close(err); + return 0; } -static int run_shell_tests(int argc, const char *argv[], int i, int width, - struct intlist *skiplist) +static int start_test(struct test_suite *test, int i, int subi, struct child_test **child, + int width) { - struct shell_test st; - const struct script_file *files, *file; + int err; - files = list_script_files(); - if (!files) + *child = NULL; + if (dont_fork) { + pr_debug("--- start ---\n"); + err = test_function(test, subi)(test, subi); + pr_debug("---- end ----\n"); + print_test_result(test, i, subi, err, width); return 0; - for (file = files; file->dir; file++) { - int curr = i++; - struct test_case test_cases[] = { - { - .desc = file->desc, - .run_case = shell_test__run, - }, - { .name = NULL, } - }; - struct test_suite test_suite = { - .desc = test_cases[0].desc, - .test_cases = test_cases, - .priv = &st, - }; - st.dir = file->dir; - - if (test_suite.desc == NULL || - !perf_test__matches(test_suite.desc, curr, argc, argv)) - continue; - - st.file = file->file; - pr_info("%3d: %-*s:", i, width, test_suite.desc); - - if (intlist__find(skiplist, i)) { - color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); - continue; - } + } - test_and_print(&test_suite, 0); + *child = zalloc(sizeof(**child)); + if (!*child) + return -ENOMEM; + + (*child)->test = test; + (*child)->test_num = i; + (*child)->subtest = subi; + (*child)->process.pid = -1; + (*child)->process.no_stdin = 1; + if (verbose <= 0) { + (*child)->process.no_stdout = 1; + (*child)->process.no_stderr = 1; + } else { + (*child)->process.stdout_to_stderr = 1; + (*child)->process.out = -1; + (*child)->process.err = -1; } - return 0; + (*child)->process.no_exec_cmd = run_test_child; + err = start_command(&(*child)->process); + if (err || !sequential) + return err; + return finish_test(*child, width); } +#define for_each_test(j, k, t) \ + for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0) \ + while ((t = tests[j][k++]) != NULL) + static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) { struct test_suite *t; unsigned int j, k; int i = 0; - int width = list_script_max_width(); + int width = 0; + size_t num_tests = 0; + struct 
child_test **child_tests; + int child_test_num = 0; for_each_test(j, k, t) { int len = strlen(test_description(t, -1)); if (width < len) width = len; + + if (has_subtests(t)) { + for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) { + len = strlen(test_description(t, subi)); + if (width < len) + width = len; + num_tests++; + } + } else { + num_tests++; + } } + child_tests = calloc(num_tests, sizeof(*child_tests)); + if (!child_tests) + return -ENOMEM; for_each_test(j, k, t) { int curr = i++; - int subi; if (!perf_test__matches(test_description(t, -1), curr, argc, argv)) { bool skip = true; - int subn; - - subn = num_subtests(t); - for (subi = 0; subi < subn; subi++) { + for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) { if (perf_test__matches(test_description(t, subi), curr, argc, argv)) skip = false; @@ -398,74 +433,45 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) continue; } - pr_info("%3d: %-*s:", i, width, test_description(t, -1)); - if (intlist__find(skiplist, i)) { + pr_info("%3d: %-*s:", curr + 1, width, test_description(t, -1)); color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); continue; } if (!has_subtests(t)) { - test_and_print(t, -1); - } else { - int subn = num_subtests(t); - /* - * minus 2 to align with normal testcases. - * For subtest we print additional '.x' in number. - * for example: - * - * 35: Test LLVM searching and compiling : - * 35.1: Basic BPF llvm compiling test : Ok - */ - int subw = width > 2 ? width - 2 : width; - - if (subn <= 0) { - color_fprintf(stderr, PERF_COLOR_YELLOW, - " Skip (not compiled in)\n"); - continue; - } - pr_info("\n"); - - for (subi = 0; subi < subn; subi++) { - int len = strlen(test_description(t, subi)); + int err = start_test(t, curr, -1, &child_tests[child_test_num++], width); - if (subw < len) - subw = len; + if (err) { + /* TODO: if !sequential waitpid the already forked children. 
*/ + free(child_tests); + return err; } + } else { + for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) { + int err; - for (subi = 0; subi < subn; subi++) { if (!perf_test__matches(test_description(t, subi), curr, argc, argv)) continue; - pr_info("%3d.%1d: %-*s:", i, subi + 1, subw, - test_description(t, subi)); - test_and_print(t, subi); + err = start_test(t, curr, subi, &child_tests[child_test_num++], + width); + if (err) + return err; } } } + for (i = 0; i < child_test_num; i++) { + if (!sequential) { + int ret = finish_test(child_tests[i], width); - return run_shell_tests(argc, argv, i, width, skiplist); -} - -static int perf_test__list_shell(int argc, const char **argv, int i) -{ - const struct script_file *files, *file; - - files = list_script_files(); - if (!files) - return 0; - for (file = files; file->dir; file++) { - int curr = i++; - struct test_suite t = { - .desc = file->desc - }; - - if (!perf_test__matches(t.desc, curr, argc, argv)) - continue; - - pr_info("%3d: %s\n", i, t.desc); + if (ret) + return ret; + } + free(child_tests[i]); } + free(child_tests); return 0; } @@ -492,9 +498,6 @@ static int perf_test__list(int argc, const char **argv) test_description(t, subi)); } } - - perf_test__list_shell(argc, argv, i); - return 0; } @@ -536,6 +539,9 @@ int cmd_test(int argc, const char **argv) "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('F', "dont-fork", &dont_fork, "Do not fork for testcase"), + OPT_BOOLEAN('p', "parallel", ¶llel, "Run the tests in parallel"), + OPT_BOOLEAN('S', "sequential", &sequential, + "Run the tests one after another rather than in parallel"), OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"), OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"), OPT_STRING(0, "objdump", &test_objdump_path, "path", @@ -554,6 +560,7 @@ int cmd_test(int argc, const char **argv) /* Unbuffered output */ setvbuf(stdout, NULL, _IONBF, 0); + tests[2] = create_script_test_suites(); argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0); if (argc >= 1 && !strcmp(argv[0], "list")) return perf_test__list(argc - 1, argv + 1); @@ -561,6 +568,11 @@ int cmd_test(int argc, const char **argv) if (workload) return run_workload(workload, argc, argv); + if (dont_fork) + sequential = true; + else if (parallel) + sequential = false; + symbol_conf.priv_size = sizeof(int); symbol_conf.try_vmlinux_path = true; diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index 7a3a7bbbec71..27c82cfb7e7d 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -253,9 +253,9 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, goto out; } dso = map__dso(al.map); - pr_debug("File is: %s\n", dso->long_name); + pr_debug("File is: %s\n", dso__long_name(dso)); - if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) { + if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) { pr_debug("Unexpected kernel address - skipping\n"); goto out; } @@ -274,7 +274,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, * modules to manage long jumps. Check if the ip offset falls in stubs * sections for kernel modules. 
And skip module address after text end */ - if (dso->is_kmod && al.addr > dso->text_end) { + if (dso__is_kmod(dso) && al.addr > dso__text_end(dso)) { pr_debug("skipping the module address %#"PRIx64" after text end\n", al.addr); goto out; } @@ -315,7 +315,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, state->done[state->done_cnt++] = map__start(al.map); } - objdump_name = dso->long_name; + objdump_name = dso__long_name(dso); if (dso__needs_decompress(dso)) { if (dso__decompress_kmodule_path(dso, objdump_name, decomp_name, @@ -637,11 +637,11 @@ static int do_test_code_reading(bool try_kcore) evlist__config(evlist, &opts, NULL); - evsel = evlist__first(evlist); - - evsel->core.attr.comm = 1; - evsel->core.attr.disabled = 1; - evsel->core.attr.enable_on_exec = 0; + evlist__for_each_entry(evlist, evsel) { + evsel->core.attr.comm = 1; + evsel->core.attr.disabled = 1; + evsel->core.attr.enable_on_exec = 0; + } ret = evlist__open(evlist); if (ret < 0) { diff --git a/tools/perf/tests/config-fragments/config b/tools/perf/tests/config-fragments/config index c340b3195fca..4fca12851016 100644 --- a/tools/perf/tests/config-fragments/config +++ b/tools/perf/tests/config-fragments/config @@ -9,3 +9,6 @@ CONFIG_GENERIC_TRACER=y CONFIG_FTRACE=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BRANCH_PROFILE_NONE=y +CONFIG_KPROBES=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c index 2d67422c1222..5286ae8bd2d7 100644 --- a/tools/perf/tests/dso-data.c +++ b/tools/perf/tests/dso-data.c @@ -10,6 +10,7 @@ #include <sys/resource.h> #include <api/fs/fs.h> #include "dso.h" +#include "dsos.h" #include "machine.h" #include "symbol.h" #include "tests.h" @@ -123,9 +124,10 @@ static int test__dso_data(struct test_suite *test __maybe_unused, int subtest __ TEST_ASSERT_VAL("No test file", file); memset(&machine, 0, sizeof(machine)); + dsos__init(&machine.dsos); - dso = dso__new((const char *)file); - + dso = dso__new(file); + TEST_ASSERT_VAL("Failed to add dso", !dsos__add(&machine.dsos, dso)); TEST_ASSERT_VAL("Failed to access to dso", dso__data_fd(dso, &machine) >= 0); @@ -170,6 +172,7 @@ static int test__dso_data(struct test_suite *test __maybe_unused, int subtest __ } dso__put(dso); + dsos__exit(&machine.dsos); unlink(file); return 0; } @@ -199,40 +202,35 @@ static long open_files_cnt(void) return nr - 1; } -static struct dso **dsos; - -static int dsos__create(int cnt, int size) +static int dsos__create(int cnt, int size, struct dsos *dsos) { int i; - dsos = malloc(sizeof(*dsos) * cnt); - TEST_ASSERT_VAL("failed to alloc dsos array", dsos); + dsos__init(dsos); for (i = 0; i < cnt; i++) { - char *file; + struct dso *dso; + char *file = test_file(size); - file = test_file(size); TEST_ASSERT_VAL("failed to get dso file", file); - - dsos[i] = dso__new(file); - TEST_ASSERT_VAL("failed to get dso", dsos[i]); + dso = dso__new(file); + TEST_ASSERT_VAL("failed to get dso", dso); + TEST_ASSERT_VAL("failed to add dso", !dsos__add(dsos, dso)); + dso__put(dso); } return 0; } -static void dsos__delete(int cnt) +static void dsos__delete(struct dsos *dsos) { - int i; + for (unsigned int i = 0; i < dsos->cnt; i++) { + struct dso *dso = dsos->dsos[i]; - for (i = 0; i < cnt; i++) { - struct dso *dso = dsos[i]; - - unlink(dso->name); - dso__put(dso); + dso__data_close(dso); + unlink(dso__name(dso)); } - - free(dsos); + dsos__exit(dsos); } static int set_fd_limit(int n) @@ -266,10 +264,10 @@ static int test__dso_data_cache(struct test_suite *test __maybe_unused, 
int subt /* and this is now our dso open FDs limit */ dso_cnt = limit / 2; TEST_ASSERT_VAL("failed to create dsos\n", - !dsos__create(dso_cnt, TEST_FILE_SIZE)); + !dsos__create(dso_cnt, TEST_FILE_SIZE, &machine.dsos)); for (i = 0; i < (dso_cnt - 1); i++) { - struct dso *dso = dsos[i]; + struct dso *dso = machine.dsos.dsos[i]; /* * Open dsos via dso__data_fd(), it opens the data @@ -289,17 +287,17 @@ static int test__dso_data_cache(struct test_suite *test __maybe_unused, int subt } /* verify the first one is already open */ - TEST_ASSERT_VAL("dsos[0] is not open", dsos[0]->data.fd != -1); + TEST_ASSERT_VAL("dsos[0] is not open", dso__data(machine.dsos.dsos[0])->fd != -1); /* open +1 dso to reach the allowed limit */ - fd = dso__data_fd(dsos[i], &machine); + fd = dso__data_fd(machine.dsos.dsos[i], &machine); TEST_ASSERT_VAL("failed to get fd", fd > 0); /* should force the first one to be closed */ - TEST_ASSERT_VAL("failed to close dsos[0]", dsos[0]->data.fd == -1); + TEST_ASSERT_VAL("failed to close dsos[0]", dso__data(machine.dsos.dsos[0])->fd == -1); /* cleanup everything */ - dsos__delete(dso_cnt); + dsos__delete(&machine.dsos); /* Make sure we did not leak any file descriptor. */ nr_end = open_files_cnt(); @@ -324,9 +322,9 @@ static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int sub long nr_end, nr = open_files_cnt(), lim = new_limit(3); int fd, fd_extra; -#define dso_0 (dsos[0]) -#define dso_1 (dsos[1]) -#define dso_2 (dsos[2]) +#define dso_0 (machine.dsos.dsos[0]) +#define dso_1 (machine.dsos.dsos[1]) +#define dso_2 (machine.dsos.dsos[2]) /* Rest the internal dso open counter limit. */ reset_fd_limit(); @@ -346,7 +344,8 @@ static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int sub TEST_ASSERT_VAL("failed to set file limit", !set_fd_limit((lim))); - TEST_ASSERT_VAL("failed to create dsos\n", !dsos__create(3, TEST_FILE_SIZE)); + TEST_ASSERT_VAL("failed to create dsos\n", + !dsos__create(3, TEST_FILE_SIZE, &machine.dsos)); /* open dso_0 */ fd = dso__data_fd(dso_0, &machine); @@ -371,7 +370,7 @@ static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int sub * dso_0 should get closed, because we reached * the file descriptor limit */ - TEST_ASSERT_VAL("failed to close dso_0", dso_0->data.fd == -1); + TEST_ASSERT_VAL("failed to close dso_0", dso__data(dso_0)->fd == -1); /* open dso_0 */ fd = dso__data_fd(dso_0, &machine); @@ -381,11 +380,11 @@ static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int sub * dso_1 should get closed, because we reached * the file descriptor limit */ - TEST_ASSERT_VAL("failed to close dso_1", dso_1->data.fd == -1); + TEST_ASSERT_VAL("failed to close dso_1", dso__data(dso_1)->fd == -1); /* cleanup everything */ close(fd_extra); - dsos__delete(3); + dsos__delete(&machine.dsos); /* Make sure we did not leak any file descriptor. 
*/ nr_end = open_files_cnt(); diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c index 15ff86f9da0b..1922cac13a24 100644 --- a/tools/perf/tests/evsel-roundtrip-name.c +++ b/tools/perf/tests/evsel-roundtrip-name.c @@ -37,7 +37,7 @@ static int perf_evsel__roundtrip_cache_name_test(void) continue; } evlist__for_each_entry(evlist, evsel) { - if (strcmp(evsel__name(evsel), name)) { + if (!evsel__name_is(evsel, name)) { pr_debug("%s != %s\n", evsel__name(evsel), name); ret = TEST_FAIL; } @@ -71,7 +71,7 @@ static int perf_evsel__name_array_test(const char *const names[], int nr_names) continue; } evlist__for_each_entry(evlist, evsel) { - if (strcmp(evsel__name(evsel), names[i])) { + if (!evsel__name_is(evsel, names[i])) { pr_debug("%s != %s\n", evsel__name(evsel), names[i]); ret = TEST_FAIL; } diff --git a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c index 9c1a1f18db75..31966ff856f8 100644 --- a/tools/perf/tests/expand-cgroup.c +++ b/tools/perf/tests/expand-cgroup.c @@ -127,8 +127,7 @@ static int expand_group_events(void) parse_events_error__init(&err); ret = parse_events(evlist, event_str, &err); if (ret < 0) { - pr_debug("failed to parse event '%s', err %d, str '%s'\n", - event_str, ret, err.str); + pr_debug("failed to parse event '%s', err %d\n", event_str, ret); parse_events_error__print(&err, event_str); goto out; } diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c index d08add0f4da6..187f12f5bc21 100644 --- a/tools/perf/tests/hists_common.c +++ b/tools/perf/tests/hists_common.c @@ -146,7 +146,7 @@ struct machine *setup_fake_machine(struct machines *machines) goto out; } - symbols__insert(&dso->symbols, sym); + symbols__insert(dso__symbols(dso), sym); } dso__put(dso); @@ -183,7 +183,7 @@ void print_hists_in(struct hists *hists) pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n", i, thread__comm_str(he->thread), - dso->short_name, + dso__short_name(dso), he->ms.sym->name, he->stat.period); } @@ -212,7 +212,7 @@ void print_hists_out(struct hists *hists) pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"/%"PRIu64"\n", i, thread__comm_str(he->thread), thread__tid(he->thread), - dso->short_name, + dso__short_name(dso), he->ms.sym->name, he->stat.period, he->stat_acc ? 
he->stat_acc->period : 0); } diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c index 71dacb0fec4d..1e0f5a310fd5 100644 --- a/tools/perf/tests/hists_cumulate.c +++ b/tools/perf/tests/hists_cumulate.c @@ -164,11 +164,11 @@ static void put_fake_samples(void) typedef int (*test_fn_t)(struct evsel *, struct machine *); #define COMM(he) (thread__comm_str(he->thread)) -#define DSO(he) (map__dso(he->ms.map)->short_name) +#define DSO(he) (dso__short_name(map__dso(he->ms.map))) #define SYM(he) (he->ms.sym->name) #define CPU(he) (he->cpu) #define DEPTH(he) (he->callchain->max_depth) -#define CDSO(cl) (map__dso(cl->ms.map)->short_name) +#define CDSO(cl) (dso__short_name(map__dso(cl->ms.map))) #define CSYM(cl) (cl->ms.sym->name) struct result { diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c index ba1cccf57049..33b5cc8352a7 100644 --- a/tools/perf/tests/hists_output.c +++ b/tools/perf/tests/hists_output.c @@ -129,7 +129,7 @@ static void put_fake_samples(void) typedef int (*test_fn_t)(struct evsel *, struct machine *); #define COMM(he) (thread__comm_str(he->thread)) -#define DSO(he) (map__dso(he->ms.map)->short_name) +#define DSO(he) (dso__short_name(map__dso(he->ms.map))) #define SYM(he) (he->ms.sym->name) #define CPU(he) (he->cpu) #define PID(he) (thread__tid(he->thread)) diff --git a/tools/perf/tests/make b/tools/perf/tests/make index 8a4da7eb637a..a1f8adf85367 100644 --- a/tools/perf/tests/make +++ b/tools/perf/tests/make @@ -83,6 +83,7 @@ make_no_libelf := NO_LIBELF=1 make_no_libunwind := NO_LIBUNWIND=1 make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1 make_no_backtrace := NO_BACKTRACE=1 +make_no_libcapstone := NO_CAPSTONE=1 make_no_libnuma := NO_LIBNUMA=1 make_no_libaudit := NO_LIBAUDIT=1 make_no_libbionic := NO_LIBBIONIC=1 @@ -122,7 +123,7 @@ make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1 make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1 make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1 make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1 -make_minimal += NO_LIBCAP=1 NO_SYSCALL_TABLE=1 +make_minimal += NO_LIBCAP=1 NO_SYSCALL_TABLE=1 NO_CAPSTONE=1 # $(run) contains all available tests run := make_pure @@ -152,6 +153,7 @@ run += make_no_libelf run += make_no_libunwind run += make_no_libdw_dwarf_unwind run += make_no_backtrace +run += make_no_libcapstone run += make_no_libnuma run += make_no_libaudit run += make_no_libbionic diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c index bb3fbfe5a73e..4f1f9385ea9c 100644 --- a/tools/perf/tests/maps.c +++ b/tools/perf/tests/maps.c @@ -26,7 +26,7 @@ static int check_maps_cb(struct map *map, void *data) if (map__start(map) != merged->start || map__end(map) != merged->end || - strcmp(map__dso(map)->name, merged->name) || + strcmp(dso__name(map__dso(map)), merged->name) || refcount_read(map__refcnt(map)) != 1) { return 1; } @@ -39,7 +39,7 @@ static int failed_cb(struct map *map, void *data __maybe_unused) pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: %d\n", map__start(map), map__end(map), - map__dso(map)->name, + dso__name(map__dso(map)), refcount_read(map__refcnt(map))); return 0; @@ -156,6 +156,9 @@ static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest TEST_ASSERT_VAL("merge check failed", !ret); maps__zput(maps); + map__zput(map_kcore1); + map__zput(map_kcore2); + map__zput(map_kcore3); return TEST_OK; } diff --git a/tools/perf/tests/mem.c b/tools/perf/tests/mem.c index 
56014ec7d49d..cb3d749e157b 100644 --- a/tools/perf/tests/mem.c +++ b/tools/perf/tests/mem.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "util/map_symbol.h" #include "util/mem-events.h" +#include "util/mem-info.h" #include "util/symbol.h" #include "linux/perf_event.h" #include "util/debug.h" @@ -12,12 +13,14 @@ static int check(union perf_mem_data_src data_src, { char out[100]; char failure[100]; - struct mem_info mi = { .data_src = data_src }; - + struct mem_info *mi = mem_info__new(); int n; - n = perf_mem__snp_scnprintf(out, sizeof out, &mi); - n += perf_mem__lvl_scnprintf(out + n, sizeof out - n, &mi); + TEST_ASSERT_VAL("Memory allocation failed", mi); + *mem_info__data_src(mi) = data_src; + n = perf_mem__snp_scnprintf(out, sizeof out, mi); + n += perf_mem__lvl_scnprintf(out + n, sizeof out - n, mi); + mem_info__put(mi); scnprintf(failure, sizeof failure, "unexpected %s", out); TEST_ASSERT_VAL(failure, !strcmp(string, out)); return 0; diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index fbdf710d5eea..edc2adcf1bae 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -470,8 +470,7 @@ static int test__checkevent_breakpoint_modifier(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "mem:0:u")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:u")); return test__checkevent_breakpoint(evlist); } @@ -484,8 +483,7 @@ static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "mem:0:x:k")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:x:k")); return test__checkevent_breakpoint_x(evlist); } @@ -498,8 +496,7 @@ static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "mem:0:r:hp")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:r:hp")); return test__checkevent_breakpoint_r(evlist); } @@ -512,8 +509,7 @@ static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "mem:0:w:up")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:w:up")); return test__checkevent_breakpoint_w(evlist); } @@ -526,8 +522,7 @@ static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "mem:0:rw:kp")); + TEST_ASSERT_VAL("wrong name", 
evsel__name_is(evsel, "mem:0:rw:kp")); return test__checkevent_breakpoint_rw(evlist); } @@ -540,8 +535,7 @@ static int test__checkevent_breakpoint_modifier_name(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "breakpoint")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint")); return test__checkevent_breakpoint(evlist); } @@ -554,8 +548,7 @@ static int test__checkevent_breakpoint_x_modifier_name(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "breakpoint")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint")); return test__checkevent_breakpoint_x(evlist); } @@ -568,8 +561,7 @@ static int test__checkevent_breakpoint_r_modifier_name(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "breakpoint")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint")); return test__checkevent_breakpoint_r(evlist); } @@ -582,8 +574,7 @@ static int test__checkevent_breakpoint_w_modifier_name(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "breakpoint")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint")); return test__checkevent_breakpoint_w(evlist); } @@ -596,8 +587,7 @@ static int test__checkevent_breakpoint_rw_modifier_name(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "breakpoint")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint")); return test__checkevent_breakpoint_rw(evlist); } @@ -609,12 +599,12 @@ static int test__checkevent_breakpoint_2_events(struct evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "breakpoint1")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint1")); evsel = evsel__next(evsel); TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "breakpoint2")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint2")); return TEST_OK; } @@ -691,15 +681,14 @@ static int test__checkevent_pmu_name(struct evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", test_config(evsel, 1)); - 
TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "krava")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "krava")); /* cpu/config=2/u" */ evsel = evsel__next(evsel); TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", test_config(evsel, 2)); - TEST_ASSERT_VAL("wrong name", - !strcmp(evsel__name(evsel), "cpu/config=2/u")); + TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "cpu/config=2/u")); return TEST_OK; } @@ -953,8 +942,8 @@ static int test__group2(struct evlist *evlist) continue; } if (evsel->core.attr.type == PERF_TYPE_HARDWARE && - test_config(evsel, PERF_COUNT_HW_CACHE_REFERENCES)) { - /* cache-references + :u modifier */ + test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) { + /* branches + :u modifier */ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -2043,7 +2032,7 @@ static const struct evlist_test test__events[] = { /* 8 */ }, { - .name = "{faults:k,cache-references}:u,cycles:k", + .name = "{faults:k,branches}:u,cycles:k", .check = test__group2, /* 9 */ }, @@ -2280,6 +2269,13 @@ static const struct evlist_test test__events[] = { .check = test__checkevent_breakpoint_2_events, /* 3 */ }, +#ifdef HAVE_LIBTRACEEVENT + { + .name = "9p:9p_client_req", + .check = test__checkevent_tracepoint, + /* 4 */ + }, +#endif }; static const struct evlist_test test__events_pmu[] = { @@ -2504,13 +2500,13 @@ static int test_event(const struct evlist_test *e) return TEST_FAIL; } parse_events_error__init(&err); - ret = parse_events(evlist, e->name, &err); + ret = __parse_events(evlist, e->name, /*pmu_filter=*/NULL, &err, /*fake_pmu=*/NULL, + /*warn_if_reordered=*/true, /*fake_tp=*/true); if (ret) { - pr_debug("failed to parse event '%s', err %d, str '%s'\n", - e->name, ret, err.str); + pr_debug("failed to parse event '%s', err %d\n", e->name, ret); parse_events_error__print(&err, e->name); ret = TEST_FAIL; - if (err.str && strstr(err.str, "can't access trace events")) + if (parse_events_error__contains(&err, "can't access trace events")) ret = TEST_SKIP; } else { ret = e->check(evlist); @@ -2533,10 +2529,11 @@ static int test_event_fake_pmu(const char *str) parse_events_error__init(&err); ret = __parse_events(evlist, str, /*pmu_filter=*/NULL, &err, - &perf_pmu__fake, /*warn_if_reordered=*/true); + &perf_pmu__fake, /*warn_if_reordered=*/true, + /*fake_tp=*/true); if (ret) { - pr_debug("failed to parse event '%s', err %d, str '%s'\n", - str, ret, err.str); + pr_debug("failed to parse event '%s', err %d\n", + str, ret); parse_events_error__print(&err, str); } diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c index a56d32905743..ff3e7bc0a77f 100644 --- a/tools/perf/tests/pmu-events.c +++ b/tools/perf/tests/pmu-events.c @@ -70,7 +70,7 @@ static const struct perf_pmu_test_event segment_reg_loads_any = { .event = { .pmu = "default_core", .name = "segment_reg_loads.any", - .event = "event=0x6,period=200000,umask=0x80", + .event = "event=6,period=200000,umask=0x80", .desc = "Number of segment register loads", .topic = "other", }, @@ -82,7 +82,7 @@ static const struct perf_pmu_test_event dispatch_blocked_any = { .event = { .pmu = "default_core", .name = "dispatch_blocked.any", - .event = "event=0x9,period=200000,umask=0x20", + .event = "event=9,period=200000,umask=0x20", 
.desc = "Memory cluster signals to block micro-op dispatch for any reason", .topic = "other", }, @@ -94,11 +94,11 @@ static const struct perf_pmu_test_event eist_trans = { .event = { .pmu = "default_core", .name = "eist_trans", - .event = "event=0x3a,period=200000,umask=0x0", + .event = "event=0x3a,period=200000", .desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions", .topic = "other", }, - .alias_str = "event=0x3a,period=0x30d40,umask=0", + .alias_str = "event=0x3a,period=0x30d40", .alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions", }; @@ -128,7 +128,7 @@ static const struct perf_pmu_test_event *core_events[] = { static const struct perf_pmu_test_event uncore_hisi_ddrc_flux_wcmd = { .event = { .name = "uncore_hisi_ddrc.flux_wcmd", - .event = "event=0x2", + .event = "event=2", .desc = "DDRC write commands", .topic = "uncore", .long_desc = "DDRC write commands", @@ -156,13 +156,13 @@ static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = { static const struct perf_pmu_test_event uncore_hyphen = { .event = { .name = "event-hyphen", - .event = "event=0xe0,umask=0x00", + .event = "event=0xe0", .desc = "UNC_CBO_HYPHEN", .topic = "uncore", .long_desc = "UNC_CBO_HYPHEN", .pmu = "uncore_cbox", }, - .alias_str = "event=0xe0,umask=0", + .alias_str = "event=0xe0", .alias_long_desc = "UNC_CBO_HYPHEN", .matching_pmu = "uncore_cbox_0", }; @@ -170,13 +170,13 @@ static const struct perf_pmu_test_event uncore_hyphen = { static const struct perf_pmu_test_event uncore_two_hyph = { .event = { .name = "event-two-hyph", - .event = "event=0xc0,umask=0x00", + .event = "event=0xc0", .desc = "UNC_CBO_TWO_HYPH", .topic = "uncore", .long_desc = "UNC_CBO_TWO_HYPH", .pmu = "uncore_cbox", }, - .alias_str = "event=0xc0,umask=0", + .alias_str = "event=0xc0", .alias_long_desc = "UNC_CBO_TWO_HYPH", .matching_pmu = "uncore_cbox_0", }; @@ -184,7 +184,7 @@ static const struct perf_pmu_test_event uncore_two_hyph = { static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = { .event = { .name = "uncore_hisi_l3c.rd_hit_cpipe", - .event = "event=0x7", + .event = "event=7", .desc = "Total read hits", .topic = "uncore", .long_desc = "Total read hits", @@ -265,7 +265,7 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { static const struct perf_pmu_test_event sys_cmn_pmu_hnf_cache_miss = { .event = { .name = "sys_cmn_pmu.hnf_cache_miss", - .event = "eventid=0x1,type=0x5", + .event = "eventid=1,type=5", .desc = "Counts total cache misses in first lookup result (high priority)", .topic = "uncore", .pmu = "uncore_sys_cmn_pmu", @@ -842,7 +842,7 @@ static int check_parse_id(const char *id, struct parse_events_error *error, *cur = '/'; ret = __parse_events(evlist, dup, /*pmu_filter=*/NULL, error, fake_pmu, - /*warn_if_reordered=*/true); + /*warn_if_reordered=*/true, /*fake_tp=*/false); free(dup); evlist__delete(evlist); @@ -1105,6 +1105,6 @@ static struct test_case pmu_events_tests[] = { }; struct test_suite suite__pmu_events = { - .desc = "PMU events", + .desc = "PMU JSON event tests", .test_cases = pmu_events_tests, }; diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index 8f18127d876a..06cc0e46cb28 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -1,204 +1,353 @@ // SPDX-License-Identifier: GPL-2.0 +#include "evlist.h" +#include "evsel.h" #include "parse-events.h" #include "pmu.h" #include "tests.h" +#include "debug.h" +#include "fncache.h" +#include <api/fs/fs.h> +#include <ctype.h> 
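The rewritten pmu.c tests below assert that the krava terms pack into attr.config = 0xc00000000002a823 and related constants. As a reading aid, here is a minimal standalone sketch, not perf's actual perf_pmu__config_terms() implementation, of how a sysfs format line such as "config:0-1,62-63" scatters a term value into the attr field; it reproduces the first expected constant:

#include <stdint.h>
#include <stdio.h>

/* Scatter successive low-order bits of 'value' into the attr bit
 * positions listed in 'bits', the way a format spec maps a user term. */
static uint64_t pack_bits(uint64_t value, const int *bits, int nbits)
{
	uint64_t out = 0;

	for (int i = 0; i < nbits; i++)
		out |= ((value >> i) & 1) << bits[i];
	return out;
}

int main(void)
{
	const int krava01[] = { 0, 1, 62, 63 };                   /* config:0-1,62-63 */
	const int krava02[] = { 10, 11, 12, 13, 14, 15, 16, 17 }; /* config:10-17 */
	const int krava03[] = { 5 };                              /* config:5 */
	uint64_t config = pack_bits(15, krava01, 4) |
			  pack_bits(170, krava02, 8) |
			  pack_bits(1, krava03, 1);

	printf("config=%#018llx\n", (unsigned long long)config);
	return 0;
}

krava01=15 puts 0b1111 into bits {0,1,62,63}, krava02=170 (0xaa) fills bits 10-17, and krava03=1 sets bit 5; OR-ing them gives 0xc00000000002a823, the value both test__pmu_format and test__pmu_events check.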
+#include <dirent.h> #include <errno.h> #include <fcntl.h> #include <stdio.h> -#include <linux/kernel.h> -#include <linux/limits.h> -#include <linux/zalloc.h> - -/* Simulated format definitions. */ -static struct test_format { - const char *name; - const char *value; -} test_formats[] = { - { "krava01", "config:0-1,62-63\n", }, - { "krava02", "config:10-17\n", }, - { "krava03", "config:5\n", }, - { "krava11", "config1:0,2,4,6,8,20-28\n", }, - { "krava12", "config1:63\n", }, - { "krava13", "config1:45-47\n", }, - { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", }, - { "krava22", "config2:8,18,48,58\n", }, - { "krava23", "config2:28-29,38\n", }, -}; +#include <stdlib.h> +#include <unistd.h> +#include <sys/stat.h> +#include <sys/types.h> -/* Simulated users input. */ -static struct parse_events_term test_terms[] = { - { - .config = "krava01", - .val.num = 15, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = "krava02", - .val.num = 170, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = "krava03", - .val.num = 1, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = "krava11", - .val.num = 27, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = "krava12", - .val.num = 1, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = "krava13", - .val.num = 2, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = "krava21", - .val.num = 119, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = "krava22", - .val.num = 11, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = "krava23", - .val.num = 2, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, -}; +/* Fake PMUs created in temp directory. */ +static LIST_HEAD(test_pmus); + +/* Cleanup test PMU directory. */ +static int test_pmu_put(const char *dir, struct perf_pmu *pmu) +{ + char buf[PATH_MAX + 20]; + int ret; + + if (scnprintf(buf, sizeof(buf), "rm -fr %s", dir) < 0) { + pr_err("Failure to set up buffer for \"%s\"\n", dir); + return -EINVAL; + } + ret = system(buf); + if (ret) + pr_err("Failure to \"%s\"\n", buf); + + list_del(&pmu->list); + perf_pmu__delete(pmu); + return ret; +} /* - * Prepare format directory data, exported by kernel - * at /sys/bus/event_source/devices/<dev>/format. + * Prepare test PMU directory data, normally exported by kernel at + * /sys/bus/event_source/devices/<pmu>/. Give as input a buffer to hold the file + * path, the result is PMU loaded using that directory. */ -static char *test_format_dir_get(char *dir, size_t sz) +static struct perf_pmu *test_pmu_get(char *dir, size_t sz) { - unsigned int i; + /* Simulated format definitions. 
*/ + const struct test_format { + const char *name; + const char *value; + } test_formats[] = { + { "krava01", "config:0-1,62-63\n", }, + { "krava02", "config:10-17\n", }, + { "krava03", "config:5\n", }, + { "krava11", "config1:0,2,4,6,8,20-28\n", }, + { "krava12", "config1:63\n", }, + { "krava13", "config1:45-47\n", }, + { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", }, + { "krava22", "config2:8,18,48,58\n", }, + { "krava23", "config2:28-29,38\n", }, + }; + const char *test_event = "krava01=15,krava02=170,krava03=1,krava11=27,krava12=1," + "krava13=2,krava21=119,krava22=11,krava23=2\n"; + + char name[PATH_MAX]; + int dirfd, file; + struct perf_pmu *pmu = NULL; + ssize_t len; - snprintf(dir, sz, "/tmp/perf-pmu-test-format-XXXXXX"); - if (!mkdtemp(dir)) + /* Create equivalent of sysfs mount point. */ + scnprintf(dir, sz, "/tmp/perf-pmu-test-XXXXXX"); + if (!mkdtemp(dir)) { + pr_err("mkdtemp failed\n"); + dir[0] = '\0'; return NULL; + } + dirfd = open(dir, O_DIRECTORY); + if (dirfd < 0) { + pr_err("Failed to open test directory \"%s\"\n", dir); + goto err_out; + } - for (i = 0; i < ARRAY_SIZE(test_formats); i++) { - char name[PATH_MAX]; - struct test_format *format = &test_formats[i]; - FILE *file; + /* Create the test PMU directory and give it a perf_event_attr type number. */ + if (mkdirat(dirfd, "perf-pmu-test", 0755) < 0) { + pr_err("Failed to mkdir PMU directory\n"); + goto err_out; + } + file = openat(dirfd, "perf-pmu-test/type", O_WRONLY | O_CREAT, 0600); + if (file < 0) { + pr_err("Failed to open for writing file \"type\"\n"); + goto err_out; + } + len = strlen("9999"); + if (write(file, "9999\n", len) < len) { + close(file); + pr_err("Failed to write to 'type' file\n"); + goto err_out; + } + close(file); - scnprintf(name, PATH_MAX, "%s/%s", dir, format->name); + /* Create format directory and files. */ + if (mkdirat(dirfd, "perf-pmu-test/format", 0755) < 0) { + pr_err("Failed to mkdir PMU format directory\n"); + goto err_out; + } + for (size_t i = 0; i < ARRAY_SIZE(test_formats); i++) { + const struct test_format *format = &test_formats[i]; - file = fopen(name, "w"); - if (!file) - return NULL; + if (scnprintf(name, PATH_MAX, "perf-pmu-test/format/%s", format->name) < 0) { + pr_err("Failure to set up path for \"%s\"\n", format->name); + goto err_out; + } + file = openat(dirfd, name, O_WRONLY | O_CREAT, 0600); + if (file < 0) { + pr_err("Failed to open for writing file \"%s\"\n", name); + goto err_out; + } - if (1 != fwrite(format->value, strlen(format->value), 1, file)) - break; + if (write(file, format->value, strlen(format->value)) < 0) { + pr_err("Failed to write to file \"%s\"\n", name); + close(file); + goto err_out; + } + close(file); + } - fclose(file); + /* Create test event. */ + if (mkdirat(dirfd, "perf-pmu-test/events", 0755) < 0) { + pr_err("Failed to mkdir PMU events directory\n"); + goto err_out; + } + file = openat(dirfd, "perf-pmu-test/events/test-event", O_WRONLY | O_CREAT, 0600); + if (file < 0) { + pr_err("Failed to open for writing file \"test-event\"\n"); + goto err_out; + } + len = strlen(test_event); + if (write(file, test_event, len) < len) { + close(file); + pr_err("Failed to write to 'test-event' file\n"); + goto err_out; } + close(file); - return dir; + /* Make the PMU read the files created above. */ + pmu = perf_pmus__add_test_pmu(dirfd, "perf-pmu-test"); + if (!pmu) + pr_err("Test PMU creation failed\n"); + +err_out: + if (!pmu) + test_pmu_put(dir, pmu); + if (dirfd >= 0) + close(dirfd); + return pmu; } -/* Cleanup format directory.
*/ -static int test_format_dir_put(char *dir) +static int test__pmu_format(struct test_suite *test __maybe_unused, int subtest __maybe_unused) { - char buf[PATH_MAX + 20]; + char dir[PATH_MAX]; + struct perf_event_attr attr; + struct parse_events_terms terms; + int ret = TEST_FAIL; + struct perf_pmu *pmu = test_pmu_get(dir, sizeof(dir)); - snprintf(buf, sizeof(buf), "rm -f %s/*\n", dir); - if (system(buf)) - return -1; + if (!pmu) + return TEST_FAIL; - snprintf(buf, sizeof(buf), "rmdir %s\n", dir); - return system(buf); + parse_events_terms__init(&terms); + if (parse_events_terms(&terms, + "krava01=15,krava02=170,krava03=1,krava11=27,krava12=1," + "krava13=2,krava21=119,krava22=11,krava23=2", + NULL)) { + pr_err("Term parsing failed\n"); + goto err_out; + } + + memset(&attr, 0, sizeof(attr)); + ret = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/false, /*err=*/NULL); + if (ret) { + pr_err("perf_pmu__config_terms failed"); + goto err_out; + } + + if (attr.config != 0xc00000000002a823) { + pr_err("Unexpected config value %llx\n", attr.config); + goto err_out; + } + if (attr.config1 != 0x8000400000000145) { + pr_err("Unexpected config1 value %llx\n", attr.config1); + goto err_out; + } + if (attr.config2 != 0x0400000020041d07) { + pr_err("Unexpected config2 value %llx\n", attr.config2); + goto err_out; + } + + ret = TEST_OK; +err_out: + parse_events_terms__exit(&terms); + test_pmu_put(dir, pmu); + return ret; } -static void add_test_terms(struct parse_events_terms *terms) +static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused) { - unsigned int i; + char dir[PATH_MAX]; + struct parse_events_error err; + struct evlist *evlist; + struct evsel *evsel; + struct perf_event_attr *attr; + int ret = TEST_FAIL; + struct perf_pmu *pmu = test_pmu_get(dir, sizeof(dir)); + const char *event = "perf-pmu-test/test-event/"; - for (i = 0; i < ARRAY_SIZE(test_terms); i++) { - struct parse_events_term *clone; - parse_events_term__clone(&clone, &test_terms[i]); - list_add_tail(&clone->list, &terms->terms); + if (!pmu) + return TEST_FAIL; + + evlist = evlist__new(); + if (evlist == NULL) { + pr_err("Failed allocation"); + goto err_out; + } + parse_events_error__init(&err); + ret = parse_events(evlist, event, &err); + if (ret) { + pr_debug("failed to parse event '%s', err %d\n", event, ret); + parse_events_error__print(&err, event); + if (parse_events_error__contains(&err, "can't access trace events")) + ret = TEST_SKIP; + goto err_out; + } + evsel = evlist__first(evlist); + attr = &evsel->core.attr; + if (attr->config != 0xc00000000002a823) { + pr_err("Unexpected config value %llx\n", attr->config); + goto err_out; + } + if (attr->config1 != 0x8000400000000145) { + pr_err("Unexpected config1 value %llx\n", attr->config1); + goto err_out; + } + if (attr->config2 != 0x0400000020041d07) { + pr_err("Unexpected config2 value %llx\n", attr->config2); + goto err_out; } + + ret = TEST_OK; +err_out: + parse_events_error__exit(&err); + evlist__delete(evlist); + test_pmu_put(dir, pmu); + return ret; } -static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe_unused) +static bool permitted_event_name(const char *name) { - char dir[PATH_MAX]; - char *format; - struct parse_events_terms terms; - struct perf_event_attr attr; - struct perf_pmu *pmu; - int fd; - int ret; + bool has_lower = false, has_upper = false; - parse_events_terms__init(&terms); - add_test_terms(&terms); - pmu = zalloc(sizeof(*pmu)); - if (!pmu) { - parse_events_terms__exit(&terms); - 
return -ENOMEM; - } - - INIT_LIST_HEAD(&pmu->format); - INIT_LIST_HEAD(&pmu->aliases); - INIT_LIST_HEAD(&pmu->caps); - format = test_format_dir_get(dir, sizeof(dir)); - if (!format) { - free(pmu); - parse_events_terms__exit(&terms); - return -EINVAL; + for (size_t i = 0; i < strlen(name); i++) { + char c = name[i]; + + if (islower(c)) { + if (has_upper) + return false; + has_lower = true; + continue; + } + if (isupper(c)) { + if (has_lower) + return false; + has_upper = true; + continue; + } + if (!isdigit(c) && c != '.' && c != '_' && c != '-') + return false; } + return true; +} - memset(&attr, 0, sizeof(attr)); +static int test__pmu_event_names(struct test_suite *test __maybe_unused, + int subtest __maybe_unused) +{ + char path[PATH_MAX]; + DIR *pmu_dir, *event_dir; + struct dirent *pmu_dent, *event_dent; + const char *sysfs = sysfs__mountpoint(); + int ret = TEST_OK; - fd = open(format, O_DIRECTORY); - if (fd < 0) { - ret = fd; - goto out; + if (!sysfs) { + pr_err("Sysfs not mounted\n"); + return TEST_FAIL; } - pmu->name = strdup("perf-pmu-test"); - ret = perf_pmu__format_parse(pmu, fd, /*eager_load=*/true); - if (ret) - goto out; + snprintf(path, sizeof(path), "%s/bus/event_source/devices/", sysfs); + pmu_dir = opendir(path); + if (!pmu_dir) { + pr_err("Error opening \"%s\"\n", path); + return TEST_FAIL; + } + while ((pmu_dent = readdir(pmu_dir))) { + if (!strcmp(pmu_dent->d_name, ".") || + !strcmp(pmu_dent->d_name, "..")) + continue; - ret = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/false, /*err=*/NULL); - if (ret) - goto out; - - ret = -EINVAL; - if (attr.config != 0xc00000000002a823) - goto out; - if (attr.config1 != 0x8000400000000145) - goto out; - if (attr.config2 != 0x0400000020041d07) - goto out; - - ret = 0; -out: - test_format_dir_put(format); - perf_pmu__delete(pmu); - parse_events_terms__exit(&terms); + snprintf(path, sizeof(path), "%s/bus/event_source/devices/%s/type", + sysfs, pmu_dent->d_name); + + /* Does it look like a PMU? */ + if (!file_available(path)) + continue; + + /* Process events. */ + snprintf(path, sizeof(path), "%s/bus/event_source/devices/%s/events", + sysfs, pmu_dent->d_name); + + event_dir = opendir(path); + if (!event_dir) { + pr_debug("Skipping as no event directory \"%s\"\n", path); + continue; + } + while ((event_dent = readdir(event_dir))) { + const char *event_name = event_dent->d_name; + + if (!strcmp(event_name, ".") || !strcmp(event_name, "..")) + continue; + + if (!permitted_event_name(event_name)) { + pr_err("Invalid sysfs event name: %s/%s\n", + pmu_dent->d_name, event_name); + ret = TEST_FAIL; + } + } + closedir(event_dir); + } + closedir(pmu_dir); return ret; } -DEFINE_SUITE("Parse perf pmu format", pmu); +static struct test_case tests__pmu[] = { + TEST_CASE("Parsing with PMU format directory", pmu_format), + TEST_CASE("Parsing with PMU event", pmu_events), + TEST_CASE("PMU event names", pmu_event_names), + { .name = NULL, } +}; + +struct test_suite suite__pmu = { + .desc = "Sysfs PMU tests", + .test_cases = tests__pmu, +}; diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh new file mode 100755 index 000000000000..1db1e8113d99 --- /dev/null +++ b/tools/perf/tests/shell/annotate.sh @@ -0,0 +1,83 @@ +#!/bin/sh +# perf annotate basic tests +# SPDX-License-Identifier: GPL-2.0 + +set -e + +shelldir=$(dirname "$0") + +# shellcheck source=lib/perf_has_symbol.sh +. 
"${shelldir}"/lib/perf_has_symbol.sh + +testsym="noploop" + +skip_test_missing_symbol ${testsym} + +err=0 +perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) +testprog="perf test -w noploop" +# disassembly format: "percent : offset: instruction (operands ...)" +disasm_regex="[0-9]*\.[0-9]* *: *\w*: *\w*" + +cleanup() { + rm -rf "${perfdata}" + rm -rf "${perfdata}".old + + trap - EXIT TERM INT +} + +trap_cleanup() { + cleanup + exit 1 +} +trap trap_cleanup EXIT TERM INT + +test_basic() { + echo "Basic perf annotate test" + if ! perf record -o "${perfdata}" ${testprog} 2> /dev/null + then + echo "Basic annotate [Failed: perf record]" + err=1 + return + fi + + # check if it has the target symbol + if ! perf annotate -i "${perfdata}" 2> /dev/null | grep "${testsym}" + then + echo "Basic annotate [Failed: missing target symbol]" + err=1 + return + fi + + # check if it has the disassembly lines + if ! perf annotate -i "${perfdata}" 2> /dev/null | grep "${disasm_regex}" + then + echo "Basic annotate [Failed: missing disasm output from default disassembler]" + err=1 + return + fi + + # check again with a target symbol name + if ! perf annotate -i "${perfdata}" "${testsym}" 2> /dev/null | \ + grep -m 3 "${disasm_regex}" + then + echo "Basic annotate [Failed: missing disasm output when specifying the target symbol]" + err=1 + return + fi + + # check one more with external objdump tool (forced by --objdump option) + if ! perf annotate -i "${perfdata}" --objdump=objdump 2> /dev/null | \ + grep -m 3 "${disasm_regex}" + then + echo "Basic annotate [Failed: missing disasm output from non default disassembler (using --objdump)]" + err=1 + return + fi + echo "Basic annotate test [Success]" +} + +test_basic + +cleanup +exit $err diff --git a/tools/perf/tests/shell/base_probe/settings.sh b/tools/perf/tests/shell/base_probe/settings.sh new file mode 100644 index 000000000000..123621c7f95e --- /dev/null +++ b/tools/perf/tests/shell/base_probe/settings.sh @@ -0,0 +1,48 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# settings.sh of perf_probe test +# Author: Michael Petlan <mpetlan@redhat.com> +# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> +# + +export TEST_NAME="perf_probe" + +export MY_ARCH=`arch` + +if [ -n "$PERFSUITE_RUN_DIR" ]; then + # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there + # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs + # dirs will be used for that + export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR` + export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME" + export MAKE_TARGET_DIR="$CURRENT_TEST_DIR/examples" + test -d "$MAKE_TARGET_DIR" || mkdir -p "$MAKE_TARGET_DIR" + export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs" + test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR" +else + # when $PERFSUITE_RUN_DIR is not set, logs will be placed here + export CURRENT_TEST_DIR="." + export LOGS_DIR="." 
+fi + +check_kprobes_available() +{ + test -e /sys/kernel/debug/tracing/kprobe_events +} + +check_uprobes_available() +{ + test -e /sys/kernel/debug/tracing/uprobe_events +} + +clear_all_probes() +{ + echo 0 > /sys/kernel/debug/tracing/events/enable + check_kprobes_available && echo > /sys/kernel/debug/tracing/kprobe_events + check_uprobes_available && echo > /sys/kernel/debug/tracing/uprobe_events +} + +check_sdt_support() +{ + $CMD_PERF list sdt | grep sdt > /dev/null 2> /dev/null +} diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh new file mode 100755 index 000000000000..63bb8974b38e --- /dev/null +++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh @@ -0,0 +1,279 @@ +#!/bin/bash +# Add 'perf probe's, list and remove them +# SPDX-License-Identifier: GPL-2.0 + +# +# test_adding_kernel of perf_probe test +# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> +# Author: Michael Petlan <mpetlan@redhat.com> +# +# Description: +# +# This test tests adding of probes, their correct listing +# and removing. +# + +# include working environment +. ../common/init.sh +. ./settings.sh + +# shellcheck disable=SC2034 # the variable is later used after the working environment is included +THIS_TEST_NAME=`basename $0 .sh` +TEST_RESULT=0 + +TEST_PROBE=${TEST_PROBE:-"inode_permission"} + +check_kprobes_available +if [ $? -ne 0 ]; then + print_overall_skipped + exit 0 +fi + + +### basic probe adding + +for opt in "" "-a" "--add"; do + clear_all_probes + $CMD_PERF probe $opt $TEST_PROBE 2> $LOGS_DIR/adding_kernel_add$opt.err + PERF_EXIT_CODE=$? + + ../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_add$opt.err + CHECK_EXIT_CODE=$? + + print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding probe $TEST_PROBE :: $opt" + (( TEST_RESULT += $? )) +done + + +### listing added probe :: perf list + +# any added probes should appear in perf-list output +$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list.log +PERF_EXIT_CODE=$? + +../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "probe:${TEST_PROBE}(?:_\d+)?\s+\[Tracepoint event\]" "Metric Groups:" < $LOGS_DIR/adding_kernel_list.log +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf list" +(( TEST_RESULT += $? )) + + +### listing added probe :: perf probe -l + +# '-l' should list all the added probes as well +$CMD_PERF probe -l > $LOGS_DIR/adding_kernel_list-l.log +PERF_EXIT_CODE=$? + +../common/check_all_patterns_found.pl "\s*probe:${TEST_PROBE}(?:_\d+)?\s+\(on ${TEST_PROBE}(?:[:\+]$RE_NUMBER_HEX)?@.+\)" < $LOGS_DIR/adding_kernel_list-l.log +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf probe -l" +(( TEST_RESULT += $? )) + + +### using added probe + +$CMD_PERF stat -e probe:$TEST_PROBE\* -o $LOGS_DIR/adding_kernel_using_probe.log -- cat /proc/uptime > /dev/null +PERF_EXIT_CODE=$? + +REGEX_STAT_HEADER="\s*Performance counter stats for \'cat /proc/uptime\':" +REGEX_STAT_VALUES="\s*\d+\s+probe:$TEST_PROBE" +# the value should be greater than 1 +REGEX_STAT_VALUE_NONZERO="\s*[1-9][0-9]*\s+probe:$TEST_PROBE" +REGEX_STAT_TIME="\s*$RE_NUMBER\s+seconds (?:time elapsed|user|sys)" +../common/check_all_lines_matched.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUES" "$REGEX_STAT_TIME" "$RE_LINE_COMMENT" "$RE_LINE_EMPTY" < $LOGS_DIR/adding_kernel_using_probe.log +CHECK_EXIT_CODE=$? 
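# The two checkers pair intentionally: check_all_lines_matched.pl passes only when every log line matches at least one of the given regexps, while check_all_patterns_found.pl passes only when every given regexp matches at least one line; together they pin the log down from both directions.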
+../common/check_all_patterns_found.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUE_NONZERO" "$REGEX_STAT_TIME" < $LOGS_DIR/adding_kernel_using_probe.log +(( CHECK_EXIT_CODE += $? )) + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using added probe" +(( TEST_RESULT += $? )) + + +### removing added probe + +# '-d' should remove the probe +$CMD_PERF probe -d $TEST_PROBE\* 2> $LOGS_DIR/adding_kernel_removing.err +PERF_EXIT_CODE=$? + +../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" < $LOGS_DIR/adding_kernel_removing.err +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "deleting added probe" +(( TEST_RESULT += $? )) + + +### listing removed probe + +# removed probes should NOT appear in perf-list output +$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list_removed.log +PERF_EXIT_CODE=$? + +../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" < $LOGS_DIR/adding_kernel_list_removed.log +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing removed probe (should NOT be listed)" +(( TEST_RESULT += $? )) + + +### dry run + +# the '-n' switch should run it in dry mode +$CMD_PERF probe -n --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_dryrun.err +PERF_EXIT_CODE=$? + +# check for the output (should be the same as usual) +../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_dryrun.err +CHECK_EXIT_CODE=$? + +# check that no probe was added in real +! ( $CMD_PERF probe -l | grep "probe:$TEST_PROBE" ) +(( CHECK_EXIT_CODE += $? )) + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "dry run :: adding probe" +(( TEST_RESULT += $? )) + + +### force-adding probes + +# when using '--force' a probe should be added even if it is already there +$CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_01.err +PERF_EXIT_CODE=$? + +../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_01.err +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: first probe adding" +(( TEST_RESULT += $? )) + +# adding existing probe without '--force' should fail +! $CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_02.err +PERF_EXIT_CODE=$? + +../common/check_all_patterns_found.pl "Error: event \"$TEST_PROBE\" already exists." "Error: Failed to add events." < $LOGS_DIR/adding_kernel_forceadd_02.err +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (without force)" +(( TEST_RESULT += $? )) + +# adding existing probe with '--force' should pass +NO_OF_PROBES=`$CMD_PERF probe -l | wc -l` +$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err +PERF_EXIT_CODE=$? + +../common/check_all_patterns_found.pl "Added new events?:" "probe:${TEST_PROBE}_${NO_OF_PROBES}" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_03.err +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (with force)" +(( TEST_RESULT += $? )) + + +### using doubled probe + +# since they are the same, they should produce the same results +$CMD_PERF stat -e probe:$TEST_PROBE -e probe:${TEST_PROBE}_${NO_OF_PROBES} -x';' -o $LOGS_DIR/adding_kernel_using_two.log -- bash -c 'cat /proc/cpuinfo > /dev/null' +PERF_EXIT_CODE=$? 
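# -x';' switches perf stat to semicolon-separated CSV output (count, unit, event name, run time, percentage), which REGEX_LINE below relies on to pull out and compare the two probes' counts.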
+ +REGEX_LINE="$RE_NUMBER;+probe:${TEST_PROBE}_?(?:$NO_OF_PROBES)?;$RE_NUMBER;$RE_NUMBER" +../common/check_all_lines_matched.pl "$REGEX_LINE" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/adding_kernel_using_two.log +CHECK_EXIT_CODE=$? + +VALUE_1=`grep "$TEST_PROBE;" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'` +VALUE_2=`grep "${TEST_PROBE}_${NO_OF_PROBES};" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'` + +test $VALUE_1 -eq $VALUE_2 +(( CHECK_EXIT_CODE += $? )) + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe" + + +### removing multiple probes + +# using wildcards should remove all matching probes +$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err +PERF_EXIT_CODE=$? + +../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes" +(( TEST_RESULT += $? )) + + +### wildcard adding support + +$CMD_PERF probe -nf --max-probes=512 -a 'vfs_* $params' 2> $LOGS_DIR/adding_kernel_adding_wildcard.err +PERF_EXIT_CODE=$? + +../common/check_all_patterns_found.pl "probe:vfs_mknod" "probe:vfs_create" "probe:vfs_rmdir" "probe:vfs_link" "probe:vfs_write" < $LOGS_DIR/adding_kernel_adding_wildcard.err +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "wildcard adding support" +(( TEST_RESULT += $? )) + + +### non-existing variable + +# perf probe should survive a non-existing variable probing attempt +{ $CMD_PERF probe 'vfs_read somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64' ; } 2> $LOGS_DIR/adding_kernel_nonexisting.err +PERF_EXIT_CODE=$? + +# the exitcode should not be 0 or segfault +test $PERF_EXIT_CODE -ne 139 -a $PERF_EXIT_CODE -ne 0 +PERF_EXIT_CODE=$? + +# check that the error message is reasonable +../common/check_all_patterns_found.pl "Failed to find" "somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64" < $LOGS_DIR/adding_kernel_nonexisting.err +CHECK_EXIT_CODE=$? +../common/check_all_patterns_found.pl "in this function|at this address" "Error" "Failed to add events" < $LOGS_DIR/adding_kernel_nonexisting.err +(( CHECK_EXIT_CODE += $? )) +../common/check_all_lines_matched.pl "Failed to find" "Error" "Probe point .+ not found" "optimized out" "Use.+\-\-range option to show.+location range" < $LOGS_DIR/adding_kernel_nonexisting.err +(( CHECK_EXIT_CODE += $? )) +../common/check_no_patterns_found.pl "$RE_SEGFAULT" < $LOGS_DIR/adding_kernel_nonexisting.err +(( CHECK_EXIT_CODE += $? )) + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing variable" +(( TEST_RESULT += $? )) + + +### function with return value + +# adding probe with return value +$CMD_PERF probe --add "$TEST_PROBE%return \$retval" 2> $LOGS_DIR/adding_kernel_func_retval_add.err +PERF_EXIT_CODE=$? + +../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE%return with \\\$retval" < $LOGS_DIR/adding_kernel_func_retval_add.err +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: add" +(( TEST_RESULT += $? )) + +# recording some data +$CMD_PERF record -e probe:$TEST_PROBE\* -o $CURRENT_TEST_DIR/perf.data -- cat /proc/cpuinfo > /dev/null 2> $LOGS_DIR/adding_kernel_func_retval_record.err +PERF_EXIT_CODE=$? 
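# A '%return' probe fires at function exit and \$retval captures the returned value; perf script renders it as 'arg1=0x...', which is what the tail of REGEX_SCRIPT_LINE below matches.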
+ +../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/adding_kernel_func_retval_record.err +CHECK_EXIT_CODE=$? + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: record" +(( TEST_RESULT += $? )) + +# perf script should report the function calls with the correct arg values +$CMD_PERF script -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/adding_kernel_func_retval_script.log +PERF_EXIT_CODE=$? + +REGEX_SCRIPT_LINE="\s*cat\s+$RE_NUMBER\s+\[$RE_NUMBER\]\s+$RE_NUMBER:\s+probe:$TEST_PROBE\w*:\s+\($RE_NUMBER_HEX\s+<\-\s+$RE_NUMBER_HEX\)\s+arg1=$RE_NUMBER_HEX" +../common/check_all_lines_matched.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log +CHECK_EXIT_CODE=$? +../common/check_all_patterns_found.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log +(( CHECK_EXIT_CODE += $? )) + +print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function argument probing :: script" +(( TEST_RESULT += $? )) + + +clear_all_probes + +# print overall results +print_overall_results "$TEST_RESULT" +exit $? diff --git a/tools/perf/tests/shell/common/check_all_lines_matched.pl b/tools/perf/tests/shell/common/check_all_lines_matched.pl new file mode 100755 index 000000000000..fded48959a3f --- /dev/null +++ b/tools/perf/tests/shell/common/check_all_lines_matched.pl @@ -0,0 +1,39 @@ +#!/usr/bin/perl +# SPDX-License-Identifier: GPL-2.0 + +@regexps = @ARGV; + +$max_printed_lines = 20; +$max_printed_lines = $ENV{TESTLOG_ERR_MSG_MAX_LINES} if (defined $ENV{TESTLOG_ERR_MSG_MAX_LINES}); + +$quiet = 1; +$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2); + +$passed = 1; +$lines_printed = 0; + +while (<STDIN>) +{ + s/\n//; + + $line_matched = 0; + for $r (@regexps) + { + if (/$r/) + { + $line_matched = 1; + last; + } + } + + unless ($line_matched) + { + if ($lines_printed++ < $max_printed_lines) + { + print "Line did not match any pattern: \"$_\"\n" unless $quiet; + } + $passed = 0; + } +} + +exit ($passed == 0); diff --git a/tools/perf/tests/shell/common/check_all_patterns_found.pl b/tools/perf/tests/shell/common/check_all_patterns_found.pl new file mode 100755 index 000000000000..11bdf1d3460a --- /dev/null +++ b/tools/perf/tests/shell/common/check_all_patterns_found.pl @@ -0,0 +1,34 @@ +#!/usr/bin/perl +# SPDX-License-Identifier: GPL-2.0 + +@regexps = @ARGV; + +$quiet = 1; +$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2); + +%found = (); +$passed = 1; + +while (<STDIN>) +{ + s/\n//; + + for $r (@regexps) + { + if (/$r/) + { + $found{$r} = 1; # FIXME: maybe add counters -- how many times was the regexp matched + } + } +} + +for $r (@regexps) +{ + unless (exists $found{$r}) + { + print "Regexp not found: \"$r\"\n" unless $quiet; + $passed = 0; + } +} + +exit ($passed == 0); diff --git a/tools/perf/tests/shell/common/check_no_patterns_found.pl b/tools/perf/tests/shell/common/check_no_patterns_found.pl new file mode 100755 index 000000000000..770999e87a5f --- /dev/null +++ b/tools/perf/tests/shell/common/check_no_patterns_found.pl @@ -0,0 +1,34 @@ +#!/usr/bin/perl +# SPDX-License-Identifier: GPL-2.0 + +@regexps = @ARGV; + +$quiet = 1; +$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2); + +%found = (); +$passed = 1; + +while (<STDIN>) +{ + s/\n//; + + for $r (@regexps) + { + if (/$r/) + { + $found{$r} = 1; + } + } +} + +for $r (@regexps) +{ + if (exists $found{$r}) + { + print "Regexp found: \"$r\"\n" unless $quiet; + $passed = 0; + } +} + +exit 
($passed == 0); diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh new file mode 100644 index 000000000000..aadeaf782e03 --- /dev/null +++ b/tools/perf/tests/shell/common/init.sh @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# init.sh +# Author: Michael Petlan <mpetlan@redhat.com> +# +# Description: +# +# This file should be used for initialization of basic functions +# for checking, reporting results etc. +# +# + + +. ../common/settings.sh +. ../common/patterns.sh + +THIS_TEST_NAME=`basename $0 .sh` + +_echo() +{ + test "$TESTLOG_VERBOSITY" -ne 0 && echo -e "$@" +} + +print_results() +{ + PERF_RETVAL="$1"; shift + CHECK_RETVAL="$1"; shift + FAILURE_REASON="" + TASK_COMMENT="$@" + if [ $PERF_RETVAL -eq 0 -a $CHECK_RETVAL -eq 0 ]; then + _echo "$MPASS-- [ PASS ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT" + return 0 + else + if [ $PERF_RETVAL -ne 0 ]; then + FAILURE_REASON="command exitcode" + fi + if [ $CHECK_RETVAL -ne 0 ]; then + test -n "$FAILURE_REASON" && FAILURE_REASON="$FAILURE_REASON + " + FAILURE_REASON="$FAILURE_REASON""output regexp parsing" + fi + _echo "$MFAIL-- [ FAIL ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT ($FAILURE_REASON)" + return 1 + fi +} + +print_overall_results() +{ + RETVAL="$1"; shift + if [ $RETVAL -eq 0 ]; then + _echo "$MALLPASS## [ PASS ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY" + else + _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found" + fi + return $RETVAL +} + +print_testcase_skipped() +{ + TASK_COMMENT="$@" + _echo "$MSKIP-- [ SKIP ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT :: testcase skipped" + return 0 +} + +print_overall_skipped() +{ + _echo "$MSKIP## [ SKIP ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME :: testcase skipped" + return 0 +} + +print_warning() +{ + WARN_COMMENT="$@" + _echo "$MWARN-- [ WARN ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $WARN_COMMENT" + return 0 +} + +# this function should skip a testcase if the testsuite is not run in +# a runmode that fits the testcase --> if the suite runs in BASIC mode +# all STANDARD and EXPERIMENTAL testcases will be skipped; if the suite +# runs in STANDARD mode, all EXPERIMENTAL testcases will be skipped and +# if the suite runs in EXPERIMENTAL mode, nothing is skipped +consider_skipping() +{ + TESTCASE_RUNMODE="$1" + # the runmode of a testcase needs to be at least the current suite's runmode + if [ $PERFTOOL_TESTSUITE_RUNMODE -lt $TESTCASE_RUNMODE ]; then + print_overall_skipped + exit 0 + fi +} + +detect_baremetal() +{ + # return values: + # 0 = bare metal + # 1 = virtualization detected + # 2 = unknown state + VIRT=`systemd-detect-virt 2>/dev/null` + test $? 
-eq 127 && return 2 + test "$VIRT" = "none" +} + +detect_intel() +{ + # return values: + # 0 = is Intel + # 1 = is not Intel or unknown + grep "vendor_id" < /proc/cpuinfo | grep -q "GenuineIntel" +} + +detect_amd() +{ + # return values: + # 0 = is AMD + # 1 = is not AMD or unknown + grep "vendor_id" < /proc/cpuinfo | grep -q "AMD" +} diff --git a/tools/perf/tests/shell/common/patterns.sh b/tools/perf/tests/shell/common/patterns.sh new file mode 100644 index 000000000000..21dab25c7b7f --- /dev/null +++ b/tools/perf/tests/shell/common/patterns.sh @@ -0,0 +1,268 @@ +# SPDX-License-Identifier: GPL-2.0 + +export RE_NUMBER="[0-9\.]+" +# Number +# Examples: +# 123.456 + + +export RE_NUMBER_HEX="[0-9A-Fa-f]+" +# Hexadecimal number +# Examples: +# 1234 +# a58d +# aBcD +# deadbeef + + +export RE_DATE_YYYYMMDD="[0-9]{4}-(?:(?:01|03|05|07|08|10|12)-(?:[0-2][0-9]|3[0-1])|02-[0-2][0-9]|(?:(?:04|06|09|11)-(?:[0-2][0-9]|30)))" +# Date in YYYY-MM-DD form +# Examples: +# 1990-02-29 +# 0015-07-31 +# 2456-12-31 +#! 2012-13-01 +#! 1963-09-31 + + +export RE_TIME="(?:[0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]" +# Time +# Examples: +# 15:12:27 +# 23:59:59 +#! 24:00:00 +#! 11:25:60 +#! 17:60:15 + + +export RE_DATE_TIME="\w+\s+\w+\s+$RE_NUMBER\s+$RE_TIME\s+$RE_NUMBER" +# Time and date +# Examples: +# Wed Feb 12 10:46:26 2020 +# Mon Mar 2 13:27:06 2020 +#! St úno 12 10:57:21 CET 2020 +#! Po úno 14 15:17:32 2010 + + +export RE_ADDRESS="0x$RE_NUMBER_HEX" +# Memory address +# Examples: +# 0x123abc +# 0xffffffff9abe8ae8 +# 0x0 + + +export RE_ADDRESS_NOT_NULL="0x[0-9A-Fa-f]*[1-9A-Fa-f]+[0-9A-Fa-f]*" +# Memory address (not NULL) +# Examples: +# 0xffffffff9abe8ae8 +#! 0x0 +#! 0x0000000000000000 + +export RE_PROCESS_PID="[^\/]+\/\d+" +# A process with PID +# Example: +# sleep/4102 +# test_overhead./866185 +# in:imjournal/1096 +# random#$& test/866607 + +export RE_EVENT_ANY="[\w\-\:\/_=,]+" +# Name of any event (universal) +# Examples: +# cpu-cycles +# cpu/event=12,umask=34/ +# r41e1 +# nfs:nfs_getattr_enter + + +export RE_EVENT="[\w\-:_]+" +# Name of an usual event +# Examples: +# cpu-cycles + + +export RE_EVENT_RAW="r$RE_NUMBER_HEX" +# Specification of a raw event +# Examples: +# r41e1 +# r1a + + +export RE_EVENT_CPU="cpu/(\w+=$RE_NUMBER_HEX,?)+/p*" +# Specification of a CPU event +# Examples: +# cpu/event=12,umask=34/pp + + +export RE_EVENT_UNCORE="uncore/[\w_]+/" +# Specification of an uncore event +# Examples: +# uncore/qhl_request_local_reads/ + + +export RE_EVENT_SUBSYSTEM="[\w\-]+:[\w\-]+" +# Name of an event from subsystem +# Examples: +# ext4:ext4_ordered_write_end +# sched:sched_switch + + +export RE_FILE_NAME="[\w\+\.-]+" +# A filename +# Examples: +# libstdc++.so.6 +#! some/path + + +export RE_PATH_ABSOLUTE="(?:\/$RE_FILE_NAME)+" +# A full filepath +# Examples: +# /usr/lib64/somelib.so.5.4.0 +# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko +# /usr/bin/mv +#! some/relative/path +#! 
./some/relative/path + + +export RE_PATH="(?:$RE_FILE_NAME)?$RE_PATH_ABSOLUTE" +# A filepath +# Examples: +# /usr/lib64/somelib.so.5.4.0 +# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko +# ./.emacs +# src/fs/file.c + + +export RE_DSO="(?:$RE_PATH_ABSOLUTE(?: \(deleted\))?|\[kernel\.kallsyms\]|\[unknown\]|\[vdso\]|\[kernel\.vmlinux\][\.\w]*)" +# A DSO name in various result tables +# Examples: +# /usr/lib64/somelib.so.5.4.0 +# /usr/bin/somebinart (deleted) +# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko +# [kernel.kallsyms] +# [kernel.vmlinux] +# [vdso] +# [unknown] + + +export RE_LINE_COMMENT="^#.*" +# A comment line +# Examples: +# # Started on Thu Sep 10 11:43:00 2015 + + +export RE_LINE_EMPTY="^\s*$" +# An empty line with possible whitespaces +# Examples: +# + + +export RE_LINE_RECORD1="^\[\s+perf\s+record:\s+Woken up $RE_NUMBER times? to write data\s+\].*$" +# The first line of perf-record "OK" output +# Examples: +# [ perf record: Woken up 1 times to write data ] + + +export RE_LINE_RECORD2="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data(?:\.\d+)?\s*\(~?$RE_NUMBER samples\)\s+\].*$" +# The second line of perf-record "OK" output +# Examples: +# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ] +# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB ./perf.data.3 (109 samples) ] + + +export RE_LINE_RECORD2_TOLERANT="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data(?:\.\d+)?\s*(?:\(~?$RE_NUMBER samples\))?\s+\].*$" +# The second line of perf-record "OK" output, even no samples is OK here +# Examples: +# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ] +# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB ./perf.data.3 (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB perf.data ] + + +export RE_LINE_RECORD2_TOLERANT_FILENAME="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\w*\.data(?:\.\d+)?\s*\(~?$RE_NUMBER samples\)\s+\].*$" +# The second line of perf-record "OK" output +# Examples: +# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB perf_ls.data (~109 samples) ] +# [ perf record: Captured and wrote 0.405 MB perf_aNyCaSe.data (109 samples) ] +# [ perf record: Captured and wrote 0.405 MB ./perfdata.data.3 (109 samples) ] +#! [ perf record: Captured and wrote 0.405 MB /some/temp/dir/my_own.data (109 samples) ] +#! [ perf record: Captured and wrote 0.405 MB ./UPPERCASE.data (109 samples) ] +#! [ perf record: Captured and wrote 0.405 MB ./aNyKiNDoF.data.3 (109 samples) ] +#! 
[ perf record: Captured and wrote 0.405 MB perf.data ]
+
+
+export RE_LINE_TRACE_FULL="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): sleep/4102 mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+#! 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) =
+
+export RE_LINE_TRACE_ONE_PROC="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*\w+\(.*\)\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+#! 0.115 ( 0.005 ms): open(filename: 0xd09e2ab2, flags: CLOEXEC ) =
+
+export RE_LINE_TRACE_CONTINUED="^\s*(?:$RE_NUMBER|\?)\s*\(\s*($RE_NUMBER\s*ms\s*)?\):\s*($RE_PROCESS_PID\s*)?\.\.\.\s*\[continued\]:\s+\w+\(\).*\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.000 ( 0.000 ms): ... [continued]: nanosleep()) = 0
+# 0.000 ( 0.000 ms): ... [continued]: nanosleep()) = 0x00000000
+# ? ( ): packagekitd/94838 ... [continued]: poll()) = 0 (Timeout)
+#! 0.000 ( 0.000 ms): ... [continued]: nanosleep()) =
+
+export RE_LINE_TRACE_UNFINISHED="^\s*$RE_NUMBER\s*\(\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+\.\.\.\s*$"
+# A line of perf-trace output
+# Examples:
+# 901.040 ( ): in:imjournal/1096 ppoll(ufds: 0x7f701a5adb70, nfds: 1, tsp: 0x7f701a5adaf0, sigsetsize: 8) ...
+# 613.727 ( ): gmain/1099 poll(ufds: 0x56248f6b64b0, nfds: 2, timeout_msecs: 3996) ...
+
+export RE_LINE_TRACE_SUMMARY_HEADER="\s*syscall\s+calls\s+(?:errors\s+)?total\s+min\s+avg\s+max\s+stddev"
+# A header of a perf-trace summary table
+# Examples:
+# syscall calls total min avg max stddev
+# syscall calls errors total min avg max stddev
+
+
+export RE_LINE_TRACE_SUMMARY_CONTENT="^\s*\w+\s+(?:$RE_NUMBER\s+){5,6}$RE_NUMBER%"
+# A line of a perf-trace summary table
+# Examples:
+# open 3 0.017 0.005 0.006 0.007 10.90%
+# openat 2 0 0.017 0.008 0.009 0.010 12.29%
+
+
+export RE_LINE_REPORT_CONTENT="^\s+$RE_NUMBER%\s+\w+\s+\S+\s+\S+\s+\S+" # FIXME
+# A line from typical perf report --stdio output
+# Example:
+# 100.00% sleep [kernel.vmlinux] [k] syscall_return_slowpath
+
+
+export RE_TASK="\s+[\w~\/ \.\+:#-]+(?:\[-1(?:\/\d+)?\]|\[\d+(?:\/\d+)?\])"
+# A name of a task used for perf sched timehist -s
+# Examples:
+# sleep[62755]
+# runtest.sh[62762]
+# gmain[705/682]
+# xfsaild/dm-0[495]
+# kworker/u8:1-ev[62714]
+# :-1[-1/62756]
+# :-1[-1]
+# :-1[62756]
+
+
+export RE_SEGFAULT=".*(?:Segmentation\sfault|SIGSEGV|\score\s|dumped|segfault).*"
+# Possible variations of the segfault message
+# Examples:
+# /bin/bash: line 1: 32 Segmentation fault timeout 15s
+# Segmentation fault (core dumped)
+# Program terminated with signal SIGSEGV
+#! WARNING: 12323431 isn't a 'cpu_core', please use a CPU list in the 'cpu_core' range (0-15)
diff --git a/tools/perf/tests/shell/common/settings.sh b/tools/perf/tests/shell/common/settings.sh
new file mode 100644
index 000000000000..361641dbaaad
--- /dev/null
+++ b/tools/perf/tests/shell/common/settings.sh
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# settings.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file contains global settings for the whole testsuite.
+# Its purpose is to make it easier when it is necessary, e.g. to
+# change the usual sample command which is used in all of the tests
+# in many files.
+#
+# This file is intended to be sourced in the tests.
+#
+
+#### which perf to use in the testing
+export CMD_PERF=${CMD_PERF:-`which perf`}
+
+#### basic programs examined by perf
+export CMD_BASIC_SLEEP="sleep 0.1"
+export CMD_QUICK_SLEEP="sleep 0.01"
+export CMD_LONGER_SLEEP="sleep 2"
+export CMD_DOUBLE_LONGER_SLEEP="sleep 4"
+export CMD_VERY_LONG_SLEEP="sleep 30"
+export CMD_SIMPLE="true"
+
+#### testsuite run mode
+# define constants:
+export RUNMODE_BASIC=0
+export RUNMODE_STANDARD=1
+export RUNMODE_EXPERIMENTAL=2
+# default runmode is STANDARD
+export PERFTOOL_TESTSUITE_RUNMODE=${PERFTOOL_TESTSUITE_RUNMODE:-$RUNMODE_STANDARD}
+
+#### common settings
+export TESTLOG_VERBOSITY=${TESTLOG_VERBOSITY:-2}
+export TESTLOG_FORCE_COLOR=${TESTLOG_FORCE_COLOR:-n}
+export TESTLOG_ERR_MSG_MAX_LINES=${TESTLOG_ERR_MSG_MAX_LINES:-20}
+export TESTLOG_CLEAN=${TESTLOG_CLEAN:-y}
+
+#### other environment-related settings
+export TEST_IGNORE_MISSING_PMU=${TEST_IGNORE_MISSING_PMU:-n}
+
+#### clear locale
+export LC_ALL=C
+
+#### colors
+# accept both "y" and "yes" here, to match the y/n convention of the defaults
+if [ -t 1 -o "$TESTLOG_FORCE_COLOR" = "y" -o "$TESTLOG_FORCE_COLOR" = "yes" ]; then
+	export MPASS="\e[32m"
+	export MALLPASS="\e[1;32m"
+	export MFAIL="\e[31m"
+	export MALLFAIL="\e[1;31m"
+	export MWARN="\e[1;35m"
+	export MSKIP="\e[33m"
+	export MHIGH="\e[1;33m"
+	export MEND="\e[m"
+else
+	export MPASS=""
+	export MALLPASS=""
+	export MFAIL=""
+	export MALLFAIL=""
+	export MWARN=""
+	export MSKIP=""
+	export MHIGH=""
+	export MEND=""
+fi
+
+
+#### test parametrization
+if [ ! -d ./common ]; then
+	# set parameters based on runmode
+	if [ -f ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh ]; then
+		. ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh
+	fi
+	# if some parameters haven't been set by now, set them to defaults
+	if [ -f ../common/parametrization.sh ]; then
+		. 
../common/parametrization.sh + fi +fi diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh index 5d59c32ae3e7..561c93b75d77 100644 --- a/tools/perf/tests/shell/lib/perf_has_symbol.sh +++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh @@ -3,7 +3,7 @@ perf_has_symbol() { - if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then + if perf test -vv -F "Symbols" 2>&1 | grep "[[:space:]]$1$"; then echo "perf does have symbol '$1'" return 0 fi diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py index ea55d5ea1ced..abc1fd737782 100644 --- a/tools/perf/tests/shell/lib/perf_json_output_lint.py +++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py @@ -15,6 +15,7 @@ ap.add_argument('--event', action='store_true') ap.add_argument('--per-core', action='store_true') ap.add_argument('--per-thread', action='store_true') ap.add_argument('--per-cache', action='store_true') +ap.add_argument('--per-cluster', action='store_true') ap.add_argument('--per-die', action='store_true') ap.add_argument('--per-node', action='store_true') ap.add_argument('--per-socket', action='store_true') @@ -49,6 +50,7 @@ def check_json_output(expected_items): 'cgroup': lambda x: True, 'cpu': lambda x: isint(x), 'cache': lambda x: True, + 'cluster': lambda x: True, 'die': lambda x: True, 'event': lambda x: True, 'event-runtime': lambda x: isfloat(x), @@ -88,7 +90,7 @@ try: expected_items = 7 elif args.interval or args.per_thread or args.system_wide_no_aggr: expected_items = 8 - elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cache: + elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache: expected_items = 9 else: # If no option is specified, don't check the number of items. 
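+#
+# For reference, a --per-cluster record is expected to be a JSON object with
+# 9 items, along these lines (sketch only; the cluster id format and the
+# values shown here are illustrative, remaining keys elided):
+#   {"cluster" : "S0-D0-CLS0", "event" : "cycles", "event-runtime" : 11.5, ...}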
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py index 50a34a9cc040..a2d235252183 100644 --- a/tools/perf/tests/shell/lib/perf_metric_validation.py +++ b/tools/perf/tests/shell/lib/perf_metric_validation.py @@ -1,4 +1,4 @@ -#SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0 import re import csv import json @@ -6,36 +6,61 @@ import argparse from pathlib import Path import subprocess + +class TestError: + def __init__(self, metric: list[str], wl: str, value: list[float], low: float, up=float('nan'), description=str()): + self.metric: list = metric # multiple metrics in relationship type tests + self.workloads = [wl] # multiple workloads possible + self.collectedValue: list = value + self.valueLowBound = low + self.valueUpBound = up + self.description = description + + def __repr__(self) -> str: + if len(self.metric) > 1: + return "\nMetric Relationship Error: \tThe collected value of metric {0}\n\ + \tis {1} in workload(s): {2} \n\ + \tbut expected value range is [{3}, {4}]\n\ + \tRelationship rule description: \'{5}\'".format(self.metric, self.collectedValue, self.workloads, + self.valueLowBound, self.valueUpBound, self.description) + elif len(self.collectedValue) == 0: + return "\nNo Metric Value Error: \tMetric {0} returns with no value \n\ + \tworkload(s): {1}".format(self.metric, self.workloads) + else: + return "\nWrong Metric Value Error: \tThe collected value of metric {0}\n\ + \tis {1} in workload(s): {2}\n\ + \tbut expected value range is [{3}, {4}]"\ + .format(self.metric, self.collectedValue, self.workloads, + self.valueLowBound, self.valueUpBound) + + class Validator: def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''): self.rulefname = rulefname self.reportfname = reportfname self.rules = None - self.collectlist:str = metrics + self.collectlist: str = metrics self.metrics = self.__set_metrics(metrics) self.skiplist = set() self.tolerance = t self.workloads = [x for x in workload.split(",") if x] - self.wlidx = 0 # idx of current workloads - self.allresults = dict() # metric results of all workload - self.allignoremetrics = dict() # metrics with no results or negative results - self.allfailtests = dict() + self.wlidx = 0 # idx of current workloads + self.allresults = dict() # metric results of all workload self.alltotalcnt = dict() self.allpassedcnt = dict() - self.allerrlist = dict() - self.results = dict() # metric results of current workload + self.results = dict() # metric results of current workload # vars for test pass/failure statistics - self.ignoremetrics= set() # metrics with no results or negative results, neg result counts as a failed test - self.failtests = dict() + # metrics with no results or negative results, neg result counts failed tests + self.ignoremetrics = set() self.totalcnt = 0 self.passedcnt = 0 # vars for errors self.errlist = list() # vars for Rule Generator - self.pctgmetrics = set() # Percentage rule + self.pctgmetrics = set() # Percentage rule # vars for debug self.datafname = datafname @@ -69,10 +94,10 @@ class Validator: ensure_ascii=True, indent=4) - def get_results(self, idx:int = 0): + def get_results(self, idx: int = 0): return self.results[idx] - def get_bounds(self, lb, ub, error, alias={}, ridx:int = 0) -> list: + def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list: """ Get bounds and tolerance from lb, ub, and error. 
If missing lb, use 0.0; missing ub, use float('inf'); missing error, use self.tolerance.
@@ -85,7 +110,7 @@ class Validator:
        tolerance, denormalized based on the upper bound value
        """
        # init ubv and lbv to invalid values
-        def get_bound_value (bound, initval, ridx):
+        def get_bound_value(bound, initval, ridx):
            val = initval
            if isinstance(bound, int) or isinstance(bound, float):
                val = bound
@@ -113,10 +138,10 @@ class Validator:
        return lbv, ubv, denormerr

-    def get_value(self, name:str, ridx:int = 0) -> list:
+    def get_value(self, name: str, ridx: int = 0) -> list:
        """
        Get value of the metric from self.results.
-        If result of this metric is not provided, the metric name will be added into self.ignoremetics and self.errlist.
+        If result of this metric is not provided, the metric name will be added into self.ignoremetrics.
        All future test(s) on this metric will fail.

        @param name: name of the metric
@@ -142,7 +167,7 @@ class Validator:
        Check if metrics value are non-negative.
        One metric is counted as one test.
        Failure: when metric value is negative or not provided.
-        Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics.
+        Metrics with negative value will be added into self.ignoremetrics.
        """
        negmetric = dict()
        pcnt = 0
@@ -155,25 +180,27 @@ class Validator:
            else:
                pcnt += 1
            tcnt += 1
+        # The first round of collect_perf() runs these metrics with the simple
+        # workload "true". We give metrics a second chance with a longer
+        # workload if fewer than 20 metrics failed the positive test.
        if len(rerun) > 0 and len(rerun) < 20:
            second_results = dict()
            self.second_test(rerun, second_results)
            for name, val in second_results.items():
-                if name not in negmetric: continue
+                if name not in negmetric:
+                    continue
                if val >= 0:
                    del negmetric[name]
                    pcnt += 1
-        self.failtests['PositiveValueTest']['Total Tests'] = tcnt
-        self.failtests['PositiveValueTest']['Passed Tests'] = pcnt
        if len(negmetric.keys()):
            self.ignoremetrics.update(negmetric.keys())
-            negmessage = ["{0}(={1:.4f})".format(name, val) for name, val in negmetric.items()]
-            self.failtests['PositiveValueTest']['Failed Tests'].append({'NegativeValue': negmessage})
+            self.errlist.extend(
+                [TestError([m], self.workloads[self.wlidx], negmetric[m], 0) for m in negmetric.keys()])

        return

-    def evaluate_formula(self, formula:str, alias:dict, ridx:int = 0):
+    def evaluate_formula(self, formula: str, alias: dict, ridx: int = 0):
        """
        Evaluate the value of formula.
@@ -187,10 +214,11 @@ class Validator:
        sign = "+"
        f = str()
-        #TODO: support parenthesis?
+        # TODO: support parentheses?
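+        # e.g. for a formula "a+b" (alias names illustrative), the scan below
+        # walks the string left to right, splits on the operators +-*/, maps
+        # each alias through 'alias' to its metric name, and looks up the
+        # metric's collected value via get_value()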
for i in range(len(formula)): if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'): - s = alias[formula[b:i]] if i+1 < len(formula) else alias[formula[b:]] + s = alias[formula[b:i]] if i + \ + 1 < len(formula) else alias[formula[b:]] v = self.get_value(s, ridx) if not v: errs.append(s) @@ -228,49 +256,49 @@ class Validator: alias = dict() for m in rule['Metrics']: alias[m['Alias']] = m['Name'] - lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex']) - val, f = self.evaluate_formula(rule['Formula'], alias, ridx=rule['RuleIndex']) + lbv, ubv, t = self.get_bounds( + rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex']) + val, f = self.evaluate_formula( + rule['Formula'], alias, ridx=rule['RuleIndex']) + + lb = rule['RangeLower'] + ub = rule['RangeUpper'] + if isinstance(lb, str): + if lb in alias: + lb = alias[lb] + if isinstance(ub, str): + if ub in alias: + ub = alias[ub] + if val == -1: - self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Description':f}) + self.errlist.append(TestError([m['Name'] for m in rule['Metrics']], self.workloads[self.wlidx], [], + lb, ub, rule['Description'])) elif not self.check_bound(val, lbv, ubv, t): - lb = rule['RangeLower'] - ub = rule['RangeUpper'] - if isinstance(lb, str): - if lb in alias: - lb = alias[lb] - if isinstance(ub, str): - if ub in alias: - ub = alias[ub] - self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Formula':f, - 'RangeLower': lb, 'LowerBoundValue': self.get_value(lb), - 'RangeUpper': ub, 'UpperBoundValue':self.get_value(ub), - 'ErrorThreshold': t, 'CollectedValue': val}) + self.errlist.append(TestError([m['Name'] for m in rule['Metrics']], self.workloads[self.wlidx], [val], + lb, ub, rule['Description'])) else: self.passedcnt += 1 - self.failtests['RelationshipTest']['Passed Tests'] += 1 self.totalcnt += 1 - self.failtests['RelationshipTest']['Total Tests'] += 1 return - # Single Metric Test - def single_test(self, rule:dict): + def single_test(self, rule: dict): """ Validate if the metrics are in the required value range. eg. lower_bound <= metrics_value <= upper_bound One metric is counted as one test in this type of test. One rule may include one or more metrics. Failure: when the metric value not provided or the value is outside the bounds. - This test updates self.total_cnt and records failed tests in self.failtest['SingleMetricTest']. + This test updates self.total_cnt. 
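+
+        For reference, a rule is a dict along these lines (sketch; the metric
+        name and bounds are illustrative):
+          {'RuleIndex': 1, 'TestType': 'SingleMetricTest', 'RangeLower': '0',
+           'RangeUpper': '100', 'ErrorThreshold': 5.0,
+           'Metrics': [{'Name': 'some_metric'}]}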
@param rule: dict with metrics to validate and the value range requirement """ - lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold']) + lbv, ubv, t = self.get_bounds( + rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold']) metrics = rule['Metrics'] passcnt = 0 totalcnt = 0 - faillist = list() failures = dict() rerun = list() for m in metrics: @@ -286,25 +314,20 @@ class Validator: second_results = dict() self.second_test(rerun, second_results) for name, val in second_results.items(): - if name not in failures: continue + if name not in failures: + continue if self.check_bound(val, lbv, ubv, t): passcnt += 1 del failures[name] else: - failures[name] = val + failures[name] = [val] self.results[0][name] = val self.totalcnt += totalcnt self.passedcnt += passcnt - self.failtests['SingleMetricTest']['Total Tests'] += totalcnt - self.failtests['SingleMetricTest']['Passed Tests'] += passcnt if len(failures.keys()) != 0: - faillist = [{'MetricName':name, 'CollectedValue':val} for name, val in failures.items()] - self.failtests['SingleMetricTest']['Failed Tests'].append({'RuleIndex':rule['RuleIndex'], - 'RangeLower': rule['RangeLower'], - 'RangeUpper': rule['RangeUpper'], - 'ErrorThreshold':rule['ErrorThreshold'], - 'Failure':faillist}) + self.errlist.extend([TestError([name], self.workloads[self.wlidx], val, + rule['RangeLower'], rule['RangeUpper']) for name, val in failures.items()]) return @@ -312,19 +335,11 @@ class Validator: """ Create final report and write into a JSON file. """ - alldata = list() - for i in range(0, len(self.workloads)): - reportstas = {"Total Rule Count": self.alltotalcnt[i], "Passed Rule Count": self.allpassedcnt[i]} - data = {"Metric Validation Statistics": reportstas, "Tests in Category": self.allfailtests[i], - "Errors":self.allerrlist[i]} - alldata.append({"Workload": self.workloads[i], "Report": data}) - - json_str = json.dumps(alldata, indent=4) - print("Test validation finished. Final report: ") - print(json_str) + print(self.errlist) if self.debug: - allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} for i in range(0, len(self.workloads))] + allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} + for i in range(0, len(self.workloads))] self.json_dump(allres, self.datafname) def check_rule(self, testtype, metric_list): @@ -342,13 +357,13 @@ class Validator: return True # Start of Collector and Converter - def convert(self, data: list, metricvalues:dict): + def convert(self, data: list, metricvalues: dict): """ Convert collected metric data from the -j output to dict of {metric_name:value}. """ for json_string in data: try: - result =json.loads(json_string) + result = json.loads(json_string) if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "": name = result["metric-unit"].split(" ")[1] if len(result["metric-unit"].split(" ")) > 1 \ else result["metric-unit"] @@ -365,9 +380,10 @@ class Validator: print(" ".join(command)) cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8') data = [x+'}' for x in cmd.stderr.split('}\n') if x] + if data[0][0] != '{': + data[0] = data[0][data[0].find('{'):] return data - def collect_perf(self, workload: str): """ Collect metric data with "perf stat -M" on given workload with -a and -j. 
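+        Roughly equivalent to running, per metric (sketch; see _run_perf for
+        the exact command line):
+          perf stat -M <metric> -a -j -- <workload>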
@@ -385,14 +401,18 @@ class Validator: if rule["TestType"] == "RelationshipTest": metrics = [m["Name"] for m in rule["Metrics"]] if not any(m not in collectlist[0] for m in metrics): - collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))] + collectlist[rule["RuleIndex"]] = [ + ",".join(list(set(metrics)))] for idx, metrics in collectlist.items(): - if idx == 0: wl = "true" - else: wl = workload + if idx == 0: + wl = "true" + else: + wl = workload for metric in metrics: data = self._run_perf(metric, wl) - if idx not in self.results: self.results[idx] = dict() + if idx not in self.results: + self.results[idx] = dict() self.convert(data, self.results[idx]) return @@ -412,7 +432,8 @@ class Validator: 2) create metric name list """ command = ['perf', 'list', '-j', '--details', 'metrics'] - cmd = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') + cmd = subprocess.run(command, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, encoding='utf-8') try: data = json.loads(cmd.stdout) for m in data: @@ -453,12 +474,12 @@ class Validator: rules = data['RelationshipRules'] self.skiplist = set([name.lower() for name in data['SkipList']]) self.rules = self.remove_unsupported_rules(rules) - pctgrule = {'RuleIndex':0, - 'TestType':'SingleMetricTest', - 'RangeLower':'0', + pctgrule = {'RuleIndex': 0, + 'TestType': 'SingleMetricTest', + 'RangeLower': '0', 'RangeUpper': '100', 'ErrorThreshold': self.tolerance, - 'Description':'Metrics in percent unit have value with in [0, 100]', + 'Description': 'Metrics in percent unit have value with in [0, 100]', 'Metrics': [{'Name': m.lower()} for m in self.pctgmetrics]} self.rules.append(pctgrule) @@ -469,8 +490,9 @@ class Validator: idx += 1 if self.debug: - #TODO: need to test and generate file name correctly - data = {'RelationshipRules':self.rules, 'SupportedMetrics': [{"MetricName": name} for name in self.metrics]} + # TODO: need to test and generate file name correctly + data = {'RelationshipRules': self.rules, 'SupportedMetrics': [ + {"MetricName": name} for name in self.metrics]} self.json_dump(data, self.fullrulefname) return @@ -482,20 +504,17 @@ class Validator: @param key: key to the dictionaries (index of self.workloads). 
''' self.allresults[key] = self.results - self.allignoremetrics[key] = self.ignoremetrics - self.allfailtests[key] = self.failtests self.alltotalcnt[key] = self.totalcnt self.allpassedcnt[key] = self.passedcnt - self.allerrlist[key] = self.errlist - #Initialize data structures before data validation of each workload + # Initialize data structures before data validation of each workload def _init_data(self): - testtypes = ['PositiveValueTest', 'RelationshipTest', 'SingleMetricTest'] + testtypes = ['PositiveValueTest', + 'RelationshipTest', 'SingleMetricTest'] self.results = dict() - self.ignoremetrics= set() + self.ignoremetrics = set() self.errlist = list() - self.failtests = {k:{'Total Tests':0, 'Passed Tests':0, 'Failed Tests':[]} for k in testtypes} self.totalcnt = 0 self.passedcnt = 0 @@ -525,32 +544,33 @@ class Validator: testtype = r['TestType'] if not self.check_rule(testtype, r['Metrics']): continue - if testtype == 'RelationshipTest': + if testtype == 'RelationshipTest': self.relationship_test(r) elif testtype == 'SingleMetricTest': self.single_test(r) else: print("Unsupported Test Type: ", testtype) - self.errlist.append("Unsupported Test Type from rule: " + r['RuleIndex']) - self._storewldata(i) print("Workload: ", self.workloads[i]) - print("Total metrics collected: ", self.failtests['PositiveValueTest']['Total Tests']) - print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests']) print("Total Test Count: ", self.totalcnt) print("Passed Test Count: ", self.passedcnt) - + self._storewldata(i) self.create_report() - return sum(self.alltotalcnt.values()) != sum(self.allpassedcnt.values()) + return len(self.errlist) > 0 # End of Class Validator def main() -> None: - parser = argparse.ArgumentParser(description="Launch metric value validation") - - parser.add_argument("-rule", help="Base validation rule file", required=True) - parser.add_argument("-output_dir", help="Path for validator output file, report file", required=True) - parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_true", default=False) - parser.add_argument("-wl", help="Workload to run while data collection", default="true") + parser = argparse.ArgumentParser( + description="Launch metric value validation") + + parser.add_argument( + "-rule", help="Base validation rule file", required=True) + parser.add_argument( + "-output_dir", help="Path for validator output file, report file", required=True) + parser.add_argument("-debug", help="Debug run, save intermediate data to files", + action="store_true", default=False) + parser.add_argument( + "-wl", help="Workload to run while data collection", default="true") parser.add_argument("-m", help="Metric list to validate", default="") args = parser.parse_args() outpath = Path(args.output_dir) @@ -559,8 +579,8 @@ def main() -> None: datafile = Path.joinpath(outpath, 'perf_data.json') validator = Validator(args.rule, reportf, debug=args.debug, - datafname=datafile, fullrulefname=fullrule, workload=args.wl, - metrics=args.m) + datafname=datafile, fullrulefname=fullrule, workload=args.wl, + metrics=args.m) ret = validator.test() return ret @@ -569,6 +589,3 @@ def main() -> None: if __name__ == "__main__": import sys sys.exit(main()) - - - diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh index 3cc158a64326..9a176ceae4a3 100644 --- a/tools/perf/tests/shell/lib/stat_output.sh +++ b/tools/perf/tests/shell/lib/stat_output.sh @@ -79,7 +79,7 @@ check_per_thread() 
echo "[Skip] paranoid and not root" return fi - perf stat --per-thread -a $2 true + perf stat --per-thread -p $$ $2 true commachecker --per-thread echo "[Success]" } @@ -97,6 +97,18 @@ check_per_cache_instance() echo "[Success]" } +check_per_cluster() +{ + echo -n "Checking $1 output: per cluster " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoid and not root" + return + fi + perf stat --per-cluster -a $2 true + echo "[Success]" +} + check_per_die() { echo -n "Checking $1 output: per die " diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh new file mode 100755 index 000000000000..a0fec33a0358 --- /dev/null +++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# perftool-testsuite_probe +# SPDX-License-Identifier: GPL-2.0 + +test -d "$(dirname "$0")/base_probe" || exit 2 +cd "$(dirname "$0")/base_probe" || exit 2 +status=0 + +PERFSUITE_RUN_DIR=$(mktemp -d /tmp/"$(basename "$0" .sh)".XXX) +export PERFSUITE_RUN_DIR + +for testcase in setup.sh test_*; do # skip setup.sh if not present or not executable + test -x "$testcase" || continue + ./"$testcase" + (( status += $? )) +done + +if ! [ "$PERFTEST_KEEP_LOGS" = "y" ]; then + rm -rf "$PERFSUITE_RUN_DIR" +fi + +test $status -ne 0 && exit 1 +exit 0 diff --git a/tools/perf/tests/shell/script.sh b/tools/perf/tests/shell/script.sh index fa4d71e2e72a..c1a603653662 100755 --- a/tools/perf/tests/shell/script.sh +++ b/tools/perf/tests/shell/script.sh @@ -17,7 +17,7 @@ cleanup() sane=$(echo "${temp_dir}" | cut -b 1-21) if [ "${sane}" = "/tmp/perf-test-script" ] ; then echo "--- Cleaning up ---" - rm -f "${temp_dir}/"* + rm -rf "${temp_dir:?}/"* rmdir "${temp_dir}" fi } @@ -65,7 +65,31 @@ _end_of_file_ echo "DB test [Success]" } +test_parallel_perf() +{ + echo "parallel-perf test" + if ! python3 --version >/dev/null 2>&1 ; then + echo "SKIP: no python3" + err=2 + return + fi + pp=$(dirname "$0")/../../scripts/python/parallel-perf.py + if [ ! 
-f "${pp}" ] ; then + echo "SKIP: parallel-perf.py script not found " + err=2 + return + fi + perf_data="${temp_dir}/pp-perf.data" + output1_dir="${temp_dir}/output1" + output2_dir="${temp_dir}/output2" + perf record -o "${perf_data}" --sample-cpu uname + python3 "${pp}" -o "${output1_dir}" --jobs 4 --verbose -- perf script -i "${perf_data}" + python3 "${pp}" -o "${output2_dir}" --jobs 4 --verbose --per-cpu -- perf script -i "${perf_data}" + echo "parallel-perf test [Success]" +} + test_db +test_parallel_perf cleanup diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh index f1818fa6d9ce..fc2d8cc6e5e0 100755 --- a/tools/perf/tests/shell/stat+csv_output.sh +++ b/tools/perf/tests/shell/stat+csv_output.sh @@ -42,6 +42,7 @@ function commachecker() ;; "--per-socket") exp=8 ;; "--per-node") exp=8 ;; "--per-die") exp=8 + ;; "--per-cluster") exp=8 ;; "--per-cache") exp=8 esac @@ -79,6 +80,7 @@ then check_system_wide_no_aggr "CSV" "$perf_cmd" check_per_core "CSV" "$perf_cmd" check_per_cache_instance "CSV" "$perf_cmd" + check_per_cluster "CSV" "$perf_cmd" check_per_die "CSV" "$perf_cmd" check_per_socket "CSV" "$perf_cmd" else diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh index 3bc900533a5d..6b630d33c328 100755 --- a/tools/perf/tests/shell/stat+json_output.sh +++ b/tools/perf/tests/shell/stat+json_output.sh @@ -105,7 +105,7 @@ check_per_thread() echo "[Skip] paranoia and not root" return fi - perf stat -j --per-thread -a -o "${stat_output}" true + perf stat -j --per-thread -p $$ -o "${stat_output}" true $PYTHON $pythonchecker --per-thread --file "${stat_output}" echo "[Success]" } @@ -122,6 +122,18 @@ check_per_cache_instance() echo "[Success]" } +check_per_cluster() +{ + echo -n "Checking json output: per cluster " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoia and not root" + return + fi + perf stat -j --per-cluster -a true 2>&1 | $PYTHON $pythonchecker --per-cluster + echo "[Success]" +} + check_per_die() { echo -n "Checking json output: per die " @@ -200,6 +212,7 @@ then check_system_wide_no_aggr check_per_core check_per_cache_instance + check_per_cluster check_per_die check_per_socket else diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh index 4fcdd1a9142c..cbf2894b2c84 100755 --- a/tools/perf/tests/shell/stat+std_output.sh +++ b/tools/perf/tests/shell/stat+std_output.sh @@ -13,7 +13,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX) event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses) event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches") -skip_metric=("stalled cycles per insn" "tma_") +skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound") cleanup() { rm -f "${stat_output}" @@ -40,6 +40,7 @@ function commachecker() ;; "--per-node") prefix=3 ;; "--per-die") prefix=3 ;; "--per-cache") prefix=3 + ;; "--per-cluster") prefix=3 esac while read line @@ -99,6 +100,7 @@ then check_system_wide_no_aggr "STD" "$perf_cmd" check_per_core "STD" "$perf_cmd" check_per_cache_instance "STD" "$perf_cmd" + check_per_cluster "STD" "$perf_cmd" check_per_die "STD" "$perf_cmd" check_per_socket "STD" "$perf_cmd" else diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh 
b/tools/perf/tests/shell/stat_bpf_counters.sh
index a87bb2814b4c..61f8149d854e 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -4,21 +4,59 @@
 set -e
 
-# check whether $2 is within +/- 10% of $1
+workload="perf bench sched messaging -g 1 -l 100 -t"
+
+# check whether $2 is within +/- 20% of $1
 compare_number()
 {
-	first_num=$1
-	second_num=$2
-
-	# upper bound is first_num * 110%
-	upper=$(expr $first_num + $first_num / 10 )
-	# lower bound is first_num * 90%
-	lower=$(expr $first_num - $first_num / 10 )
-
-	if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
-		echo "The difference between $first_num and $second_num are greater than 10%."
-		exit 1
-	fi
+	first_num=$1
+	second_num=$2
+
+	# upper bound is first_num * 120%
+	upper=$(expr $first_num + $first_num / 5 )
+	# lower bound is first_num * 80%
+	lower=$(expr $first_num - $first_num / 5 )
+
+	if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
+		echo "The difference between $first_num and $second_num is greater than 20%."
+		exit 1
+	fi
+}
+
+check_counts()
+{
+	base_cycles=$1
+	bpf_cycles=$2
+
+	if [ "$base_cycles" = "<not" ]; then
+		echo "Skipping: cycles event not counted"
+		exit 2
+	fi
+	if [ "$bpf_cycles" = "<not" ]; then
+		echo "Failed: cycles not counted with --bpf-counters"
+		exit 1
+	fi
+}
+
+test_bpf_counters()
+{
+	printf "Testing --bpf-counters "
+	base_cycles=$(perf stat --no-big-num -e cycles -- $workload 2>&1 | awk '/cycles/ {print $1}')
+	bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- $workload 2>&1 | awk '/cycles/ {print $1}')
+	check_counts $base_cycles $bpf_cycles
+	compare_number $base_cycles $bpf_cycles
+	echo "[Success]"
+}
+
+test_bpf_modifier()
+{
+	printf "Testing bpf event modifier "
+	stat_output=$(perf stat --no-big-num -e cycles/name=base_cycles/,cycles/name=bpf_cycles/b -- $workload 2>&1)
+	base_cycles=$(echo "$stat_output"| awk '/base_cycles/ {print $1}')
+	bpf_cycles=$(echo "$stat_output"| awk '/bpf_cycles/ {print $1}')
+	check_counts $base_cycles $bpf_cycles
+	compare_number $base_cycles $bpf_cycles
+	echo "[Success]"
+}
 
 # skip if --bpf-counters is not supported
@@ -30,16 +68,7 @@
 if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
 	exit 2
 fi
 
-base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
-if [ "$base_cycles" = "<not" ]; then
-	echo "Skipping: cycles event not counted"
-	exit 2
-fi
-bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
-if [ "$bpf_cycles" = "<not" ]; then
-	echo "Failed: cycles not counted with --bpf-counters"
-	exit 1
-fi
+test_bpf_counters
+test_bpf_modifier
 
-compare_number $base_cycles $bpf_cycles
 exit 0
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
index 7ca172599aa6..279f19c5919a 100755
--- a/tools/perf/tests/shell/stat_metrics_values.sh
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -19,6 +19,8 @@
 echo "Output will be stored in: $tmpdir"
 $PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
 ret=$?
 rm -rf $tmpdir
-
+if [ $ret -ne 0 ]; then
+	echo "Metric validation returned with errors. Please check the metrics reported with errors."
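+	# for debugging, the validator can be re-run by hand, e.g.:
+	#   python3 lib/perf_metric_validation.py -rule <rulefile> -output_dir <dir> -wl true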
+fi exit $ret diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh index e342e6c8aa50..61898e256616 100755 --- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh +++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh @@ -6,7 +6,15 @@ shelldir=$(dirname "$0") # shellcheck source=lib/perf_has_symbol.sh . "${shelldir}"/lib/perf_has_symbol.sh -lscpu | grep -q "aarch64" || exit 2 +if [ "$(uname -m)" != "aarch64" ]; then + exit 2 +fi + +if perf version --build-options | grep HAVE_DWARF_UNWIND_SUPPORT | grep -q OFF +then + echo "Skipping, no dwarf unwind support" + exit 2 +fi skip_test_missing_symbol leafloop diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh index 65dd85207125..3302ea0b9672 100755 --- a/tools/perf/tests/shell/test_arm_coresight.sh +++ b/tools/perf/tests/shell/test_arm_coresight.sh @@ -188,7 +188,7 @@ arm_cs_etm_snapshot_test() { arm_cs_etm_basic_test() { echo "Recording trace with '$*'" - perf record -o ${perfdata} "$@" -- ls > /dev/null 2>&1 + perf record -o ${perfdata} "$@" -m,8M -- ls > /dev/null 2>&1 perf_script_branch_samples ls && perf_report_branch_samples ls && diff --git a/tools/perf/tests/symbols.c b/tools/perf/tests/symbols.c index 16e1c5502b09..ee20a366f32f 100644 --- a/tools/perf/tests/symbols.c +++ b/tools/perf/tests/symbols.c @@ -41,6 +41,30 @@ static void exit_test_info(struct test_info *ti) machine__delete(ti->machine); } +struct dso_map { + struct dso *dso; + struct map *map; +}; + +static int find_map_cb(struct map *map, void *d) +{ + struct dso_map *data = d; + + if (map__dso(map) != data->dso) + return 0; + data->map = map; + return 1; +} + +static struct map *find_module_map(struct machine *machine, struct dso *dso) +{ + struct dso_map data = { .dso = dso }; + + machine__for_each_kernel_map(machine, find_map_cb, &data); + + return data.map; +} + static void get_test_dso_filename(char *filename, size_t max_sz) { if (dso_to_test) @@ -51,6 +75,26 @@ static void get_test_dso_filename(char *filename, size_t max_sz) static int create_map(struct test_info *ti, char *filename, struct map **map_p) { + struct dso *dso = machine__findnew_dso(ti->machine, filename); + + /* + * If 'filename' matches a current kernel module, must use a kernel + * map. Find the one that already exists. 
+ */ + if (dso && dso__kernel(dso) != DSO_SPACE__USER) { + *map_p = find_module_map(ti->machine, dso); + dso__put(dso); + if (!*map_p) { + pr_debug("Failed to find map for current kernel module %s", + filename); + return TEST_FAIL; + } + map__get(*map_p); + return TEST_OK; + } + + dso__put(dso); + /* Create a dummy map at 0x100000 */ *map_p = map__new(ti->machine, 0x100000, 0xffffffff, 0, NULL, PROT_EXEC, 0, NULL, filename, ti->thread); @@ -72,7 +116,7 @@ static int test_dso(struct dso *dso) if (verbose > 1) dso__fprintf(dso, stderr); - for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) { + for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); if (sym->type != STT_FUNC && sym->type != STT_GNU_IFUNC) @@ -97,6 +141,26 @@ static int test_dso(struct dso *dso) return ret; } +static int subdivided_dso_cb(struct dso *dso, struct machine *machine __maybe_unused, void *d) +{ + struct dso *text_dso = d; + + if (dso != text_dso && strstarts(dso__short_name(dso), dso__short_name(text_dso))) + if (test_dso(dso) != TEST_OK) + return -1; + + return 0; +} + +static int process_subdivided_dso(struct machine *machine, struct dso *dso) +{ + int ret; + + ret = machine__for_each_dso(machine, subdivided_dso_cb, dso); + + return ret < 0 ? TEST_FAIL : TEST_OK; +} + static int test_file(struct test_info *ti, char *filename) { struct map *map = NULL; @@ -124,6 +188,10 @@ static int test_file(struct test_info *ti, char *filename) } ret = test_dso(dso); + + /* Module dso is split into many dsos by section */ + if (ret == TEST_OK && dso__kernel(dso) != DSO_SPACE__USER) + ret = process_subdivided_dso(ti->machine, dso); out_put: map__put(map); diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c new file mode 100644 index 000000000000..e2042b368269 --- /dev/null +++ b/tools/perf/tests/tests-scripts.c @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <dirent.h> +#include <errno.h> +#include <fcntl.h> +#include <linux/ctype.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/zalloc.h> +#include <string.h> +#include <stdlib.h> +#include <sys/types.h> +#include <unistd.h> +#include <subcmd/exec-cmd.h> +#include <subcmd/parse-options.h> +#include <sys/wait.h> +#include <sys/stat.h> +#include <api/io.h> +#include "builtin.h" +#include "tests-scripts.h" +#include "color.h" +#include "debug.h" +#include "hist.h" +#include "intlist.h" +#include "string2.h" +#include "symbol.h" +#include "tests.h" +#include "util/rlimit.h" +#include "util/util.h" + +static int shell_tests__dir_fd(void) +{ + char path[PATH_MAX], *exec_path; + static const char * const devel_dirs[] = { "./tools/perf/tests/shell", "./tests/shell", }; + + for (size_t i = 0; i < ARRAY_SIZE(devel_dirs); ++i) { + int fd = open(devel_dirs[i], O_PATH); + + if (fd >= 0) + return fd; + } + + /* Then installed path. 
*/ + exec_path = get_argv_exec_path(); + scnprintf(path, sizeof(path), "%s/tests/shell", exec_path); + free(exec_path); + return open(path, O_PATH); +} + +static char *shell_test__description(int dir_fd, const char *name) +{ + struct io io; + char buf[128], desc[256]; + int ch, pos = 0; + + io__init(&io, openat(dir_fd, name, O_RDONLY), buf, sizeof(buf)); + if (io.fd < 0) + return NULL; + + /* Skip first line - should be #!/bin/sh Shebang */ + if (io__get_char(&io) != '#') + goto err_out; + if (io__get_char(&io) != '!') + goto err_out; + do { + ch = io__get_char(&io); + if (ch < 0) + goto err_out; + } while (ch != '\n'); + + do { + ch = io__get_char(&io); + if (ch < 0) + goto err_out; + } while (ch == '#' || isspace(ch)); + while (ch > 0 && ch != '\n') { + desc[pos++] = ch; + if (pos >= (int)sizeof(desc) - 1) + break; + ch = io__get_char(&io); + } + while (pos > 0 && isspace(desc[--pos])) + ; + desc[++pos] = '\0'; + close(io.fd); + return strdup(desc); +err_out: + close(io.fd); + return NULL; +} + +/* Is this full file path a shell script */ +static bool is_shell_script(int dir_fd, const char *path) +{ + const char *ext; + + ext = strrchr(path, '.'); + if (!ext) + return false; + if (!strcmp(ext, ".sh")) { /* Has .sh extension */ + if (faccessat(dir_fd, path, R_OK | X_OK, 0) == 0) /* Is executable */ + return true; + } + return false; +} + +/* Is this file in this dir a shell script (for test purposes) */ +static bool is_test_script(int dir_fd, const char *name) +{ + return is_shell_script(dir_fd, name); +} + +/* Duplicate a string and fall over and die if we run out of memory */ +static char *strdup_check(const char *str) +{ + char *newstr; + + newstr = strdup(str); + if (!newstr) { + pr_err("Out of memory while duplicating test script string\n"); + abort(); + } + return newstr; +} + +static int shell_test__run(struct test_suite *test, int subtest __maybe_unused) +{ + const char *file = test->priv; + int err; + char *cmd = NULL; + + if (asprintf(&cmd, "%s%s", file, verbose ? " -v" : "") < 0) + return TEST_FAIL; + err = system(cmd); + free(cmd); + if (!err) + return TEST_OK; + + return WEXITSTATUS(err) == 2 ? 
TEST_SKIP : TEST_FAIL;
+}
+
+static void append_script(int dir_fd, const char *name, char *desc,
+			  struct test_suite ***result,
+			  size_t *result_sz)
+{
+	char filename[PATH_MAX], link[128];
+	struct test_suite *test_suite, **result_tmp;
+	struct test_case *tests;
+	ssize_t len;	/* readlink() result; must be signed for the error check */
+
+	snprintf(link, sizeof(link), "/proc/%d/fd/%d", getpid(), dir_fd);
+	len = readlink(link, filename, sizeof(filename));
+	if (len < 0) {
+		pr_err("Failed to readlink %s\n", link);
+		return;
+	}
+	filename[len++] = '/';
+	strcpy(&filename[len], name);
+
+	tests = calloc(2, sizeof(*tests));
+	if (!tests) {
+		pr_err("Out of memory while building script test suite list\n");
+		return;
+	}
+	tests[0].name = strdup_check(name);
+	tests[0].desc = strdup_check(desc);
+	tests[0].run_case = shell_test__run;
+
+	test_suite = zalloc(sizeof(*test_suite));
+	if (!test_suite) {
+		pr_err("Out of memory while building script test suite list\n");
+		free(tests);
+		return;
+	}
+	test_suite->desc = desc;
+	test_suite->test_cases = tests;
+	test_suite->priv = strdup_check(filename);
+	/* Realloc is good enough, though we could realloc by chunks, not that
+	 * anyone will ever measure performance here */
+	result_tmp = realloc(*result, (*result_sz + 1) * sizeof(*result_tmp));
+	if (result_tmp == NULL) {
+		pr_err("Out of memory while building script test suite list\n");
+		free(tests);
+		free(test_suite);
+		return;
+	}
+	/* Add file to end and NULL terminate the struct array */
+	*result = result_tmp;
+	(*result)[*result_sz] = test_suite;
+	(*result_sz)++;
+}
+
+static void append_scripts_in_dir(int dir_fd,
+				  struct test_suite ***result,
+				  size_t *result_sz)
+{
+	struct dirent **entlist;
+	struct dirent *ent;
+	int n_dirs, i;
+
+	/* List files, sorted by alpha */
+	n_dirs = scandirat(dir_fd, ".", &entlist, NULL, alphasort);
+	if (n_dirs == -1)
+		return;
+	for (i = 0; i < n_dirs && (ent = entlist[i]); i++) {
+		int fd;
+
+		if (ent->d_name[0] == '.')
+			continue; /* Skip hidden files */
+		if (is_test_script(dir_fd, ent->d_name)) { /* It's a test */
+			char *desc = shell_test__description(dir_fd, ent->d_name);
+
+			if (desc) /* It has a desc line - valid script */
+				append_script(dir_fd, ent->d_name, desc, result, result_sz);
+			continue;
+		}
+		if (ent->d_type != DT_DIR) {
+			struct stat st;
+
+			if (ent->d_type != DT_UNKNOWN)
+				continue;
+			fstatat(dir_fd, ent->d_name, &st, 0);
+			if (!S_ISDIR(st.st_mode))
+				continue;
+		}
+		fd = openat(dir_fd, ent->d_name, O_PATH);
+		append_scripts_in_dir(fd, result, result_sz);
+		close(fd); /* don't leak the per-subdirectory fd */
+	}
+	for (i = 0; i < n_dirs; i++) /* Clean up */
+		zfree(&entlist[i]);
+	free(entlist);
+}
+
+struct test_suite **create_script_test_suites(void)
+{
+	struct test_suite **result = NULL, **result_tmp;
+	size_t result_sz = 0;
+	int dir_fd = shell_tests__dir_fd(); /* Walk dir */
+
+	/*
+	 * Append scripts if fd is good, otherwise return a NULL terminated zero
+	 * length array.
+	 */
+	if (dir_fd >= 0)
+		append_scripts_in_dir(dir_fd, &result, &result_sz);
+
+	result_tmp = realloc(result, (result_sz + 1) * sizeof(*result_tmp));
+	if (result_tmp == NULL) {
+		pr_err("Out of memory while building script test suite list\n");
+		abort();
+	}
+	/* NULL terminate the test suite array.
*/ + result = result_tmp; + result[result_sz] = NULL; + if (dir_fd >= 0) + close(dir_fd); + return result; +} diff --git a/tools/perf/tests/tests-scripts.h b/tools/perf/tests/tests-scripts.h new file mode 100644 index 000000000000..b553ad26ea17 --- /dev/null +++ b/tools/perf/tests/tests-scripts.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TESTS_SCRIPTS_H +#define TESTS_SCRIPTS_H + +#include "tests.h" + +struct test_suite **create_script_test_suites(void); + +#endif /* TESTS_SCRIPTS_H */ diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index dad3d7414142..3aa7701ee0e9 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -4,11 +4,17 @@ #include <stdbool.h> +enum { + TEST_OK = 0, + TEST_FAIL = -1, + TEST_SKIP = -2, +}; + #define TEST_ASSERT_VAL(text, cond) \ do { \ if (!(cond)) { \ pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \ - return -1; \ + return TEST_FAIL; \ } \ } while (0) @@ -17,16 +23,10 @@ do { \ if (val != expected) { \ pr_debug("FAILED %s:%d %s (%d != %d)\n", \ __FILE__, __LINE__, text, val, expected); \ - return -1; \ + return TEST_FAIL; \ } \ } while (0) -enum { - TEST_OK = 0, - TEST_FAIL = -1, - TEST_SKIP = -2, -}; - struct test_suite; typedef int (*test_fnptr)(struct test_suite *, int); diff --git a/tools/perf/tests/thread-maps-share.c b/tools/perf/tests/thread-maps-share.c index 7fa6f7c568e2..e9ecd30a5c05 100644 --- a/tools/perf/tests/thread-maps-share.c +++ b/tools/perf/tests/thread-maps-share.c @@ -46,9 +46,9 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4); /* test the maps pointer is shared */ - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t1))); - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t2))); - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t3))); + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t1))); + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t2))); + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t3))); /* * Verify the other leader was created by previous call. 
@@ -73,7 +73,7 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s other_maps = thread__maps(other); TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2); - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(other_maps, thread__maps(other_leader))); + TEST_ASSERT_VAL("maps don't match", maps__equal(other_maps, thread__maps(other_leader))); /* release thread group */ thread__put(t3); diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 2a842f53fbb5..a8cb5ba898ab 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -68,6 +68,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) }; int i; struct aggr_cpu_id id; + struct perf_cpu cpu; session = perf_session__new(&data, NULL); TEST_ASSERT_VAL("can't get session", !IS_ERR(session)); @@ -113,8 +114,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) TEST_ASSERT_VAL("Session header CPU map not set", session->header.env.cpu); for (i = 0; i < session->header.env.nr_cpus_avail; i++) { - struct perf_cpu cpu = { .cpu = i }; - + cpu.cpu = i; if (!perf_cpu_map__has(map, cpu)) continue; pr_debug("CPU %d, core %d, socket %d\n", i, @@ -123,48 +123,48 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) } // Test that CPU ID contains socket, die, core and CPU - for (i = 0; i < perf_cpu_map__nr(map); i++) { - id = aggr_cpu_id__cpu(perf_cpu_map__cpu(map, i), NULL); + perf_cpu_map__for_each_cpu(cpu, i, map) { + id = aggr_cpu_id__cpu(cpu, NULL); TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match", - perf_cpu_map__cpu(map, i).cpu == id.cpu.cpu); + cpu.cpu == id.cpu.cpu); TEST_ASSERT_VAL("Cpu map - Core ID doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core); + session->header.env.cpu[cpu.cpu].core_id == id.core); TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id == + session->header.env.cpu[cpu.cpu].socket_id == id.socket); TEST_ASSERT_VAL("Cpu map - Die ID doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die); + session->header.env.cpu[cpu.cpu].die_id == id.die); TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1); TEST_ASSERT_VAL("Cpu map - Thread IDX is set", id.thread_idx == -1); } // Test that core ID contains socket, die and core - for (i = 0; i < perf_cpu_map__nr(map); i++) { - id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL); + perf_cpu_map__for_each_cpu(cpu, i, map) { + id = aggr_cpu_id__core(cpu, NULL); TEST_ASSERT_VAL("Core map - Core ID doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core); + session->header.env.cpu[cpu.cpu].core_id == id.core); TEST_ASSERT_VAL("Core map - Socket ID doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id == + session->header.env.cpu[cpu.cpu].socket_id == id.socket); TEST_ASSERT_VAL("Core map - Die ID doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die); + session->header.env.cpu[cpu.cpu].die_id == id.die); TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1); TEST_ASSERT_VAL("Core map - Thread IDX is set", id.thread_idx == -1); } // Test that die ID contains socket and die - for (i = 0; i < perf_cpu_map__nr(map); i++) { - id = aggr_cpu_id__die(perf_cpu_map__cpu(map, i), NULL); + perf_cpu_map__for_each_cpu(cpu, i, map) { + id = aggr_cpu_id__die(cpu, NULL); TEST_ASSERT_VAL("Die map - Socket ID 
doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id == + session->header.env.cpu[cpu.cpu].socket_id == id.socket); TEST_ASSERT_VAL("Die map - Die ID doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die); + session->header.env.cpu[cpu.cpu].die_id == id.die); TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1); TEST_ASSERT_VAL("Die map - Core is set", id.core == -1); @@ -173,10 +173,10 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) } // Test that socket ID contains only socket - for (i = 0; i < perf_cpu_map__nr(map); i++) { - id = aggr_cpu_id__socket(perf_cpu_map__cpu(map, i), NULL); + perf_cpu_map__for_each_cpu(cpu, i, map) { + id = aggr_cpu_id__socket(cpu, NULL); TEST_ASSERT_VAL("Socket map - Socket ID doesn't match", - session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id == + session->header.env.cpu[cpu.cpu].socket_id == id.socket); TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1); @@ -187,10 +187,10 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) } // Test that node ID contains only node - for (i = 0; i < perf_cpu_map__nr(map); i++) { - id = aggr_cpu_id__node(perf_cpu_map__cpu(map, i), NULL); + perf_cpu_map__for_each_cpu(cpu, i, map) { + id = aggr_cpu_id__node(cpu, NULL); TEST_ASSERT_VAL("Node map - Node ID doesn't match", - cpu__get_node(perf_cpu_map__cpu(map, i)) == id.node); + cpu__get_node(cpu) == id.node); TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1); TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1); TEST_ASSERT_VAL("Node map - Core is set", id.core == -1); diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c index 822f893e67d5..e30fd55f8e51 100644 --- a/tools/perf/tests/vmlinux-kallsyms.c +++ b/tools/perf/tests/vmlinux-kallsyms.c @@ -129,11 +129,12 @@ static int test__vmlinux_matches_kallsyms_cb1(struct map *map, void *data) * cases. */ struct map *pair = maps__find_by_name(args->kallsyms.kmaps, - (dso->kernel ? dso->short_name : dso->name)); + (dso__kernel(dso) ? 
dso__short_name(dso) : dso__name(dso))); - if (pair) + if (pair) { map__set_priv(pair, 1); - else { + map__put(pair); + } else { if (!args->header_printed) { pr_info("WARN: Maps only in vmlinux:\n"); args->header_printed = true; @@ -151,10 +152,8 @@ static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data) u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map)); pair = maps__find(args->kallsyms.kmaps, mem_start); - if (pair == NULL || map__priv(pair)) - return 0; - if (map__start(pair) == mem_start) { + if (pair != NULL && !map__priv(pair) && map__start(pair) == mem_start) { struct dso *dso = map__dso(map); if (!args->header_printed) { @@ -163,13 +162,14 @@ static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data) } pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", - map__start(map), map__end(map), map__pgoff(map), dso->name); + map__start(map), map__end(map), map__pgoff(map), dso__name(dso)); if (mem_end != map__end(pair)) pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64, map__start(pair), map__end(pair), map__pgoff(pair)); - pr_info(" %s\n", dso->name); + pr_info(" %s\n", dso__name(dso)); map__set_priv(pair, 1); } + map__put(pair); return 0; } diff --git a/tools/perf/tests/workloads/datasym.c b/tools/perf/tests/workloads/datasym.c index ddd40bc63448..8e08fc75a973 100644 --- a/tools/perf/tests/workloads/datasym.c +++ b/tools/perf/tests/workloads/datasym.c @@ -16,6 +16,22 @@ static int datasym(int argc __maybe_unused, const char **argv __maybe_unused) { for (;;) { buf1.data1++; + if (buf1.data1 == 123) { + /* + * Add some 'noise' in the loop to work around errata + * 1694299 on Arm N1. + * + * Bias exists in SPE sampling which can cause the load + * and store instructions to be skipped entirely. This + * comes and goes randomly depending on the offset the + * linker places the datasym loop at in the Perf binary. + * With an extra branch in the middle of the loop that + * isn't always taken, the instruction stream is no + * longer a continuous repeating pattern that interacts + * badly with the bias. + */ + buf1.data1++; + } buf1.data2 += buf1.data1; } return 0; diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build index d11ce256f511..cb3c1399ff40 100644 --- a/tools/perf/trace/beauty/Build +++ b/tools/perf/trace/beauty/Build @@ -1,6 +1,7 @@ perf-y += clone.o perf-y += fcntl.o perf-y += flock.o +perf-y += fs_at_flags.o perf-y += fsmount.o perf-y += fspick.o ifeq ($(SRCARCH),$(filter $(SRCARCH),x86)) @@ -19,3 +20,17 @@ perf-y += statx.o perf-y += sync_file_range.o perf-y += timespec.o perf-y += tracepoints/ + +ifdef SHELLCHECK + SHELL_TESTS := $(wildcard trace/beauty/*.sh) + TEST_LOGS := $(SHELL_TESTS:trace/beauty/%=%.shellcheck_log) +else + SHELL_TESTS := + TEST_LOGS := +endif + +$(OUTPUT)%.shellcheck_log: % + $(call rule_mkdir) + $(Q)$(call echo-cmd,test)shellcheck -s bash -a -S warning "$<" > $@ || (cat $@ && rm $@ && false) + +perf-y += $(TEST_LOGS) diff --git a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h new file mode 100644 index 000000000000..d18bfb238f66 --- /dev/null +++ b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_IRQ_VECTORS_H +#define _ASM_X86_IRQ_VECTORS_H + +#include <linux/threads.h> +/* + * Linux IRQ vector layout. + * + * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can + * be defined by Linux. 
They are used as a jump table by the CPU when a + * given vector is triggered - by a CPU-external, CPU-internal or + * software-triggered event. + * + * Linux sets the kernel code address each entry jumps to early during + * bootup, and never changes them. This is the general layout of the + * IDT entries: + * + * Vectors 0 ... 31 : system traps and exceptions - hardcoded events + * Vectors 32 ... 127 : device interrupts + * Vector 128 : legacy int80 syscall interface + * Vectors 129 ... LOCAL_TIMER_VECTOR-1 + * Vectors LOCAL_TIMER_VECTOR ... 255 : special interrupts + * + * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. + * + * This file enumerates the exact layout of them: + */ + +/* This is used as an interrupt vector when programming the APIC. */ +#define NMI_VECTOR 0x02 + +/* + * IDT vectors usable for external interrupt sources start at 0x20. + * (0x80 is the syscall vector, 0x30-0x3f are for ISA) + */ +#define FIRST_EXTERNAL_VECTOR 0x20 + +#define IA32_SYSCALL_VECTOR 0x80 + +/* + * Vectors 0x30-0x3f are used for ISA interrupts. + * round up to the next 16-vector boundary + */ +#define ISA_IRQ_VECTOR(irq) (((FIRST_EXTERNAL_VECTOR + 16) & ~15) + irq) + +/* + * Special IRQ vectors used by the SMP architecture, 0xf0-0xff + * + * some of the following vectors are 'rare', they are merged + * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. + * TLB, reschedule and local APIC vectors are performance-critical. + */ + +#define SPURIOUS_APIC_VECTOR 0xff +/* + * Sanity check + */ +#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F) +# error SPURIOUS_APIC_VECTOR definition error +#endif + +#define ERROR_APIC_VECTOR 0xfe +#define RESCHEDULE_VECTOR 0xfd +#define CALL_FUNCTION_VECTOR 0xfc +#define CALL_FUNCTION_SINGLE_VECTOR 0xfb +#define THERMAL_APIC_VECTOR 0xfa +#define THRESHOLD_APIC_VECTOR 0xf9 +#define REBOOT_VECTOR 0xf8 + +/* + * Generic system vector for platform specific use + */ +#define X86_PLATFORM_IPI_VECTOR 0xf7 + +/* + * IRQ work vector: + */ +#define IRQ_WORK_VECTOR 0xf6 + +/* 0xf5 - unused, was UV_BAU_MESSAGE */ +#define DEFERRED_ERROR_VECTOR 0xf4 + +/* Vector on which hypervisor callbacks will be delivered */ +#define HYPERVISOR_CALLBACK_VECTOR 0xf3 + +/* Vector for KVM to deliver posted interrupt IPI */ +#define POSTED_INTR_VECTOR 0xf2 +#define POSTED_INTR_WAKEUP_VECTOR 0xf1 +#define POSTED_INTR_NESTED_VECTOR 0xf0 + +#define MANAGED_IRQ_SHUTDOWN_VECTOR 0xef + +#if IS_ENABLED(CONFIG_HYPERV) +#define HYPERV_REENLIGHTENMENT_VECTOR 0xee +#define HYPERV_STIMER0_VECTOR 0xed +#endif + +#define LOCAL_TIMER_VECTOR 0xec + +#define NR_VECTORS 256 + +#ifdef CONFIG_X86_LOCAL_APIC +#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR +#else +#define FIRST_SYSTEM_VECTOR NR_VECTORS +#endif + +#define NR_EXTERNAL_VECTORS (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) +#define NR_SYSTEM_VECTORS (NR_VECTORS - FIRST_SYSTEM_VECTOR) + +/* + * Size the maximum number of interrupts. + * + * If the irq_desc[] array has a sparse layout, we can size things + * generously - it scales up linearly with the maximum number of CPUs, + * and the maximum number of IO-APICs, whichever is higher. + * + * In other cases we size more conservatively, to not create too large + * static arrays. + */ + +#define NR_IRQS_LEGACY 16 + +#define CPU_VECTOR_LIMIT (64 * NR_CPUS) +#define IO_APIC_VECTOR_LIMIT (32 * MAX_IO_APICS) + +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_PCI_MSI) +#define NR_IRQS \ + (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? 
\ + (NR_VECTORS + CPU_VECTOR_LIMIT) : \ + (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) +#elif defined(CONFIG_X86_IO_APIC) +#define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) +#elif defined(CONFIG_PCI_MSI) +#define NR_IRQS (NR_VECTORS + CPU_VECTOR_LIMIT) +#else +#define NR_IRQS NR_IRQS_LEGACY +#endif + +#endif /* _ASM_X86_IRQ_VECTORS_H */ diff --git a/tools/perf/trace/beauty/arch/x86/include/uapi/asm/prctl.h b/tools/perf/trace/beauty/arch/x86/include/uapi/asm/prctl.h new file mode 100644 index 000000000000..384e2cc6ac19 --- /dev/null +++ b/tools/perf/trace/beauty/arch/x86/include/uapi/asm/prctl.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_X86_PRCTL_H +#define _ASM_X86_PRCTL_H + +#define ARCH_SET_GS 0x1001 +#define ARCH_SET_FS 0x1002 +#define ARCH_GET_FS 0x1003 +#define ARCH_GET_GS 0x1004 + +#define ARCH_GET_CPUID 0x1011 +#define ARCH_SET_CPUID 0x1012 + +#define ARCH_GET_XCOMP_SUPP 0x1021 +#define ARCH_GET_XCOMP_PERM 0x1022 +#define ARCH_REQ_XCOMP_PERM 0x1023 +#define ARCH_GET_XCOMP_GUEST_PERM 0x1024 +#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025 + +#define ARCH_XCOMP_TILECFG 17 +#define ARCH_XCOMP_TILEDATA 18 + +#define ARCH_MAP_VDSO_X32 0x2001 +#define ARCH_MAP_VDSO_32 0x2002 +#define ARCH_MAP_VDSO_64 0x2003 + +/* Don't use 0x3001-0x3004 because of old glibcs */ + +#define ARCH_GET_UNTAG_MASK 0x4001 +#define ARCH_ENABLE_TAGGED_ADDR 0x4002 +#define ARCH_GET_MAX_TAG_BITS 0x4003 +#define ARCH_FORCE_TAGGED_SVA 0x4004 + +#define ARCH_SHSTK_ENABLE 0x5001 +#define ARCH_SHSTK_DISABLE 0x5002 +#define ARCH_SHSTK_LOCK 0x5003 +#define ARCH_SHSTK_UNLOCK 0x5004 +#define ARCH_SHSTK_STATUS 0x5005 + +/* ARCH_SHSTK_ features bits */ +#define ARCH_SHSTK_SHSTK (1ULL << 0) +#define ARCH_SHSTK_WRSS (1ULL << 1) + +#endif /* _ASM_X86_PRCTL_H */ diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh index 7df4bf5b55a3..30d3889b2957 100755 --- a/tools/perf/trace/beauty/arch_errno_names.sh +++ b/tools/perf/trace/beauty/arch_errno_names.sh @@ -60,10 +60,12 @@ create_arch_errno_table_func() printf 'arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch)\n' printf '{\n' for arch in $archlist; do - printf '\tif (!strcmp(arch, "%s"))\n' $(arch_string "$arch") - printf '\t\treturn errno_to_name__%s;\n' $(arch_string "$arch") + arch_str=$(arch_string "$arch") + printf '\tif (!strcmp(arch, "%s"))\n' "$arch_str" + printf '\t\treturn errno_to_name__%s;\n' "$arch_str" done - printf '\treturn errno_to_name__%s;\n' $(arch_string "$default") + arch_str=$(arch_string "$default") + printf '\treturn errno_to_name__%s;\n' "$arch_str" printf '}\n' } diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h index 9feb794f5c6e..78d10d92d351 100644 --- a/tools/perf/trace/beauty/beauty.h +++ b/tools/perf/trace/beauty/beauty.h @@ -234,8 +234,11 @@ size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct sysc size_t syscall_arg__scnprintf_socket_level(char *bf, size_t size, struct syscall_arg *arg); #define SCA_SK_LEVEL syscall_arg__scnprintf_socket_level -size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg); -#define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags +size_t syscall_arg__scnprintf_fs_at_flags(char *bf, size_t size, struct syscall_arg *arg); +#define SCA_FS_AT_FLAGS syscall_arg__scnprintf_fs_at_flags + +size_t syscall_arg__scnprintf_faccessat2_flags(char *bf, size_t size, struct syscall_arg *arg); +#define SCA_FACCESSAT2_FLAGS 
syscall_arg__scnprintf_faccessat2_flags size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_arg *arg); #define SCA_STATX_MASK syscall_arg__scnprintf_statx_mask diff --git a/tools/perf/trace/beauty/clone.c b/tools/perf/trace/beauty/clone.c index f4db894e0af6..c9fa8f7e82b9 100644 --- a/tools/perf/trace/beauty/clone.c +++ b/tools/perf/trace/beauty/clone.c @@ -7,52 +7,16 @@ #include "trace/beauty/beauty.h" #include <linux/kernel.h> +#include <linux/log2.h> #include <sys/types.h> -#include <uapi/linux/sched.h> +#include <sched.h> static size_t clone__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix) { - const char *prefix = "CLONE_"; - int printed = 0; +#include "trace/beauty/generated/clone_flags_array.c" + static DEFINE_STRARRAY(clone_flags, "CLONE_"); -#define P_FLAG(n) \ - if (flags & CLONE_##n) { \ - printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ - flags &= ~CLONE_##n; \ - } - - P_FLAG(VM); - P_FLAG(FS); - P_FLAG(FILES); - P_FLAG(SIGHAND); - P_FLAG(PIDFD); - P_FLAG(PTRACE); - P_FLAG(VFORK); - P_FLAG(PARENT); - P_FLAG(THREAD); - P_FLAG(NEWNS); - P_FLAG(SYSVSEM); - P_FLAG(SETTLS); - P_FLAG(PARENT_SETTID); - P_FLAG(CHILD_CLEARTID); - P_FLAG(DETACHED); - P_FLAG(UNTRACED); - P_FLAG(CHILD_SETTID); - P_FLAG(NEWCGROUP); - P_FLAG(NEWUTS); - P_FLAG(NEWIPC); - P_FLAG(NEWUSER); - P_FLAG(NEWPID); - P_FLAG(NEWNET); - P_FLAG(IO); - P_FLAG(CLEAR_SIGHAND); - P_FLAG(INTO_CGROUP); -#undef P_FLAG - - if (flags) - printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); - - return printed; + return strarray__scnprintf_flags(&strarray__clone_flags, bf, size, show_prefix, flags); } size_t syscall_arg__scnprintf_clone_flags(char *bf, size_t size, struct syscall_arg *arg) diff --git a/tools/perf/trace/beauty/clone.sh b/tools/perf/trace/beauty/clone.sh new file mode 100755 index 000000000000..18b6c0d75693 --- /dev/null +++ b/tools/perf/trace/beauty/clone.sh @@ -0,0 +1,17 @@ +#!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 + +if [ $# -ne 1 ] ; then + beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ +else + beauty_uapi_linux_dir=$1 +fi + +linux_sched=${beauty_uapi_linux_dir}/sched.h + +printf "static const char *clone_flags[] = {\n" +regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+CLONE_([^_]+[[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*' +grep -E $regex ${linux_sched} | \ + sed -r "s/$regex/\2 \1/g" | \ + xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n" +printf "};\n" diff --git a/tools/perf/trace/beauty/fcntl.c b/tools/perf/trace/beauty/fcntl.c index 56ef83b3d130..d075904dccce 100644 --- a/tools/perf/trace/beauty/fcntl.c +++ b/tools/perf/trace/beauty/fcntl.c @@ -7,7 +7,7 @@ #include "trace/beauty/beauty.h" #include <linux/kernel.h> -#include <uapi/linux/fcntl.h> +#include <linux/fcntl.h> static size_t fcntl__scnprintf_getfd(unsigned long val, char *bf, size_t size, bool show_prefix) { diff --git a/tools/perf/trace/beauty/flock.c b/tools/perf/trace/beauty/flock.c index c14274edd6d9..a6514a6f07cf 100644 --- a/tools/perf/trace/beauty/flock.c +++ b/tools/perf/trace/beauty/flock.c @@ -2,7 +2,7 @@ #include "trace/beauty/beauty.h" #include <linux/kernel.h> -#include <uapi/linux/fcntl.h> +#include <linux/fcntl.h> #ifndef LOCK_MAND #define LOCK_MAND 32 diff --git a/tools/perf/trace/beauty/fs_at_flags.c b/tools/perf/trace/beauty/fs_at_flags.c new file mode 100644 index 000000000000..c200669cb944 --- /dev/null +++ 
b/tools/perf/trace/beauty/fs_at_flags.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: LGPL-2.1 +/* + * trace/beauty/fs_at_flags.c + * + * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> + */ + +#include "trace/beauty/beauty.h" +#include <sys/types.h> +#include <linux/fcntl.h> +#include <linux/log2.h> + +/* + * uapi/linux/fcntl.h does not keep a copy in the tools headers directory, + * so for systems with kernel versions before v5.8 we need to sync the AT_EACCESS macro. + */ +#ifndef AT_EACCESS +#define AT_EACCESS 0x200 +#endif + +#include "trace/beauty/generated/fs_at_flags_array.c" +static DEFINE_STRARRAY(fs_at_flags, "AT_"); + +static size_t fs_at__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix) +{ + return strarray__scnprintf_flags(&strarray__fs_at_flags, bf, size, show_prefix, flags); +} + +size_t syscall_arg__scnprintf_fs_at_flags(char *bf, size_t size, struct syscall_arg *arg) +{ + bool show_prefix = arg->show_string_prefix; + int flags = arg->val; + + return fs_at__scnprintf_flags(flags, bf, size, show_prefix); +} + +static size_t faccessat2__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix) +{ + int printed = 0; + + // AT_EACCESS is the same as AT_REMOVEDIR, which is in fs_at_flags_array, + // so special case it here. + if (flags & AT_EACCESS) { + flags &= ~AT_EACCESS; + printed += scnprintf(bf + printed, size - printed, "%sEACCESS%s", + show_prefix ? strarray__fs_at_flags.prefix : "", flags ? "|" : ""); + } + + return strarray__scnprintf_flags(&strarray__fs_at_flags, bf + printed, size - printed, show_prefix, flags); +} + +size_t syscall_arg__scnprintf_faccessat2_flags(char *bf, size_t size, struct syscall_arg *arg) +{ + bool show_prefix = arg->show_string_prefix; + int flags = arg->val; + + return faccessat2__scnprintf_flags(flags, bf, size, show_prefix); +} diff --git a/tools/perf/trace/beauty/fs_at_flags.sh b/tools/perf/trace/beauty/fs_at_flags.sh new file mode 100755 index 000000000000..456f59addf74 --- /dev/null +++ b/tools/perf/trace/beauty/fs_at_flags.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 + +if [ $# -ne 1 ] ; then + beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ +else + beauty_uapi_linux_dir=$1 +fi + +linux_fcntl=${beauty_uapi_linux_dir}/fcntl.h + +printf "static const char *fs_at_flags[] = {\n" +regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+AT_([^_]+[[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*'
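[Editor's note, not part of the patch: the xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n" line below emits each flag name at its 1-based bit position, which appears to be the slot strarray__scnprintf_flags() indexes for each set bit, leaving slot 0 free for a zero value. Assuming the AT_* values from the fcntl.h copy added later in this patch, the generated fs_at_flags_array.c would look roughly like this sketch (higher AT_STATX_*/AT_RECURSIVE entries omitted):

static const char *fs_at_flags[] = {
	[ilog2(0x100) + 1] = "SYMLINK_NOFOLLOW",	/* bit 8 -> slot 9 */
	[ilog2(0x200) + 1] = "REMOVEDIR",	/* AT_EACCESS shares 0x200; special-cased in fs_at_flags.c */
	[ilog2(0x400) + 1] = "SYMLINK_FOLLOW",
	[ilog2(0x800) + 1] = "NO_AUTOMOUNT",
	[ilog2(0x1000) + 1] = "EMPTY_PATH",
};

With the "AT_" prefix enabled, a flags value of 0x900 would then render as something like "AT_SYMLINK_NOFOLLOW|AT_NO_AUTOMOUNT".]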
+# AT_EACCESS is only meaningful to faccessat, so we will special case it there... +# AT_STATX_SYNC_TYPE is not a bit, it's a mask of AT_STATX_SYNC_AS_STAT, AT_STATX_FORCE_SYNC and AT_STATX_DONT_SYNC +grep -E $regex ${linux_fcntl} | \ + grep -v AT_EACCESS | \ + grep -v AT_STATX_SYNC_TYPE | \ + sed -r "s/$regex/\2 \1/g" | \ + xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n" +printf "};\n" diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh index bc6ef7bb7a5f..09cee79de00c 100755 --- a/tools/perf/trace/beauty/fsconfig.sh +++ b/tools/perf/trace/beauty/fsconfig.sh @@ -2,12 +2,12 @@ # SPDX-License-Identifier: LGPL-2.1 if [ $# -ne 1 ] ; then - linux_header_dir=tools/include/uapi/linux + beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ else - linux_header_dir=$1 + beauty_uapi_linux_dir=$1 fi -linux_mount=${linux_header_dir}/mount.h +linux_mount=${beauty_uapi_linux_dir}/mount.h printf "static const char *fsconfig_cmds[] = {\n" ms='[[:space:]]*' diff --git a/tools/perf/trace/beauty/fsmount.c b/tools/perf/trace/beauty/fsmount.c index 30c8c082a3c3..28c2c16fc1a8 100644 --- a/tools/perf/trace/beauty/fsmount.c +++ b/tools/perf/trace/beauty/fsmount.c @@ -7,7 +7,14 @@ #include "trace/beauty/beauty.h" #include <linux/log2.h> -#include <uapi/linux/mount.h> +#include <sys/mount.h> + +#ifndef MOUNT_ATTR__ATIME +#define MOUNT_ATTR__ATIME 0x00000070 /* Setting on how atime should be updated */ +#endif +#ifndef MOUNT_ATTR_RELATIME +#define MOUNT_ATTR_RELATIME 0x00000000 /* - Update atime relative to mtime/ctime. */ +#endif static size_t fsmount__scnprintf_attr_flags(unsigned long flags, char *bf, size_t size, bool show_prefix) { diff --git a/tools/perf/trace/beauty/fsmount.sh b/tools/perf/trace/beauty/fsmount.sh index cba8897a751f..6b67a54cdeee 100755 --- a/tools/perf/trace/beauty/fsmount.sh +++ b/tools/perf/trace/beauty/fsmount.sh @@ -2,12 +2,12 @@ # SPDX-License-Identifier: LGPL-2.1 if [ $# -ne 1 ] ; then - linux_header_dir=tools/include/uapi/linux + beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ else - linux_header_dir=$1 + beauty_uapi_linux_dir=$1 fi -linux_mount=${linux_header_dir}/mount.h +linux_mount=${beauty_uapi_linux_dir}/mount.h # Remove MOUNT_ATTR_RELATIME as it is zero, handle it in a special way in the beautifier # Only handle MOUNT_ATTR_ followed by a capital letter/num as __ is a special case diff --git a/tools/perf/trace/beauty/fspick.sh b/tools/perf/trace/beauty/fspick.sh index 1f088329b96e..0d9951c22b95 100755 --- a/tools/perf/trace/beauty/fspick.sh +++ b/tools/perf/trace/beauty/fspick.sh @@ -2,12 +2,12 @@ # SPDX-License-Identifier: LGPL-2.1 if [ $# -ne 1 ] ; then - linux_header_dir=tools/include/uapi/linux + beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ else - linux_header_dir=$1 + beauty_uapi_linux_dir=$1 fi -linux_mount=${linux_header_dir}/mount.h +linux_mount=${beauty_uapi_linux_dir}/mount.h printf "static const char *fspick_flags[] = {\n" regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+FSPICK_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*' diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h index cfcb7e2c3813..139c330ccf2c 100644 --- a/tools/perf/trace/beauty/include/linux/socket.h +++ b/tools/perf/trace/beauty/include/linux/socket.h @@ -422,13 +422,6 @@ extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg, struct user_msghdr __user *umsg, struct sockaddr __user *uaddr, unsigned int flags); -extern int sendmsg_copy_msghdr(struct msghdr *msg, - struct user_msghdr __user *umsg, unsigned flags, - struct 
iovec **iov); -extern int recvmsg_copy_msghdr(struct msghdr *msg, - struct user_msghdr __user *umsg, unsigned flags, - struct sockaddr __user **uaddr, - struct iovec **iov); extern int __copy_msghdr(struct msghdr *kmsg, struct user_msghdr *umsg, struct sockaddr __user **save_addr); diff --git a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h new file mode 100644 index 000000000000..282e90aeb163 --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_FCNTL_H +#define _UAPI_LINUX_FCNTL_H + +#include <asm/fcntl.h> +#include <linux/openat2.h> + +#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0) +#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1) + +/* + * Cancel a blocking posix lock; internal use only until we expose an + * asynchronous lock api to userspace: + */ +#define F_CANCELLK (F_LINUX_SPECIFIC_BASE + 5) + +/* Create a file descriptor with FD_CLOEXEC set. */ +#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6) + +/* + * Request notifications on a directory. + * See below for events that may be notified. + */ +#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2) + +/* + * Set and get of pipe page size array + */ +#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7) +#define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8) + +/* + * Set/Get seals + */ +#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9) +#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10) + +/* + * Types of seals + */ +#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */ +#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */ +#define F_SEAL_GROW 0x0004 /* prevent file from growing */ +#define F_SEAL_WRITE 0x0008 /* prevent writes */ +#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */ +#define F_SEAL_EXEC 0x0020 /* prevent chmod modifying exec bits */ +/* (1U << 31) is reserved for signed error codes */ + +/* + * Set/Get write life time hints. {GET,SET}_RW_HINT operate on the + * underlying inode, while {GET,SET}_FILE_RW_HINT operate only on + * the specific file. + */ +#define F_GET_RW_HINT (F_LINUX_SPECIFIC_BASE + 11) +#define F_SET_RW_HINT (F_LINUX_SPECIFIC_BASE + 12) +#define F_GET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 13) +#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14) + +/* + * Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be + * used to clear any hints previously set. + */ +#define RWH_WRITE_LIFE_NOT_SET 0 +#define RWH_WRITE_LIFE_NONE 1 +#define RWH_WRITE_LIFE_SHORT 2 +#define RWH_WRITE_LIFE_MEDIUM 3 +#define RWH_WRITE_LIFE_LONG 4 +#define RWH_WRITE_LIFE_EXTREME 5 + +/* + * The originally introduced spelling is retained from the first + * versions of the patch set that introduced the feature, see commit + * v4.13-rc1~212^2~51. + */ +#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET + +/* + * Types of directory notifications that may be requested. + */ +#define DN_ACCESS 0x00000001 /* File accessed */ +#define DN_MODIFY 0x00000002 /* File modified */ +#define DN_CREATE 0x00000004 /* File created */ +#define DN_DELETE 0x00000008 /* File removed */ +#define DN_RENAME 0x00000010 /* File renamed */ +#define DN_ATTRIB 0x00000020 /* File changed attributes */ +#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */ + +/* + * The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is + * meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to + * unlinkat. 
The two functions do completely different things and therefore, + * the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to + * faccessat would be undefined behavior and thus treating it equivalent to + * AT_EACCESS is valid undefined behavior. + */ +#define AT_FDCWD -100 /* Special value used to indicate + openat should use the current + working directory. */ +#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */ +#define AT_EACCESS 0x200 /* Test access permitted for + effective IDs, not real IDs. */ +#define AT_REMOVEDIR 0x200 /* Remove directory instead of + unlinking file. */ +#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */ +#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */ +#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */ + +#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */ +#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */ +#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */ +#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */ + +#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */ + +/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... */ +#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to + compare object identity and may not + be usable to open_by_handle_at(2) */ +#if defined(__KERNEL__) +#define AT_GETATTR_NOSEC 0x80000000 +#endif + +#endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/tools/perf/trace/beauty/include/uapi/linux/fs.h b/tools/perf/trace/beauty/include/uapi/linux/fs.h new file mode 100644 index 000000000000..45e4e64fd664 --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/linux/fs.h @@ -0,0 +1,396 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_FS_H +#define _UAPI_LINUX_FS_H + +/* + * This file has definitions for some important file table structures + * and constants and structures used by various generic file system + * ioctl's. Please do not make any changes in this file before + * sending patches for review to linux-fsdevel@vger.kernel.org and + * linux-api@vger.kernel.org. + */ + +#include <linux/limits.h> +#include <linux/ioctl.h> +#include <linux/types.h> +#ifndef __KERNEL__ +#include <linux/fscrypt.h> +#endif + +/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */ +#if !defined(__KERNEL__) +#include <linux/mount.h> +#endif + +/* + * It's silly to have NR_OPEN bigger than NR_FILE, but you can change + * the file limit at runtime and only root can increase the per-process + * nr_file rlimit, so it's safe to set up a ridiculously high absolute + * upper limit on files-per-process. + * + * Some programs (notably those using select()) may have to be + * recompiled to take full advantage of the new limits.. 
+ */ + +/* Fixed constants first: */ +#undef NR_OPEN +#define INR_OPEN_CUR 1024 /* Initial setting for nfile rlimits */ +#define INR_OPEN_MAX 4096 /* Hard limit for nfile rlimits */ + +#define BLOCK_SIZE_BITS 10 +#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS) + +#define SEEK_SET 0 /* seek relative to beginning of file */ +#define SEEK_CUR 1 /* seek relative to current file position */ +#define SEEK_END 2 /* seek relative to end of file */ +#define SEEK_DATA 3 /* seek to the next data */ +#define SEEK_HOLE 4 /* seek to the next hole */ +#define SEEK_MAX SEEK_HOLE + +#define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */ +#define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */ +#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */ + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +/* + * We include a length field because some filesystems (vfat) have an identifier + * that we do want to expose as a UUID, but doesn't have the standard length. + * + * We use a fixed size buffer because this interface will, by fiat, never + * support "UUIDs" longer than 16 bytes; we don't want to force all downstream + * users to have to deal with that. + */ +struct fsuuid2 { + __u8 len; + __u8 uuid[16]; +}; + +struct fs_sysfs_path { + __u8 len; + __u8 name[128]; +}; + +/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */ +#define FILE_DEDUPE_RANGE_SAME 0 +#define FILE_DEDUPE_RANGE_DIFFERS 1 + +/* from struct btrfs_ioctl_file_extent_same_info */ +struct file_dedupe_range_info { + __s64 dest_fd; /* in - destination file */ + __u64 dest_offset; /* in - start of extent in destination */ + __u64 bytes_deduped; /* out - total # of bytes we were able + * to dedupe from this file. */ + /* status of this dedupe operation: + * < 0 for error + * == FILE_DEDUPE_RANGE_SAME if dedupe succeeds + * == FILE_DEDUPE_RANGE_DIFFERS if data differs + */ + __s32 status; /* out - see above description */ + __u32 reserved; /* must be zero */ +}; + +/* from struct btrfs_ioctl_file_extent_same_args */ +struct file_dedupe_range { + __u64 src_offset; /* in - start of extent in source */ + __u64 src_length; /* in - length of extent */ + __u16 dest_count; /* in - total elements in info array */ + __u16 reserved1; /* must be zero */ + __u32 reserved2; /* must be zero */ + struct file_dedupe_range_info info[]; +}; + +/* And dynamically-tunable limits and defaults: */ +struct files_stat_struct { + unsigned long nr_files; /* read only */ + unsigned long nr_free_files; /* read only */ + unsigned long max_files; /* tunable */ +}; + +struct inodes_stat_t { + long nr_inodes; + long nr_unused; + long dummy[5]; /* padding for sysctl ABI compatibility */ +}; + + +#define NR_FILE 8192 /* this can well be larger on a larger system */ + +/* + * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR. 
+ */ +struct fsxattr { + __u32 fsx_xflags; /* xflags field value (get/set) */ + __u32 fsx_extsize; /* extsize field value (get/set)*/ + __u32 fsx_nextents; /* nextents field value (get) */ + __u32 fsx_projid; /* project identifier (get/set) */ + __u32 fsx_cowextsize; /* CoW extsize field value (get/set)*/ + unsigned char fsx_pad[8]; +}; + +/* + * Flags for the fsx_xflags field + */ +#define FS_XFLAG_REALTIME 0x00000001 /* data in realtime volume */ +#define FS_XFLAG_PREALLOC 0x00000002 /* preallocated file extents */ +#define FS_XFLAG_IMMUTABLE 0x00000008 /* file cannot be modified */ +#define FS_XFLAG_APPEND 0x00000010 /* all writes append */ +#define FS_XFLAG_SYNC 0x00000020 /* all writes synchronous */ +#define FS_XFLAG_NOATIME 0x00000040 /* do not update access time */ +#define FS_XFLAG_NODUMP 0x00000080 /* do not include in backups */ +#define FS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */ +#define FS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */ +#define FS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */ +#define FS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */ +#define FS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */ +#define FS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */ +#define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */ +#define FS_XFLAG_DAX 0x00008000 /* use DAX for IO */ +#define FS_XFLAG_COWEXTSIZE 0x00010000 /* CoW extent size allocator hint */ +#define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ + +/* the read-only stuff doesn't really belong here, but any other place is + probably as bad and I don't want to create yet another include file. */ + +#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */ +#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */ +#define BLKRRPART _IO(0x12,95) /* re-read partition table */ +#define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */ +#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */ +#define BLKRASET _IO(0x12,98) /* set read ahead for block device */ +#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */ +#define BLKFRASET _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */ +#define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */ +#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */ +#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */ +#define BLKSSZGET _IO(0x12,104)/* get block device sector size */ +#if 0 +#define BLKPG _IO(0x12,105)/* See blkpg.h */ + +/* Some people are morons. Do not use sizeof! */ + +#define BLKELVGET _IOR(0x12,106,size_t)/* elevator get */ +#define BLKELVSET _IOW(0x12,107,size_t)/* elevator set */ +/* This was here just to show that the number is taken - + probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */ +#endif +/* A jump here: 108-111 have been used for various private purposes. 
*/ +#define BLKBSZGET _IOR(0x12,112,size_t) +#define BLKBSZSET _IOW(0x12,113,size_t) +#define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */ +#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup) +#define BLKTRACESTART _IO(0x12,116) +#define BLKTRACESTOP _IO(0x12,117) +#define BLKTRACETEARDOWN _IO(0x12,118) +#define BLKDISCARD _IO(0x12,119) +#define BLKIOMIN _IO(0x12,120) +#define BLKIOOPT _IO(0x12,121) +#define BLKALIGNOFF _IO(0x12,122) +#define BLKPBSZGET _IO(0x12,123) +#define BLKDISCARDZEROES _IO(0x12,124) +#define BLKSECDISCARD _IO(0x12,125) +#define BLKROTATIONAL _IO(0x12,126) +#define BLKZEROOUT _IO(0x12,127) +#define BLKGETDISKSEQ _IOR(0x12,128,__u64) +/* + * A jump here: 130-136 are reserved for zoned block devices + * (see uapi/linux/blkzoned.h) + */ + +#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ +#define FIBMAP _IO(0x00,1) /* bmap access */ +#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */ +#define FIFREEZE _IOWR('X', 119, int) /* Freeze */ +#define FITHAW _IOWR('X', 120, int) /* Thaw */ +#define FITRIM _IOWR('X', 121, struct fstrim_range) /* Trim */ +#define FICLONE _IOW(0x94, 9, int) +#define FICLONERANGE _IOW(0x94, 13, struct file_clone_range) +#define FIDEDUPERANGE _IOWR(0x94, 54, struct file_dedupe_range) + +#define FSLABEL_MAX 256 /* Max chars for the interface; each fs may differ */ + +#define FS_IOC_GETFLAGS _IOR('f', 1, long) +#define FS_IOC_SETFLAGS _IOW('f', 2, long) +#define FS_IOC_GETVERSION _IOR('v', 1, long) +#define FS_IOC_SETVERSION _IOW('v', 2, long) +#define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap) +#define FS_IOC32_GETFLAGS _IOR('f', 1, int) +#define FS_IOC32_SETFLAGS _IOW('f', 2, int) +#define FS_IOC32_GETVERSION _IOR('v', 1, int) +#define FS_IOC32_SETVERSION _IOW('v', 2, int) +#define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr) +#define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr) +#define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX]) +#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX]) +/* Returns the external filesystem UUID, the same one blkid returns */ +#define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2) +/* + * Returns the path component under /sys/fs/ that refers to this filesystem; + * also /sys/kernel/debug/ for filesystems with debugfs exports + */ +#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path) + +/* + * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) + * + * Note: for historical reasons, these flags were originally used and + * defined for use by ext2/ext3, and then other file systems started + * using these flags so they wouldn't need to write their own version + * of chattr/lsattr (which was shipped as part of e2fsprogs). You + * should think twice before trying to use these flags in new + * contexts, or trying to assign these flags, since they are used both + * as the UAPI and the on-disk encoding for ext2/3/4. Also, we are + * almost out of 32-bit flags. :-) + * + * We have recently hoisted FS_IOC_FSGETXATTR / FS_IOC_FSSETXATTR from + * XFS to the generic FS level interface. This uses a structure that + * has padding and hence has more room to grow, so it may be more + * appropriate for many new use cases. + * + * Please do not change these flags or interfaces before checking with + * linux-fsdevel@vger.kernel.org and linux-api@vger.kernel.org. 
+ */ +#define FS_SECRM_FL 0x00000001 /* Secure deletion */ +#define FS_UNRM_FL 0x00000002 /* Undelete */ +#define FS_COMPR_FL 0x00000004 /* Compress file */ +#define FS_SYNC_FL 0x00000008 /* Synchronous updates */ +#define FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ +#define FS_APPEND_FL 0x00000020 /* writes to file may only append */ +#define FS_NODUMP_FL 0x00000040 /* do not dump file */ +#define FS_NOATIME_FL 0x00000080 /* do not update atime */ +/* Reserved for compression usage... */ +#define FS_DIRTY_FL 0x00000100 +#define FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ +#define FS_NOCOMP_FL 0x00000400 /* Don't compress */ +/* End compression flags --- maybe not all used */ +#define FS_ENCRYPT_FL 0x00000800 /* Encrypted file */ +#define FS_BTREE_FL 0x00001000 /* btree format dir */ +#define FS_INDEX_FL 0x00001000 /* hash-indexed directory */ +#define FS_IMAGIC_FL 0x00002000 /* AFS directory */ +#define FS_JOURNAL_DATA_FL 0x00004000 /* Reserved for ext3 */ +#define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ +#define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ +#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ +#define FS_HUGE_FILE_FL 0x00040000 /* Reserved for ext4 */ +#define FS_EXTENT_FL 0x00080000 /* Extents */ +#define FS_VERITY_FL 0x00100000 /* Verity protected inode */ +#define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ +#define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ +#define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_DAX_FL 0x02000000 /* Inode is DAX */ +#define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ +#define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ +#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ +#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ + +#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ +#define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ + + +#define SYNC_FILE_RANGE_WAIT_BEFORE 1 +#define SYNC_FILE_RANGE_WRITE 2 +#define SYNC_FILE_RANGE_WAIT_AFTER 4 +#define SYNC_FILE_RANGE_WRITE_AND_WAIT (SYNC_FILE_RANGE_WRITE | \ + SYNC_FILE_RANGE_WAIT_BEFORE | \ + SYNC_FILE_RANGE_WAIT_AFTER) + +/* + * Flags for preadv2/pwritev2: + */ + +typedef int __bitwise __kernel_rwf_t; + +/* high priority request, poll if possible */ +#define RWF_HIPRI ((__force __kernel_rwf_t)0x00000001) + +/* per-IO O_DSYNC */ +#define RWF_DSYNC ((__force __kernel_rwf_t)0x00000002) + +/* per-IO O_SYNC */ +#define RWF_SYNC ((__force __kernel_rwf_t)0x00000004) + +/* per-IO, return -EAGAIN if operation would block */ +#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008) + +/* per-IO O_APPEND */ +#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010) + +/* per-IO negation of O_APPEND */ +#define RWF_NOAPPEND ((__force __kernel_rwf_t)0x00000020) + +/* mask of flags supported by the kernel */ +#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ + RWF_APPEND | RWF_NOAPPEND) + +/* Pagemap ioctl */ +#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg) + +/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. 
*/ +#define PAGE_IS_WPALLOWED (1 << 0) +#define PAGE_IS_WRITTEN (1 << 1) +#define PAGE_IS_FILE (1 << 2) +#define PAGE_IS_PRESENT (1 << 3) +#define PAGE_IS_SWAPPED (1 << 4) +#define PAGE_IS_PFNZERO (1 << 5) +#define PAGE_IS_HUGE (1 << 6) +#define PAGE_IS_SOFT_DIRTY (1 << 7) + +/* + * struct page_region - Page region with flags + * @start: Start of the region + * @end: End of the region (exclusive) + * @categories: PAGE_IS_* category bitmask for the region + */ +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +/* Flags for PAGEMAP_SCAN ioctl */ +#define PM_SCAN_WP_MATCHING (1 << 0) /* Write protect the pages matched. */ +#define PM_SCAN_CHECK_WPASYNC (1 << 1) /* Abort the scan when a non-WP-enabled page is found. */ + +/* + * struct pm_scan_arg - Pagemap ioctl argument + * @size: Size of the structure + * @flags: Flags for the IOCTL + * @start: Starting address of the region + * @end: Ending address of the region + * @walk_end Address where the scan stopped (written by kernel). + * walk_end == end (address tags cleared) informs that the scan completed on entire range. + * @vec: Address of page_region struct array for output + * @vec_len: Length of the page_region struct array + * @max_pages: Optional limit for number of returned pages (0 = disabled) + * @category_inverted: PAGE_IS_* categories which values match if 0 instead of 1 + * @category_mask: Skip pages for which any category doesn't match + * @category_anyof_mask: Skip pages for which no category matches + * @return_mask: PAGE_IS_* categories that are to be reported in `page_region`s returned + */ +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + __u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + +#endif /* _UAPI_LINUX_FS_H */ diff --git a/tools/perf/trace/beauty/include/uapi/linux/mount.h b/tools/perf/trace/beauty/include/uapi/linux/mount.h new file mode 100644 index 000000000000..ad5478dbad00 --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/linux/mount.h @@ -0,0 +1,211 @@ +#ifndef _UAPI_LINUX_MOUNT_H +#define _UAPI_LINUX_MOUNT_H + +#include <linux/types.h> + +/* + * These are the fs-independent mount-flags: up to 32 flags are supported + * + * Usage of these is restricted within the kernel to core mount(2) code and + * callers of sys_mount() only. Filesystems should be using the SB_* + * equivalent instead. + */ +#define MS_RDONLY 1 /* Mount read-only */ +#define MS_NOSUID 2 /* Ignore suid and sgid bits */ +#define MS_NODEV 4 /* Disallow access to device special files */ +#define MS_NOEXEC 8 /* Disallow program execution */ +#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ +#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ +#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ +#define MS_NOSYMFOLLOW 256 /* Do not follow symlinks */ +#define MS_NOATIME 1024 /* Do not update access times. */ +#define MS_NODIRATIME 2048 /* Do not update directory access times */ +#define MS_BIND 4096 +#define MS_MOVE 8192 +#define MS_REC 16384 +#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. + MS_VERBOSE is deprecated. 
*/ +#define MS_SILENT 32768 +#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define MS_UNBINDABLE (1<<17) /* change to unbindable */ +#define MS_PRIVATE (1<<18) /* change to private */ +#define MS_SLAVE (1<<19) /* change to slave */ +#define MS_SHARED (1<<20) /* change to shared */ +#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ +#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define MS_I_VERSION (1<<23) /* Update inode I_version field */ +#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ + +/* These sb flags are internal to the kernel */ +#define MS_SUBMOUNT (1<<26) +#define MS_NOREMOTELOCK (1<<27) +#define MS_NOSEC (1<<28) +#define MS_BORN (1<<29) +#define MS_ACTIVE (1<<30) +#define MS_NOUSER (1<<31) + +/* + * Superblock flags that can be altered by MS_REMOUNT + */ +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ + MS_LAZYTIME) + +/* + * Old magic mount flag and mask + */ +#define MS_MGC_VAL 0xC0ED0000 +#define MS_MGC_MSK 0xffff0000 + +/* + * open_tree() flags. + */ +#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */ +#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */ + +/* + * move_mount() flags. + */ +#define MOVE_MOUNT_F_SYMLINKS 0x00000001 /* Follow symlinks on from path */ +#define MOVE_MOUNT_F_AUTOMOUNTS 0x00000002 /* Follow automounts on from path */ +#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */ +#define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */ +#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */ +#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */ +#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */ +#define MOVE_MOUNT_BENEATH 0x00000200 /* Mount beneath top mount */ +#define MOVE_MOUNT__MASK 0x00000377 + +/* + * fsopen() flags. + */ +#define FSOPEN_CLOEXEC 0x00000001 + +/* + * fspick() flags. + */ +#define FSPICK_CLOEXEC 0x00000001 +#define FSPICK_SYMLINK_NOFOLLOW 0x00000002 +#define FSPICK_NO_AUTOMOUNT 0x00000004 +#define FSPICK_EMPTY_PATH 0x00000008 + +/* + * The type of fsconfig() call made. + */ +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, /* Set parameter, supplying no value */ + FSCONFIG_SET_STRING = 1, /* Set parameter, supplying a string value */ + FSCONFIG_SET_BINARY = 2, /* Set parameter, supplying a binary blob value */ + FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */ + FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */ + FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */ + FSCONFIG_CMD_CREATE = 6, /* Create new or reuse existing superblock */ + FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */ + FSCONFIG_CMD_CREATE_EXCL = 8, /* Create new superblock, fail if reusing existing superblock */ +}; + +/* + * fsmount() flags. + */ +#define FSMOUNT_CLOEXEC 0x00000001 + +/* + * Mount attributes. + */ +#define MOUNT_ATTR_RDONLY 0x00000001 /* Mount read-only */ +#define MOUNT_ATTR_NOSUID 0x00000002 /* Ignore suid and sgid bits */ +#define MOUNT_ATTR_NODEV 0x00000004 /* Disallow access to device special files */ +#define MOUNT_ATTR_NOEXEC 0x00000008 /* Disallow program execution */ +#define MOUNT_ATTR__ATIME 0x00000070 /* Setting on how atime should be updated */ +#define MOUNT_ATTR_RELATIME 0x00000000 /* - Update atime relative to mtime/ctime. 
*/ +#define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */ +#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */ +#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */ +#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */ +#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */ + +/* + * mount_setattr() + */ +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +/* List of all mount_attr versions. */ +#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */ + + +/* + * Structure for getting mount/superblock/filesystem info with statmount(2). + * + * The interface is similar to statx(2): individual fields or groups can be + * selected with the @mask argument of statmount(). Kernel will set the @mask + * field according to the supported fields. + * + * If string fields are selected, then the caller needs to pass a buffer that + * has space after the fixed part of the structure. Nul terminated strings are + * copied there and offsets relative to @str are stored in the relevant fields. + * If the buffer is too small, then EOVERFLOW is returned. The actually used + * size is returned in @size. + */ +struct statmount { + __u32 size; /* Total size, including strings */ + __u32 __spare1; + __u64 mask; /* What results were written */ + __u32 sb_dev_major; /* Device ID */ + __u32 sb_dev_minor; + __u64 sb_magic; /* ..._SUPER_MAGIC */ + __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */ + __u32 fs_type; /* [str] Filesystem type */ + __u64 mnt_id; /* Unique ID of mount */ + __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */ + __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */ + __u32 mnt_parent_id_old; + __u64 mnt_attr; /* MOUNT_ATTR_... */ + __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */ + __u64 mnt_peer_group; /* ID of shared peer group */ + __u64 mnt_master; /* Mount receives propagation from this ID */ + __u64 propagate_from; /* Propagation from in current namespace */ + __u32 mnt_root; /* [str] Root of mount relative to root of fs */ + __u32 mnt_point; /* [str] Mountpoint relative to current root */ + __u64 __spare2[50]; + char str[]; /* Variable size part containing strings */ +}; + +/* + * Structure for passing mount ID and miscellaneous parameters to statmount(2) + * and listmount(2). + * + * For statmount(2) @param represents the request mask. + * For listmount(2) @param represents the last listed mount id (or zero). + */ +struct mnt_id_req { + __u32 size; + __u32 spare; + __u64 mnt_id; + __u64 param; +}; + +/* List of all mnt_id_req versions. */ +#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */ + +/* + * @mask bits for statmount(2) + */ +#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */ +#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... 
*/ +#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */ +#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */ +#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */ +#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */ + +/* + * Special @mnt_id values that can be passed to listmount + */ +#define LSMT_ROOT 0xffffffffffffffff /* root mount */ + +#endif /* _UAPI_LINUX_MOUNT_H */ diff --git a/tools/perf/trace/beauty/include/uapi/linux/prctl.h b/tools/perf/trace/beauty/include/uapi/linux/prctl.h new file mode 100644 index 000000000000..370ed14b1ae0 --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/linux/prctl.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_PRCTL_H +#define _LINUX_PRCTL_H + +#include <linux/types.h> + +/* Values to pass as first argument to prctl() */ + +#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */ +#define PR_GET_PDEATHSIG 2 /* Second arg is a ptr to return the signal */ + +/* Get/set current->mm->dumpable */ +#define PR_GET_DUMPABLE 3 +#define PR_SET_DUMPABLE 4 + +/* Get/set unaligned access control bits (if meaningful) */ +#define PR_GET_UNALIGN 5 +#define PR_SET_UNALIGN 6 +# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */ +# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */ + +/* Get/set whether or not to drop capabilities on setuid() away from + * uid 0 (as per security/commoncap.c) */ +#define PR_GET_KEEPCAPS 7 +#define PR_SET_KEEPCAPS 8 + +/* Get/set floating-point emulation control bits (if meaningful) */ +#define PR_GET_FPEMU 9 +#define PR_SET_FPEMU 10 +# define PR_FPEMU_NOPRINT 1 /* silently emulate fp operations accesses */ +# define PR_FPEMU_SIGFPE 2 /* don't emulate fp operations, send SIGFPE instead */ + +/* Get/set floating-point exception mode (if meaningful) */ +#define PR_GET_FPEXC 11 +#define PR_SET_FPEXC 12 +# define PR_FP_EXC_SW_ENABLE 0x80 /* Use FPEXC for FP exception enables */ +# define PR_FP_EXC_DIV 0x010000 /* floating point divide by zero */ +# define PR_FP_EXC_OVF 0x020000 /* floating point overflow */ +# define PR_FP_EXC_UND 0x040000 /* floating point underflow */ +# define PR_FP_EXC_RES 0x080000 /* floating point inexact result */ +# define PR_FP_EXC_INV 0x100000 /* floating point invalid operation */ +# define PR_FP_EXC_DISABLED 0 /* FP exceptions disabled */ +# define PR_FP_EXC_NONRECOV 1 /* async non-recoverable exc. 
mode */ +# define PR_FP_EXC_ASYNC 2 /* async recoverable exception mode */ +# define PR_FP_EXC_PRECISE 3 /* precise exception mode */ + +/* Get/set whether we use statistical process timing or accurate timestamp + * based process timing */ +#define PR_GET_TIMING 13 +#define PR_SET_TIMING 14 +# define PR_TIMING_STATISTICAL 0 /* Normal, traditional, + statistical process timing */ +# define PR_TIMING_TIMESTAMP 1 /* Accurate timestamp based + process timing */ + +#define PR_SET_NAME 15 /* Set process name */ +#define PR_GET_NAME 16 /* Get process name */ + +/* Get/set process endian */ +#define PR_GET_ENDIAN 19 +#define PR_SET_ENDIAN 20 +# define PR_ENDIAN_BIG 0 +# define PR_ENDIAN_LITTLE 1 /* True little endian mode */ +# define PR_ENDIAN_PPC_LITTLE 2 /* "PowerPC" pseudo little endian */ + +/* Get/set process seccomp mode */ +#define PR_GET_SECCOMP 21 +#define PR_SET_SECCOMP 22 + +/* Get/set the capability bounding set (as per security/commoncap.c) */ +#define PR_CAPBSET_READ 23 +#define PR_CAPBSET_DROP 24 + +/* Get/set the process' ability to use the timestamp counter instruction */ +#define PR_GET_TSC 25 +#define PR_SET_TSC 26 +# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */ +# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */ + +/* Get/set securebits (as per security/commoncap.c) */ +#define PR_GET_SECUREBITS 27 +#define PR_SET_SECUREBITS 28 + +/* + * Get/set the timerslack as used by poll/select/nanosleep + * A value of 0 means "use default" + */ +#define PR_SET_TIMERSLACK 29 +#define PR_GET_TIMERSLACK 30 + +#define PR_TASK_PERF_EVENTS_DISABLE 31 +#define PR_TASK_PERF_EVENTS_ENABLE 32 + +/* + * Set early/late kill mode for hwpoison memory corruption. + * This influences when the process gets killed on a memory corruption. + */ +#define PR_MCE_KILL 33 +# define PR_MCE_KILL_CLEAR 0 +# define PR_MCE_KILL_SET 1 + +# define PR_MCE_KILL_LATE 0 +# define PR_MCE_KILL_EARLY 1 +# define PR_MCE_KILL_DEFAULT 2 + +#define PR_MCE_KILL_GET 34 + +/* + * Tune up process memory map specifics. + */ +#define PR_SET_MM 35 +# define PR_SET_MM_START_CODE 1 +# define PR_SET_MM_END_CODE 2 +# define PR_SET_MM_START_DATA 3 +# define PR_SET_MM_END_DATA 4 +# define PR_SET_MM_START_STACK 5 +# define PR_SET_MM_START_BRK 6 +# define PR_SET_MM_BRK 7 +# define PR_SET_MM_ARG_START 8 +# define PR_SET_MM_ARG_END 9 +# define PR_SET_MM_ENV_START 10 +# define PR_SET_MM_ENV_END 11 +# define PR_SET_MM_AUXV 12 +# define PR_SET_MM_EXE_FILE 13 +# define PR_SET_MM_MAP 14 +# define PR_SET_MM_MAP_SIZE 15 + +/* + * This structure provides new memory descriptor + * map which mostly modifies /proc/pid/stat[m] + * output for a task. This is mostly done for the + * sake of checkpoint/restore functionality. + */ +struct prctl_mm_map { + __u64 start_code; /* code section bounds */ + __u64 end_code; + __u64 start_data; /* data section bounds */ + __u64 end_data; + __u64 start_brk; /* heap for brk() syscall */ + __u64 brk; + __u64 start_stack; /* stack starts at */ + __u64 arg_start; /* command line arguments bounds */ + __u64 arg_end; + __u64 env_start; /* environment variables bounds */ + __u64 env_end; + __u64 *auxv; /* auxiliary vector */ + __u32 auxv_size; /* vector size */ + __u32 exe_fd; /* /proc/$pid/exe link file */ +}; + +/* + * Set specific pid that is allowed to ptrace the current task. + * A value of 0 means "no process". 
+ */ +#define PR_SET_PTRACER 0x59616d61 +# define PR_SET_PTRACER_ANY ((unsigned long)-1) + +#define PR_SET_CHILD_SUBREAPER 36 +#define PR_GET_CHILD_SUBREAPER 37 + +/* + * If no_new_privs is set, then operations that grant new privileges (i.e. + * execve) will either fail or not grant them. This affects suid/sgid, + * file capabilities, and LSMs. + * + * Operations that merely manipulate or drop existing privileges (setresuid, + * capset, etc.) will still work. Drop those privileges if you want them gone. + * + * Changing LSM security domain is considered a new privilege. So, for example, + * asking selinux for a specific new context (e.g. with runcon) will result + * in execve returning -EPERM. + * + * See Documentation/userspace-api/no_new_privs.rst for more details. + */ +#define PR_SET_NO_NEW_PRIVS 38 +#define PR_GET_NO_NEW_PRIVS 39 + +#define PR_GET_TID_ADDRESS 40 + +#define PR_SET_THP_DISABLE 41 +#define PR_GET_THP_DISABLE 42 + +/* + * No longer implemented, but left here to ensure the numbers stay reserved: + */ +#define PR_MPX_ENABLE_MANAGEMENT 43 +#define PR_MPX_DISABLE_MANAGEMENT 44 + +#define PR_SET_FP_MODE 45 +#define PR_GET_FP_MODE 46 +# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */ +# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */ + +/* Control the ambient capability set */ +#define PR_CAP_AMBIENT 47 +# define PR_CAP_AMBIENT_IS_SET 1 +# define PR_CAP_AMBIENT_RAISE 2 +# define PR_CAP_AMBIENT_LOWER 3 +# define PR_CAP_AMBIENT_CLEAR_ALL 4 + +/* arm64 Scalable Vector Extension controls */ +/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */ +#define PR_SVE_SET_VL 50 /* set task vector length */ +# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */ +#define PR_SVE_GET_VL 51 /* get task vector length */ +/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */ +# define PR_SVE_VL_LEN_MASK 0xffff +# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */ + +/* Per task speculation control */ +#define PR_GET_SPECULATION_CTRL 52 +#define PR_SET_SPECULATION_CTRL 53 +/* Speculation control variants */ +# define PR_SPEC_STORE_BYPASS 0 +# define PR_SPEC_INDIRECT_BRANCH 1 +# define PR_SPEC_L1D_FLUSH 2 +/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ +# define PR_SPEC_NOT_AFFECTED 0 +# define PR_SPEC_PRCTL (1UL << 0) +# define PR_SPEC_ENABLE (1UL << 1) +# define PR_SPEC_DISABLE (1UL << 2) +# define PR_SPEC_FORCE_DISABLE (1UL << 3) +# define PR_SPEC_DISABLE_NOEXEC (1UL << 4) + +/* Reset arm64 pointer authentication keys */ +#define PR_PAC_RESET_KEYS 54 +# define PR_PAC_APIAKEY (1UL << 0) +# define PR_PAC_APIBKEY (1UL << 1) +# define PR_PAC_APDAKEY (1UL << 2) +# define PR_PAC_APDBKEY (1UL << 3) +# define PR_PAC_APGAKEY (1UL << 4) + +/* Tagged user address controls for arm64 */ +#define PR_SET_TAGGED_ADDR_CTRL 55 +#define PR_GET_TAGGED_ADDR_CTRL 56 +# define PR_TAGGED_ADDR_ENABLE (1UL << 0) +/* MTE tag check fault modes */ +# define PR_MTE_TCF_NONE 0UL +# define PR_MTE_TCF_SYNC (1UL << 1) +# define PR_MTE_TCF_ASYNC (1UL << 2) +# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC) +/* MTE tag inclusion mask */ +# define PR_MTE_TAG_SHIFT 3 +# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) +/* Unused; kept only for source compatibility */ +# define PR_MTE_TCF_SHIFT 1 + +/* Control reclaim behavior when allocating memory */ +#define PR_SET_IO_FLUSHER 57 +#define PR_GET_IO_FLUSHER 58 + +/* Dispatch syscalls to a userspace handler */ +#define PR_SET_SYSCALL_USER_DISPATCH 59 +# define PR_SYS_DISPATCH_OFF 0 +# define 
PR_SYS_DISPATCH_ON 1 +/* The control values for the user space selector when dispatch is enabled */ +# define SYSCALL_DISPATCH_FILTER_ALLOW 0 +# define SYSCALL_DISPATCH_FILTER_BLOCK 1 + +/* Set/get enabled arm64 pointer authentication keys */ +#define PR_PAC_SET_ENABLED_KEYS 60 +#define PR_PAC_GET_ENABLED_KEYS 61 + +/* Request the scheduler to share a core */ +#define PR_SCHED_CORE 62 +# define PR_SCHED_CORE_GET 0 +# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */ +# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */ +# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */ +# define PR_SCHED_CORE_MAX 4 +# define PR_SCHED_CORE_SCOPE_THREAD 0 +# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1 +# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2 + +/* arm64 Scalable Matrix Extension controls */ +/* Flag values must be in sync with SVE versions */ +#define PR_SME_SET_VL 63 /* set task vector length */ +# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */ +#define PR_SME_GET_VL 64 /* get task vector length */ +/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */ +# define PR_SME_VL_LEN_MASK 0xffff +# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */ + +/* Memory deny write / execute */ +#define PR_SET_MDWE 65 +# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0) +# define PR_MDWE_NO_INHERIT (1UL << 1) + +#define PR_GET_MDWE 66 + +#define PR_SET_VMA 0x53564d41 +# define PR_SET_VMA_ANON_NAME 0 + +#define PR_GET_AUXV 0x41555856 + +#define PR_SET_MEMORY_MERGE 67 +#define PR_GET_MEMORY_MERGE 68 + +#define PR_RISCV_V_SET_CONTROL 69 +#define PR_RISCV_V_GET_CONTROL 70 +# define PR_RISCV_V_VSTATE_CTRL_DEFAULT 0 +# define PR_RISCV_V_VSTATE_CTRL_OFF 1 +# define PR_RISCV_V_VSTATE_CTRL_ON 2 +# define PR_RISCV_V_VSTATE_CTRL_INHERIT (1 << 4) +# define PR_RISCV_V_VSTATE_CTRL_CUR_MASK 0x3 +# define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK 0xc +# define PR_RISCV_V_VSTATE_CTRL_MASK 0x1f + +#endif /* _LINUX_PRCTL_H */ diff --git a/tools/perf/trace/beauty/include/uapi/linux/sched.h b/tools/perf/trace/beauty/include/uapi/linux/sched.h new file mode 100644 index 000000000000..3bac0a8ceab2 --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/linux/sched.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_SCHED_H +#define _UAPI_LINUX_SCHED_H + +#include <linux/types.h> + +/* + * cloning flags: + */ +#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */ +#define CLONE_VM 0x00000100 /* set if VM shared between processes */ +#define CLONE_FS 0x00000200 /* set if fs info shared between processes */ +#define CLONE_FILES 0x00000400 /* set if open files shared between processes */ +#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */ +#define CLONE_PIDFD 0x00001000 /* set if a pidfd should be placed in parent */ +#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ +#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ +#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ +#define CLONE_THREAD 0x00010000 /* Same thread group? 
*/ +#define CLONE_NEWNS 0x00020000 /* New mount namespace group */ +#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */ +#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */ +#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ +#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */ +#define CLONE_DETACHED 0x00400000 /* Unused, ignored */ +#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ +#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ +#define CLONE_NEWCGROUP 0x02000000 /* New cgroup namespace */ +#define CLONE_NEWUTS 0x04000000 /* New utsname namespace */ +#define CLONE_NEWIPC 0x08000000 /* New ipc namespace */ +#define CLONE_NEWUSER 0x10000000 /* New user namespace */ +#define CLONE_NEWPID 0x20000000 /* New pid namespace */ +#define CLONE_NEWNET 0x40000000 /* New network namespace */ +#define CLONE_IO 0x80000000 /* Clone io context */ + +/* Flags for the clone3() syscall. */ +#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */ +#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */ + +/* + * cloning flags intersect with CSIGNAL so can be used with unshare and clone3 + * syscalls only: + */ +#define CLONE_NEWTIME 0x00000080 /* New time namespace */ + +#ifndef __ASSEMBLY__ +/** + * struct clone_args - arguments for the clone3 syscall + * @flags: Flags for the new process as listed above. + * All flags are valid except for CSIGNAL and + * CLONE_DETACHED. + * @pidfd: If CLONE_PIDFD is set, a pidfd will be + * returned in this argument. + * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the + * child process will be returned in the child's + * memory. + * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of + * the child process will be returned in the + * parent's memory. + * @exit_signal: The exit_signal the parent process will be + * sent when the child exits. + * @stack: Specify the location of the stack for the + * child process. + * Note, @stack is expected to point to the + * lowest address. The stack direction will be + * determined by the kernel and set up + * appropriately based on @stack_size. + * @stack_size: The size of the stack for the child process. + * @tls: If CLONE_SETTLS is set, the tls descriptor + * is set to tls. + * @set_tid: Pointer to an array of type *pid_t. The size + * of the array is defined using @set_tid_size. + * This array is used to select PIDs/TIDs for + * newly created processes. The first element in + * this defines the PID in the most nested PID + * namespace. Each additional element in the array + * defines the PID in the parent PID namespace of + * the original PID namespace. If the array has + * less entries than the number of currently + * nested PID namespaces only the PIDs in the + * corresponding namespaces are set. + * @set_tid_size: This defines the size of the array referenced + * in @set_tid. This cannot be larger than the + * kernel's limit of nested PID namespaces. + * @cgroup: If CLONE_INTO_CGROUP is specified set this to + * a file descriptor for the cgroup. + * + * The structure is versioned by size and thus extensible. + * New struct members must go at the end of the struct and + * must be properly 64bit aligned. 
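
Because the structure is versioned by size, callers pass sizeof(struct clone_args) as the second syscall argument; glibc provides no clone3() wrapper, so syscall(2) is used directly. A minimal sketch, assuming kernel headers that provide struct clone_args and SYS_clone3:

    #define _GNU_SOURCE
    #include <linux/sched.h>        /* struct clone_args, CLONE_PIDFD */
    #include <signal.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            int pidfd = -1;
            struct clone_args args = {
                    .flags       = CLONE_PIDFD,              /* kernel stores a pidfd in 'pidfd' */
                    .pidfd       = (__u64)(uintptr_t)&pidfd,
                    .exit_signal = SIGCHLD,                  /* fork()-like exit notification */
            };
            /* The size argument is what versions the struct (see above). */
            pid_t pid = syscall(SYS_clone3, &args, sizeof(args));

            if (pid < 0) {
                    perror("clone3");
                    return 1;
            }
            if (pid == 0)
                    _exit(0);                                /* child */
            printf("child %d, pidfd %d\n", (int)pid, pidfd);
            waitpid(pid, NULL, 0);
            return 0;
    }
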
+ */ +struct clone_args { + __aligned_u64 flags; + __aligned_u64 pidfd; + __aligned_u64 child_tid; + __aligned_u64 parent_tid; + __aligned_u64 exit_signal; + __aligned_u64 stack; + __aligned_u64 stack_size; + __aligned_u64 tls; + __aligned_u64 set_tid; + __aligned_u64 set_tid_size; + __aligned_u64 cgroup; +}; +#endif + +#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */ +#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */ +#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */ + +/* + * Scheduling policies + */ +#define SCHED_NORMAL 0 +#define SCHED_FIFO 1 +#define SCHED_RR 2 +#define SCHED_BATCH 3 +/* SCHED_ISO: reserved but not implemented yet */ +#define SCHED_IDLE 5 +#define SCHED_DEADLINE 6 + +/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ +#define SCHED_RESET_ON_FORK 0x40000000 + +/* + * For the sched_{set,get}attr() calls + */ +#define SCHED_FLAG_RESET_ON_FORK 0x01 +#define SCHED_FLAG_RECLAIM 0x02 +#define SCHED_FLAG_DL_OVERRUN 0x04 +#define SCHED_FLAG_KEEP_POLICY 0x08 +#define SCHED_FLAG_KEEP_PARAMS 0x10 +#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20 +#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40 + +#define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \ + SCHED_FLAG_KEEP_PARAMS) + +#define SCHED_FLAG_UTIL_CLAMP (SCHED_FLAG_UTIL_CLAMP_MIN | \ + SCHED_FLAG_UTIL_CLAMP_MAX) + +#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \ + SCHED_FLAG_RECLAIM | \ + SCHED_FLAG_DL_OVERRUN | \ + SCHED_FLAG_KEEP_ALL | \ + SCHED_FLAG_UTIL_CLAMP) + +#endif /* _UAPI_LINUX_SCHED_H */ diff --git a/tools/perf/trace/beauty/include/uapi/linux/stat.h b/tools/perf/trace/beauty/include/uapi/linux/stat.h new file mode 100644 index 000000000000..2f2ee82d5517 --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/linux/stat.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_STAT_H +#define _UAPI_LINUX_STAT_H + +#include <linux/types.h> + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) + +#define S_IFMT 00170000 +#define S_IFSOCK 0140000 +#define S_IFLNK 0120000 +#define S_IFREG 0100000 +#define S_IFBLK 0060000 +#define S_IFDIR 0040000 +#define S_IFCHR 0020000 +#define S_IFIFO 0010000 +#define S_ISUID 0004000 +#define S_ISGID 0002000 +#define S_ISVTX 0001000 + +#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK) +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) +#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK) +#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO) +#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK) + +#define S_IRWXU 00700 +#define S_IRUSR 00400 +#define S_IWUSR 00200 +#define S_IXUSR 00100 + +#define S_IRWXG 00070 +#define S_IRGRP 00040 +#define S_IWGRP 00020 +#define S_IXGRP 00010 + +#define S_IRWXO 00007 +#define S_IROTH 00004 +#define S_IWOTH 00002 +#define S_IXOTH 00001 + +#endif + +/* + * Timestamp structure for the timestamps in struct statx. + * + * tv_sec holds the number of seconds before (negative) or after (positive) + * 00:00:00 1st January 1970 UTC. + * + * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time. + * + * __reserved is held in case we need a yet finer resolution. + */ +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; + +/* + * Structures for the extended file attribute retrieval system call + * (statx()). 
+ * + * The caller passes a mask of what they're specifically interested in as a + * parameter to statx(). What statx() actually got will be indicated in + * st_mask upon return. + * + * For each bit in the mask argument: + * + * - if the datum is not supported: + * + * - the bit will be cleared, and + * + * - the datum will be set to an appropriate fabricated value if one is + * available (eg. CIFS can take a default uid and gid), otherwise + * + * - the field will be cleared; + * + * - otherwise, if explicitly requested: + * + * - the datum will be synchronised to the server if AT_STATX_FORCE_SYNC is + * set or if the datum is considered out of date, and + * + * - the field will be filled in and the bit will be set; + * + * - otherwise, if not requested, but available in approximate form without any + * effort, it will be filled in anyway, and the bit will be set upon return + * (it might not be up to date, however, and no attempt will be made to + * synchronise the internal state first); + * + * - otherwise the field and the bit will be cleared before returning. + * + * Items in STATX_BASIC_STATS may be marked unavailable on return, but they + * will have values installed for compatibility purposes so that stat() and + * co. can be emulated in userspace. + */ +struct statx { + /* 0x00 */ + __u32 stx_mask; /* What results were written [uncond] */ + __u32 stx_blksize; /* Preferred general I/O size [uncond] */ + __u64 stx_attributes; /* Flags conveying information about the file [uncond] */ + /* 0x10 */ + __u32 stx_nlink; /* Number of hard links */ + __u32 stx_uid; /* User ID of owner */ + __u32 stx_gid; /* Group ID of owner */ + __u16 stx_mode; /* File mode */ + __u16 __spare0[1]; + /* 0x20 */ + __u64 stx_ino; /* Inode number */ + __u64 stx_size; /* File size */ + __u64 stx_blocks; /* Number of 512-byte blocks allocated */ + __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */ + /* 0x40 */ + struct statx_timestamp stx_atime; /* Last access time */ + struct statx_timestamp stx_btime; /* File creation time */ + struct statx_timestamp stx_ctime; /* Last attribute change time */ + struct statx_timestamp stx_mtime; /* Last data modification time */ + /* 0x80 */ + __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */ + __u32 stx_rdev_minor; + __u32 stx_dev_major; /* ID of device containing file [uncond] */ + __u32 stx_dev_minor; + /* 0x90 */ + __u64 stx_mnt_id; + __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */ + __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */ + /* 0xa0 */ + __u64 __spare3[12]; /* Spare space for future expansion */ + /* 0x100 */ +}; + +/* + * Flags to be stx_mask + * + * Query request/result mask for statx() and struct statx::stx_mask. + * + * These bits should be set in the mask argument of statx() to request + * particular items when calling statx(). 
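
From userspace the want/got handshake described above looks like this; a minimal sketch using the glibc statx() wrapper (glibc 2.28 and later) and the mask bits defined just below:

    #define _GNU_SOURCE
    #include <fcntl.h>              /* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
    #include <stdio.h>
    #include <sys/stat.h>           /* statx(), struct statx, STATX_* */

    int main(int argc, char **argv)
    {
            const char *path = argc > 1 ? argv[1] : ".";
            struct statx stx;

            /* Request only what is needed, then check stx_mask for what came back. */
            if (statx(AT_FDCWD, path, AT_SYMLINK_NOFOLLOW,
                      STATX_SIZE | STATX_BTIME, &stx)) {
                    perror("statx");
                    return 1;
            }
            if (stx.stx_mask & STATX_SIZE)
                    printf("size : %llu\n", (unsigned long long)stx.stx_size);
            if (stx.stx_mask & STATX_BTIME)
                    printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
            else
                    printf("btime not provided by this filesystem\n");
            return 0;
    }
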
+ */ +#define STATX_TYPE 0x00000001U /* Want/got stx_mode & S_IFMT */ +#define STATX_MODE 0x00000002U /* Want/got stx_mode & ~S_IFMT */ +#define STATX_NLINK 0x00000004U /* Want/got stx_nlink */ +#define STATX_UID 0x00000008U /* Want/got stx_uid */ +#define STATX_GID 0x00000010U /* Want/got stx_gid */ +#define STATX_ATIME 0x00000020U /* Want/got stx_atime */ +#define STATX_MTIME 0x00000040U /* Want/got stx_mtime */ +#define STATX_CTIME 0x00000080U /* Want/got stx_ctime */ +#define STATX_INO 0x00000100U /* Want/got stx_ino */ +#define STATX_SIZE 0x00000200U /* Want/got stx_size */ +#define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */ +#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ +#define STATX_BTIME 0x00000800U /* Want/got stx_btime */ +#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ +#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */ +#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ + +#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ + +#ifndef __KERNEL__ +/* + * This is deprecated, and shall remain the same value in the future. To avoid + * confusion please use the equivalent (STATX_BASIC_STATS | STATX_BTIME) + * instead. + */ +#define STATX_ALL 0x00000fffU +#endif + +/* + * Attributes to be found in stx_attributes and masked in stx_attributes_mask. + * + * These give information about the features or the state of a file that might + * be of use to ordinary userspace programs such as GUIs or ls rather than + * specialised tools. + * + * Note that the flags marked [I] correspond to the FS_IOC_SETFLAGS flags + * semantically. Where possible, the numerical value is picked to correspond + * also. Note that the DAX attribute indicates that the file is in the CPU + * direct access state. It does not correspond to the per-inode flag that + * some filesystems support. + * + */ +#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */ +#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */ +#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */ +#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */ +#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */ +#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */ +#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */ +#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */ +#define STATX_ATTR_DAX 0x00200000 /* File is currently in DAX state */ + + +#endif /* _UAPI_LINUX_STAT_H */ diff --git a/tools/perf/trace/beauty/include/uapi/linux/usbdevice_fs.h b/tools/perf/trace/beauty/include/uapi/linux/usbdevice_fs.h new file mode 100644 index 000000000000..74a84e02422a --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/linux/usbdevice_fs.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/*****************************************************************************/ + +/* + * usbdevice_fs.h -- USB device file system. + * + * Copyright (C) 2000 + * Thomas Sailer (sailer@ife.ee.ethz.ch) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * History: + * 0.1 04.01.2000 Created + */ + +/*****************************************************************************/ + +#ifndef _UAPI_LINUX_USBDEVICE_FS_H +#define _UAPI_LINUX_USBDEVICE_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +/* --------------------------------------------------------------------- */ + +/* usbdevfs ioctl codes */ + +struct usbdevfs_ctrltransfer { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; + __u32 timeout; /* in milliseconds */ + void __user *data; +}; + +struct usbdevfs_bulktransfer { + unsigned int ep; + unsigned int len; + unsigned int timeout; /* in milliseconds */ + void __user *data; +}; + +struct usbdevfs_setinterface { + unsigned int interface; + unsigned int altsetting; +}; + +struct usbdevfs_disconnectsignal { + unsigned int signr; + void __user *context; +}; + +#define USBDEVFS_MAXDRIVERNAME 255 + +struct usbdevfs_getdriver { + unsigned int interface; + char driver[USBDEVFS_MAXDRIVERNAME + 1]; +}; + +struct usbdevfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +struct usbdevfs_conninfo_ex { + __u32 size; /* Size of the structure from the kernel's */ + /* point of view. Can be used by userspace */ + /* to determine how much data can be */ + /* used/trusted. */ + __u32 busnum; /* USB bus number, as enumerated by the */ + /* kernel, the device is connected to. */ + __u32 devnum; /* Device address on the bus. */ + __u32 speed; /* USB_SPEED_* constants from ch9.h */ + __u8 num_ports; /* Number of ports the device is connected */ + /* to on the way to the root hub. It may */ + /* be bigger than size of 'ports' array so */ + /* userspace can detect overflows. */ + __u8 ports[7]; /* List of ports on the way from the root */ + /* hub to the device. Current limit in */ + /* USB specification is 7 tiers (root hub, */ + /* 5 intermediate hubs, device), which */ + /* gives at most 6 port entries. */ +}; + +#define USBDEVFS_URB_SHORT_NOT_OK 0x01 +#define USBDEVFS_URB_ISO_ASAP 0x02 +#define USBDEVFS_URB_BULK_CONTINUATION 0x04 +#define USBDEVFS_URB_NO_FSBR 0x20 /* Not used */ +#define USBDEVFS_URB_ZERO_PACKET 0x40 +#define USBDEVFS_URB_NO_INTERRUPT 0x80 + +#define USBDEVFS_URB_TYPE_ISO 0 +#define USBDEVFS_URB_TYPE_INTERRUPT 1 +#define USBDEVFS_URB_TYPE_CONTROL 2 +#define USBDEVFS_URB_TYPE_BULK 3 + +struct usbdevfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +struct usbdevfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void __user *buffer; + int buffer_length; + int actual_length; + int start_frame; + union { + int number_of_packets; /* Only used for isoc urbs */ + unsigned int stream_id; /* Only used with bulk streams */ + }; + int error_count; + unsigned int signr; /* signal to be sent on completion, + or 0 if none should be sent. 
*/ + void __user *usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[]; +}; + +/* ioctls for talking directly to drivers */ +struct usbdevfs_ioctl { + int ifno; /* interface 0..N ; negative numbers reserved */ + int ioctl_code; /* MUST encode size + direction of data so the + * macros in <asm/ioctl.h> give correct values */ + void __user *data; /* param buffer (in, or out) */ +}; + +/* You can do most things with hubs just through control messages, + * except find out what device connects to what port. */ +struct usbdevfs_hub_portinfo { + char nports; /* number of downstream ports in this hub */ + char port [127]; /* e.g. port 3 connects to device 27 */ +}; + +/* System and bus capability flags */ +#define USBDEVFS_CAP_ZERO_PACKET 0x01 +#define USBDEVFS_CAP_BULK_CONTINUATION 0x02 +#define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04 +#define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08 +#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10 +#define USBDEVFS_CAP_MMAP 0x20 +#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 +#define USBDEVFS_CAP_CONNINFO_EX 0x80 +#define USBDEVFS_CAP_SUSPEND 0x100 + +/* USBDEVFS_DISCONNECT_CLAIM flags & struct */ + +/* disconnect-and-claim if the driver matches the driver field */ +#define USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER 0x01 +/* disconnect-and-claim except when the driver matches the driver field */ +#define USBDEVFS_DISCONNECT_CLAIM_EXCEPT_DRIVER 0x02 + +struct usbdevfs_disconnect_claim { + unsigned int interface; + unsigned int flags; + char driver[USBDEVFS_MAXDRIVERNAME + 1]; +}; + +struct usbdevfs_streams { + unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */ + unsigned int num_eps; + unsigned char eps[]; +}; + +/* + * USB_SPEED_* values returned by USBDEVFS_GET_SPEED are defined in + * linux/usb/ch9.h + */ + +#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer) +#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32) +#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer) +#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32) +#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int) +#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface) +#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int) +#define USBDEVFS_GETDRIVER _IOW('U', 8, struct usbdevfs_getdriver) +#define USBDEVFS_SUBMITURB _IOR('U', 10, struct usbdevfs_urb) +#define USBDEVFS_SUBMITURB32 _IOR('U', 10, struct usbdevfs_urb32) +#define USBDEVFS_DISCARDURB _IO('U', 11) +#define USBDEVFS_REAPURB _IOW('U', 12, void *) +#define USBDEVFS_REAPURB32 _IOW('U', 12, __u32) +#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *) +#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32) +#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal) +#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32) +#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int) +#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int) +#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo) +#define USBDEVFS_IOCTL _IOWR('U', 18, struct usbdevfs_ioctl) +#define USBDEVFS_IOCTL32 _IOWR('U', 18, struct usbdevfs_ioctl32) +#define USBDEVFS_HUB_PORTINFO _IOR('U', 19, struct usbdevfs_hub_portinfo) +#define USBDEVFS_RESET _IO('U', 20) +#define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int) +#define USBDEVFS_DISCONNECT _IO('U', 22) +#define USBDEVFS_CONNECT _IO('U', 23) +#define USBDEVFS_CLAIM_PORT _IOR('U', 24, unsigned int) +#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int) +#define 
USBDEVFS_GET_CAPABILITIES _IOR('U', 26, __u32) +#define USBDEVFS_DISCONNECT_CLAIM _IOR('U', 27, struct usbdevfs_disconnect_claim) +#define USBDEVFS_ALLOC_STREAMS _IOR('U', 28, struct usbdevfs_streams) +#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams) +#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32) +#define USBDEVFS_GET_SPEED _IO('U', 31) +/* + * Returns struct usbdevfs_conninfo_ex; length is variable to allow + * extending size of the data returned. + */ +#define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) +#define USBDEVFS_FORBID_SUSPEND _IO('U', 33) +#define USBDEVFS_ALLOW_SUSPEND _IO('U', 34) +#define USBDEVFS_WAIT_FOR_RESUME _IO('U', 35) + +#endif /* _UAPI_LINUX_USBDEVICE_FS_H */ diff --git a/tools/perf/trace/beauty/include/uapi/linux/vhost.h b/tools/perf/trace/beauty/include/uapi/linux/vhost.h new file mode 100644 index 000000000000..b95dd84eef2d --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/linux/vhost.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_VHOST_H +#define _LINUX_VHOST_H +/* Userspace interface for in-kernel virtio accelerators. */ + +/* vhost is used to reduce the number of system calls involved in virtio. + * + * Existing virtio net code is used in the guest without modification. + * + * This header includes the interface used by a userspace hypervisor for + * device configuration. + */ + +#include <linux/vhost_types.h> +#include <linux/types.h> +#include <linux/ioctl.h> + +#define VHOST_FILE_UNBIND -1 + +/* ioctls */ + +#define VHOST_VIRTIO 0xAF + +/* Features bitmask for forward compatibility. Transport bits are used for + * vhost specific features. */ +#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64) +#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64) + +/* Set current process as the (exclusive) owner of this file descriptor. This + * must be called before any other vhost command. Further calls to + * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */ +#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01) +/* Give up ownership, and reset the device to default values. + * Allows a subsequent call to VHOST_SET_OWNER to succeed. */ +#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02) + +/* Set up/modify memory layout */ +#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory) + +/* Write logging setup. */ +/* Memory writes can optionally be logged by setting a bit at an offset + * (calculated from the physical address) from the specified log base. + * The bit is set using an atomic 32 bit operation. */ +/* Set base address for logging. */ +#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64) +/* Specify an eventfd file descriptor to signal on log write. */ +#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int) +/* By default, a device gets one vhost_worker that its virtqueues share. This + * command allows the owner of the device to create an additional vhost_worker + * for the device. It can later be bound to 1 or more of its virtqueues using + * the VHOST_ATTACH_VRING_WORKER command. + * + * This must be called after VHOST_SET_OWNER and the caller must be the owner + * of the device. The new thread will inherit the caller's cgroups and namespaces, + * and will share the caller's memory space. The new thread will also be + * counted against the caller's RLIMIT_NPROC value. + * + * The worker's ID used in other commands will be returned in + * vhost_worker_state.
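
A minimal sketch of this worker lifecycle, assuming the vhost_net module provides /dev/vhost-net, sufficient privileges, and the vhost_worker_state/vhost_vring_worker layouts from <linux/vhost_types.h>:

    #include <fcntl.h>
    #include <linux/vhost.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct vhost_worker_state state = { 0 };
            struct vhost_vring_worker vring_worker = { .index = 0 };
            int fd = open("/dev/vhost-net", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, VHOST_SET_OWNER))                  /* must come first */
                    perror("VHOST_SET_OWNER");
            if (ioctl(fd, VHOST_NEW_WORKER, &state))         /* kernel fills worker_id */
                    perror("VHOST_NEW_WORKER");
            vring_worker.worker_id = state.worker_id;
            if (ioctl(fd, VHOST_ATTACH_VRING_WORKER, &vring_worker))
                    perror("VHOST_ATTACH_VRING_WORKER");     /* bind virtqueue 0 to it */
            close(fd);
            return 0;
    }
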
+ */ +#define VHOST_NEW_WORKER _IOR(VHOST_VIRTIO, 0x8, struct vhost_worker_state) +/* Free a worker created with VHOST_NEW_WORKER if it's not attached to any + * virtqueue. If userspace is not able to call this for the workers it has created, + * the kernel will free all the device's workers when the device is closed. + */ +#define VHOST_FREE_WORKER _IOW(VHOST_VIRTIO, 0x9, struct vhost_worker_state) + +/* Ring setup. */ +/* Set number of descriptors in ring. This parameter cannot + * be modified while the ring is running (bound to a device). */ +#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state) +/* Set addresses for the ring. */ +#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr) +/* Base value where queue looks for available descriptors */ +#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state) +/* Get accessor: reads index, writes value in num */ +#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state) + +/* Set the vring byte order in num. Valid values are VHOST_VRING_LITTLE_ENDIAN + * or VHOST_VRING_BIG_ENDIAN (other values return -EINVAL). + * The byte order cannot be changed while the device is active: trying to do so + * returns -EBUSY. + * This is a legacy only API that is simply ignored when VIRTIO_F_VERSION_1 is + * set. + * Not all kernel configurations support this ioctl, but all configurations that + * support SET also support GET. + */ +#define VHOST_VRING_LITTLE_ENDIAN 0 +#define VHOST_VRING_BIG_ENDIAN 1 +#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state) +#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state) +/* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's + * virtqueues. + * + * This will replace the virtqueue's existing worker. If the replaced worker + * is no longer attached to any virtqueues, it can be freed with + * VHOST_FREE_WORKER. + */ +#define VHOST_ATTACH_VRING_WORKER _IOW(VHOST_VIRTIO, 0x15, \ + struct vhost_vring_worker) +/* Return the vring worker's ID */ +#define VHOST_GET_VRING_WORKER _IOWR(VHOST_VIRTIO, 0x16, \ + struct vhost_vring_worker) + +/* The following ioctls use eventfd file descriptors to signal and poll + * for events. */ + +/* Set eventfd to poll for added buffers */ +#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file) +/* Set eventfd to signal when buffers have been used */ +#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file) +/* Set eventfd to signal an error */ +#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file) +/* Set busy loop timeout (in us) */ +#define VHOST_SET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x23, \ + struct vhost_vring_state) +/* Get busy loop timeout (in us) */ +#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \ + struct vhost_vring_state) + +/* Set or get vhost backend capability */ + +#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) +#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) + +/* VHOST_NET specific defines */ + +/* Attach virtio net ring to a raw socket, or tap device. + * The socket must already be bound to an ethernet device, this device will be + * used for transmit. Pass fd -1 to unbind from the socket and the transmit + * device. This can be used to stop the ring (e.g. for migration).
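
Stopping the ring as described above is the same ioctl with the fd set to -1 (VHOST_FILE_UNBIND); a small sketch, where vhost_fd is assumed to be an already-owned /dev/vhost-net descriptor:

    #include <linux/vhost.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    /* Detach the backend from ring 0, e.g. ahead of migration. */
    static int stop_ring0(int vhost_fd)
    {
            struct vhost_vring_file backend = {
                    .index = 0,
                    .fd = VHOST_FILE_UNBIND,        /* -1: drop the socket/tap backend */
            };

            if (ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend)) {
                    perror("VHOST_NET_SET_BACKEND");
                    return -1;
            }
            return 0;
    }
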
*/ +#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) + +/* VHOST_SCSI specific defines */ + +#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) +#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) +/* Changing this breaks userspace. */ +#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int) +/* Set and get the events missed flag */ +#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32) +#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32) + +/* VHOST_VSOCK specific defines */ + +#define VHOST_VSOCK_SET_GUEST_CID _IOW(VHOST_VIRTIO, 0x60, __u64) +#define VHOST_VSOCK_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int) + +/* VHOST_VDPA specific defines */ + +/* Get the device id. The device ids follow the same definition of + * the device id defined in virtio-spec. + */ +#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32) +/* Get and set the status. The status bits follow the same definition + * of the device status defined in virtio-spec. + */ +#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8) +#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8) +/* Get and set the device config. The device config follows the same + * definition of the device config defined in virtio-spec. + */ +#define VHOST_VDPA_GET_CONFIG _IOR(VHOST_VIRTIO, 0x73, \ + struct vhost_vdpa_config) +#define VHOST_VDPA_SET_CONFIG _IOW(VHOST_VIRTIO, 0x74, \ + struct vhost_vdpa_config) +/* Enable/disable the ring. */ +#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, \ + struct vhost_vring_state) +/* Get the max ring size. */ +#define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16) + +/* Set event fd for config interrupt*/ +#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int) + +/* Get the valid iova range */ +#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \ + struct vhost_vdpa_iova_range) +/* Get the config size */ +#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32) + +/* Get the number of address spaces. */ +#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int) + +/* Get the group for a virtqueue: read index, write group in num, + * The virtqueue index is stored in the index field of + * vhost_vring_state. The group for this specific virtqueue is + * returned via num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \ + struct vhost_vring_state) +/* Set the ASID for a virtqueue group. The group index is stored in + * the index field of vhost_vring_state, the ASID associated with this + * group is stored at num field of vhost_vring_state. + */ +#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \ + struct vhost_vring_state) + +/* Suspend a device so it does not process virtqueue requests anymore + * + * After the return of ioctl the device must preserve all the necessary state + * (the virtqueue vring base plus the possible device specific states) that is + * required for restoring in the future. The device must not change its + * configuration after that point. + */ +#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D) + +/* Resume a device so it can resume processing virtqueue requests + * + * After the return of this ioctl the device will have restored all the + * necessary states and it is fully operational to continue processing the + * virtqueue descriptors. 
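
A suspend/resume round trip then looks as follows; the device node name is an assumption (vDPA instances typically appear as /dev/vhost-vdpa-N):

    #include <fcntl.h>
    #include <linux/vhost.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/vhost-vdpa-0", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, VHOST_VDPA_SUSPEND))      /* device must preserve its state */
                    perror("VHOST_VDPA_SUSPEND");
            /* ... snapshot or migrate the device state here ... */
            if (ioctl(fd, VHOST_VDPA_RESUME))       /* device resumes processing */
                    perror("VHOST_VDPA_RESUME");
            close(fd);
            return 0;
    }
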
+ */ +#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E) + +/* Get the group for the descriptor table including driver & device areas + * of a virtqueue: read index, write group in num. + * The virtqueue index is stored in the index field of vhost_vring_state. + * The group ID of the descriptor table for this specific virtqueue + * is returned via num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \ + struct vhost_vring_state) + + +/* Get the count of all virtqueues */ +#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32) + +/* Get the number of virtqueue groups. */ +#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32) + +/* Get the queue size of a specific virtqueue. + * userspace set the vring index in vhost_vring_state.index + * kernel set the queue size in vhost_vring_state.num + */ +#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \ + struct vhost_vring_state) +#endif diff --git a/tools/perf/trace/beauty/include/uapi/sound/asound.h b/tools/perf/trace/beauty/include/uapi/sound/asound.h new file mode 100644 index 000000000000..628d46a0da92 --- /dev/null +++ b/tools/perf/trace/beauty/include/uapi/sound/asound.h @@ -0,0 +1,1252 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Advanced Linux Sound Architecture - ALSA - Driver + * Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>, + * Abramo Bagnara <abramo@alsa-project.org> + */ + +#ifndef _UAPI__SOUND_ASOUND_H +#define _UAPI__SOUND_ASOUND_H + +#if defined(__KERNEL__) || defined(__linux__) +#include <linux/types.h> +#include <asm/byteorder.h> +#else +#include <endian.h> +#include <sys/ioctl.h> +#endif + +#ifndef __KERNEL__ +#include <stdlib.h> +#include <time.h> +#endif + +/* + * protocol version + */ + +#define SNDRV_PROTOCOL_VERSION(major, minor, subminor) (((major)<<16)|((minor)<<8)|(subminor)) +#define SNDRV_PROTOCOL_MAJOR(version) (((version)>>16)&0xffff) +#define SNDRV_PROTOCOL_MINOR(version) (((version)>>8)&0xff) +#define SNDRV_PROTOCOL_MICRO(version) ((version)&0xff) +#define SNDRV_PROTOCOL_INCOMPATIBLE(kversion, uversion) \ + (SNDRV_PROTOCOL_MAJOR(kversion) != SNDRV_PROTOCOL_MAJOR(uversion) || \ + (SNDRV_PROTOCOL_MAJOR(kversion) == SNDRV_PROTOCOL_MAJOR(uversion) && \ + SNDRV_PROTOCOL_MINOR(kversion) != SNDRV_PROTOCOL_MINOR(uversion))) + +/**************************************************************************** + * * + * Digital audio interface * + * * + ****************************************************************************/ + +#define AES_IEC958_STATUS_SIZE 24 + +struct snd_aes_iec958 { + unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */ + unsigned char subcode[147]; /* AES/IEC958 subcode bits */ + unsigned char pad; /* nothing */ + unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */ +}; + +/**************************************************************************** + * * + * CEA-861 Audio InfoFrame. 
Used in HDMI and DisplayPort * + * * + ****************************************************************************/ + +struct snd_cea_861_aud_if { + unsigned char db1_ct_cc; /* coding type and channel count */ + unsigned char db2_sf_ss; /* sample frequency and size */ + unsigned char db3; /* not used, all zeros */ + unsigned char db4_ca; /* channel allocation code */ + unsigned char db5_dminh_lsv; /* downmix inhibit & level-shift values */ +}; + +/**************************************************************************** + * * + * Section for driver hardware dependent interface - /dev/snd/hw? * + * * + ****************************************************************************/ + +#define SNDRV_HWDEP_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 1) + +enum { + SNDRV_HWDEP_IFACE_OPL2 = 0, + SNDRV_HWDEP_IFACE_OPL3, + SNDRV_HWDEP_IFACE_OPL4, + SNDRV_HWDEP_IFACE_SB16CSP, /* Creative Signal Processor */ + SNDRV_HWDEP_IFACE_EMU10K1, /* FX8010 processor in EMU10K1 chip */ + SNDRV_HWDEP_IFACE_YSS225, /* Yamaha FX processor */ + SNDRV_HWDEP_IFACE_ICS2115, /* Wavetable synth */ + SNDRV_HWDEP_IFACE_SSCAPE, /* Ensoniq SoundScape ISA card (MC68EC000) */ + SNDRV_HWDEP_IFACE_VX, /* Digigram VX cards */ + SNDRV_HWDEP_IFACE_MIXART, /* Digigram miXart cards */ + SNDRV_HWDEP_IFACE_USX2Y, /* Tascam US122, US224 & US428 usb */ + SNDRV_HWDEP_IFACE_EMUX_WAVETABLE, /* EmuX wavetable */ + SNDRV_HWDEP_IFACE_BLUETOOTH, /* Bluetooth audio */ + SNDRV_HWDEP_IFACE_USX2Y_PCM, /* Tascam US122, US224 & US428 rawusb pcm */ + SNDRV_HWDEP_IFACE_PCXHR, /* Digigram PCXHR */ + SNDRV_HWDEP_IFACE_SB_RC, /* SB Extigy/Audigy2NX remote control */ + SNDRV_HWDEP_IFACE_HDA, /* HD-audio */ + SNDRV_HWDEP_IFACE_USB_STREAM, /* direct access to usb stream */ + SNDRV_HWDEP_IFACE_FW_DICE, /* TC DICE FireWire device */ + SNDRV_HWDEP_IFACE_FW_FIREWORKS, /* Echo Audio Fireworks based device */ + SNDRV_HWDEP_IFACE_FW_BEBOB, /* BridgeCo BeBoB based device */ + SNDRV_HWDEP_IFACE_FW_OXFW, /* Oxford OXFW970/971 based device */ + SNDRV_HWDEP_IFACE_FW_DIGI00X, /* Digidesign Digi 002/003 family */ + SNDRV_HWDEP_IFACE_FW_TASCAM, /* TASCAM FireWire series */ + SNDRV_HWDEP_IFACE_LINE6, /* Line6 USB processors */ + SNDRV_HWDEP_IFACE_FW_MOTU, /* MOTU FireWire series */ + SNDRV_HWDEP_IFACE_FW_FIREFACE, /* RME Fireface series */ + + /* Don't forget to change the following: */ + SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_FIREFACE +}; + +struct snd_hwdep_info { + unsigned int device; /* WR: device number */ + int card; /* R: card number */ + unsigned char id[64]; /* ID (user selectable) */ + unsigned char name[80]; /* hwdep name */ + int iface; /* hwdep interface */ + unsigned char reserved[64]; /* reserved for future */ +}; + +/* generic DSP loader */ +struct snd_hwdep_dsp_status { + unsigned int version; /* R: driver-specific version */ + unsigned char id[32]; /* R: driver-specific ID string */ + unsigned int num_dsps; /* R: number of DSP images to transfer */ + unsigned int dsp_loaded; /* R: bit flags indicating the loaded DSPs */ + unsigned int chip_ready; /* R: 1 = initialization finished */ + unsigned char reserved[16]; /* reserved for future use */ +}; + +struct snd_hwdep_dsp_image { + unsigned int index; /* W: DSP index */ + unsigned char name[64]; /* W: ID (e.g.
file name) */ + unsigned char __user *image; /* W: binary image */ + size_t length; /* W: size of image in bytes */ + unsigned long driver_data; /* W: driver-specific data */ +}; + +#define SNDRV_HWDEP_IOCTL_PVERSION _IOR ('H', 0x00, int) +#define SNDRV_HWDEP_IOCTL_INFO _IOR ('H', 0x01, struct snd_hwdep_info) +#define SNDRV_HWDEP_IOCTL_DSP_STATUS _IOR('H', 0x02, struct snd_hwdep_dsp_status) +#define SNDRV_HWDEP_IOCTL_DSP_LOAD _IOW('H', 0x03, struct snd_hwdep_dsp_image) + +/***************************************************************************** + * * + * Digital Audio (PCM) interface - /dev/snd/pcm?? * + * * + *****************************************************************************/ + +#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 17) + +typedef unsigned long snd_pcm_uframes_t; +typedef signed long snd_pcm_sframes_t; + +enum { + SNDRV_PCM_CLASS_GENERIC = 0, /* standard mono or stereo device */ + SNDRV_PCM_CLASS_MULTI, /* multichannel device */ + SNDRV_PCM_CLASS_MODEM, /* software modem class */ + SNDRV_PCM_CLASS_DIGITIZER, /* digitizer class */ + /* Don't forget to change the following: */ + SNDRV_PCM_CLASS_LAST = SNDRV_PCM_CLASS_DIGITIZER, +}; + +enum { + SNDRV_PCM_SUBCLASS_GENERIC_MIX = 0, /* mono or stereo subdevices are mixed together */ + SNDRV_PCM_SUBCLASS_MULTI_MIX, /* multichannel subdevices are mixed together */ + /* Don't forget to change the following: */ + SNDRV_PCM_SUBCLASS_LAST = SNDRV_PCM_SUBCLASS_MULTI_MIX, +}; + +enum { + SNDRV_PCM_STREAM_PLAYBACK = 0, + SNDRV_PCM_STREAM_CAPTURE, + SNDRV_PCM_STREAM_LAST = SNDRV_PCM_STREAM_CAPTURE, +}; + +typedef int __bitwise snd_pcm_access_t; +#define SNDRV_PCM_ACCESS_MMAP_INTERLEAVED ((__force snd_pcm_access_t) 0) /* interleaved mmap */ +#define SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED ((__force snd_pcm_access_t) 1) /* noninterleaved mmap */ +#define SNDRV_PCM_ACCESS_MMAP_COMPLEX ((__force snd_pcm_access_t) 2) /* complex mmap */ +#define SNDRV_PCM_ACCESS_RW_INTERLEAVED ((__force snd_pcm_access_t) 3) /* readi/writei */ +#define SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ((__force snd_pcm_access_t) 4) /* readn/writen */ +#define SNDRV_PCM_ACCESS_LAST SNDRV_PCM_ACCESS_RW_NONINTERLEAVED + +typedef int __bitwise snd_pcm_format_t; +#define SNDRV_PCM_FORMAT_S8 ((__force snd_pcm_format_t) 0) +#define SNDRV_PCM_FORMAT_U8 ((__force snd_pcm_format_t) 1) +#define SNDRV_PCM_FORMAT_S16_LE ((__force snd_pcm_format_t) 2) +#define SNDRV_PCM_FORMAT_S16_BE ((__force snd_pcm_format_t) 3) +#define SNDRV_PCM_FORMAT_U16_LE ((__force snd_pcm_format_t) 4) +#define SNDRV_PCM_FORMAT_U16_BE ((__force snd_pcm_format_t) 5) +#define SNDRV_PCM_FORMAT_S24_LE ((__force snd_pcm_format_t) 6) /* low three bytes */ +#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */ +#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */ +#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */ +/* + * For S32/U32 formats, 'msbits' hardware parameter is often used to deliver information about the + * available bit count in most significant bit. It's for the case of so-called 'left-justified' or + * `right-padding` sample which has less width than 32 bit. 
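
The SNDRV_PROTOCOL_* macros near the top of this file exist so userspace can detect an incompatible kernel before relying on newer protocol features such as these formats. A minimal sketch, assuming SNDRV_PCM_IOCTL_PVERSION (defined further down in this header) and an existing first playback device /dev/snd/pcmC0D0p:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sound/asound.h>       /* SNDRV_PCM_VERSION, SNDRV_PROTOCOL_* */
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int ver = 0;
            int fd = open("/dev/snd/pcmC0D0p", O_WRONLY | O_NONBLOCK);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, SNDRV_PCM_IOCTL_PVERSION, &ver) == 0)
                    printf("kernel PCM protocol %d.%d.%d (%s with header %d.%d.%d)\n",
                           SNDRV_PROTOCOL_MAJOR(ver), SNDRV_PROTOCOL_MINOR(ver),
                           SNDRV_PROTOCOL_MICRO(ver),
                           SNDRV_PROTOCOL_INCOMPATIBLE(ver, SNDRV_PCM_VERSION) ?
                           "incompatible" : "compatible",
                           SNDRV_PROTOCOL_MAJOR(SNDRV_PCM_VERSION),
                           SNDRV_PROTOCOL_MINOR(SNDRV_PCM_VERSION),
                           SNDRV_PROTOCOL_MICRO(SNDRV_PCM_VERSION));
            close(fd);
            return 0;
    }
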
+ */ +#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10) +#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11) +#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12) +#define SNDRV_PCM_FORMAT_U32_BE ((__force snd_pcm_format_t) 13) +#define SNDRV_PCM_FORMAT_FLOAT_LE ((__force snd_pcm_format_t) 14) /* 4-byte float, IEEE-754 32-bit, range -1.0 to 1.0 */ +#define SNDRV_PCM_FORMAT_FLOAT_BE ((__force snd_pcm_format_t) 15) /* 4-byte float, IEEE-754 32-bit, range -1.0 to 1.0 */ +#define SNDRV_PCM_FORMAT_FLOAT64_LE ((__force snd_pcm_format_t) 16) /* 8-byte float, IEEE-754 64-bit, range -1.0 to 1.0 */ +#define SNDRV_PCM_FORMAT_FLOAT64_BE ((__force snd_pcm_format_t) 17) /* 8-byte float, IEEE-754 64-bit, range -1.0 to 1.0 */ +#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE ((__force snd_pcm_format_t) 18) /* IEC-958 subframe, Little Endian */ +#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE ((__force snd_pcm_format_t) 19) /* IEC-958 subframe, Big Endian */ +#define SNDRV_PCM_FORMAT_MU_LAW ((__force snd_pcm_format_t) 20) +#define SNDRV_PCM_FORMAT_A_LAW ((__force snd_pcm_format_t) 21) +#define SNDRV_PCM_FORMAT_IMA_ADPCM ((__force snd_pcm_format_t) 22) +#define SNDRV_PCM_FORMAT_MPEG ((__force snd_pcm_format_t) 23) +#define SNDRV_PCM_FORMAT_GSM ((__force snd_pcm_format_t) 24) +#define SNDRV_PCM_FORMAT_S20_LE ((__force snd_pcm_format_t) 25) /* in four bytes, LSB justified */ +#define SNDRV_PCM_FORMAT_S20_BE ((__force snd_pcm_format_t) 26) /* in four bytes, LSB justified */ +#define SNDRV_PCM_FORMAT_U20_LE ((__force snd_pcm_format_t) 27) /* in four bytes, LSB justified */ +#define SNDRV_PCM_FORMAT_U20_BE ((__force snd_pcm_format_t) 28) /* in four bytes, LSB justified */ +/* gap in the numbering for a future standard linear format */ +#define SNDRV_PCM_FORMAT_SPECIAL ((__force snd_pcm_format_t) 31) +#define SNDRV_PCM_FORMAT_S24_3LE ((__force snd_pcm_format_t) 32) /* in three bytes */ +#define SNDRV_PCM_FORMAT_S24_3BE ((__force snd_pcm_format_t) 33) /* in three bytes */ +#define SNDRV_PCM_FORMAT_U24_3LE ((__force snd_pcm_format_t) 34) /* in three bytes */ +#define SNDRV_PCM_FORMAT_U24_3BE ((__force snd_pcm_format_t) 35) /* in three bytes */ +#define SNDRV_PCM_FORMAT_S20_3LE ((__force snd_pcm_format_t) 36) /* in three bytes */ +#define SNDRV_PCM_FORMAT_S20_3BE ((__force snd_pcm_format_t) 37) /* in three bytes */ +#define SNDRV_PCM_FORMAT_U20_3LE ((__force snd_pcm_format_t) 38) /* in three bytes */ +#define SNDRV_PCM_FORMAT_U20_3BE ((__force snd_pcm_format_t) 39) /* in three bytes */ +#define SNDRV_PCM_FORMAT_S18_3LE ((__force snd_pcm_format_t) 40) /* in three bytes */ +#define SNDRV_PCM_FORMAT_S18_3BE ((__force snd_pcm_format_t) 41) /* in three bytes */ +#define SNDRV_PCM_FORMAT_U18_3LE ((__force snd_pcm_format_t) 42) /* in three bytes */ +#define SNDRV_PCM_FORMAT_U18_3BE ((__force snd_pcm_format_t) 43) /* in three bytes */ +#define SNDRV_PCM_FORMAT_G723_24 ((__force snd_pcm_format_t) 44) /* 8 samples in 3 bytes */ +#define SNDRV_PCM_FORMAT_G723_24_1B ((__force snd_pcm_format_t) 45) /* 1 sample in 1 byte */ +#define SNDRV_PCM_FORMAT_G723_40 ((__force snd_pcm_format_t) 46) /* 8 Samples in 5 bytes */ +#define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */ +#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */ +#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */ +#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 
4-byte samples DSD (x32), little endian */ +#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ +#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ +#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE +#define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8 + +#ifdef SNDRV_LITTLE_ENDIAN +#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE +#define SNDRV_PCM_FORMAT_U16 SNDRV_PCM_FORMAT_U16_LE +#define SNDRV_PCM_FORMAT_S24 SNDRV_PCM_FORMAT_S24_LE +#define SNDRV_PCM_FORMAT_U24 SNDRV_PCM_FORMAT_U24_LE +#define SNDRV_PCM_FORMAT_S32 SNDRV_PCM_FORMAT_S32_LE +#define SNDRV_PCM_FORMAT_U32 SNDRV_PCM_FORMAT_U32_LE +#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_LE +#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_LE +#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE +#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_LE +#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_LE +#endif +#ifdef SNDRV_BIG_ENDIAN +#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_BE +#define SNDRV_PCM_FORMAT_U16 SNDRV_PCM_FORMAT_U16_BE +#define SNDRV_PCM_FORMAT_S24 SNDRV_PCM_FORMAT_S24_BE +#define SNDRV_PCM_FORMAT_U24 SNDRV_PCM_FORMAT_U24_BE +#define SNDRV_PCM_FORMAT_S32 SNDRV_PCM_FORMAT_S32_BE +#define SNDRV_PCM_FORMAT_U32 SNDRV_PCM_FORMAT_U32_BE +#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_BE +#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_BE +#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE +#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_BE +#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_BE +#endif + +typedef int __bitwise snd_pcm_subformat_t; +#define SNDRV_PCM_SUBFORMAT_STD ((__force snd_pcm_subformat_t) 0) +#define SNDRV_PCM_SUBFORMAT_MSBITS_MAX ((__force snd_pcm_subformat_t) 1) +#define SNDRV_PCM_SUBFORMAT_MSBITS_20 ((__force snd_pcm_subformat_t) 2) +#define SNDRV_PCM_SUBFORMAT_MSBITS_24 ((__force snd_pcm_subformat_t) 3) +#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_MSBITS_24 + +#define SNDRV_PCM_INFO_MMAP 0x00000001 /* hardware supports mmap */ +#define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */ +#define SNDRV_PCM_INFO_DOUBLE 0x00000004 /* Double buffering needed for PCM start/stop */ +#define SNDRV_PCM_INFO_BATCH 0x00000010 /* double buffering */ +#define SNDRV_PCM_INFO_SYNC_APPLPTR 0x00000020 /* need the explicit sync of appl_ptr update */ +#define SNDRV_PCM_INFO_PERFECT_DRAIN 0x00000040 /* silencing at the end of stream is not required */ +#define SNDRV_PCM_INFO_INTERLEAVED 0x00000100 /* channels are interleaved */ +#define SNDRV_PCM_INFO_NONINTERLEAVED 0x00000200 /* channels are not interleaved */ +#define SNDRV_PCM_INFO_COMPLEX 0x00000400 /* complex frame organization (mmap only) */ +#define SNDRV_PCM_INFO_BLOCK_TRANSFER 0x00010000 /* hardware transfer block of samples */ +#define SNDRV_PCM_INFO_OVERRANGE 0x00020000 /* hardware supports ADC (capture) overrange detection */ +#define SNDRV_PCM_INFO_RESUME 0x00040000 /* hardware supports stream resume after suspend */ +#define SNDRV_PCM_INFO_PAUSE 0x00080000 /* pause ioctl is supported */ +#define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */ +#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */ +#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */ +#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 
/* period wakeup can be disabled */ +#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* (Deprecated)has audio wall clock for audio/system time sync */ +#define SNDRV_PCM_INFO_HAS_LINK_ATIME 0x01000000 /* report hardware link audio time, reset on startup */ +#define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */ +#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */ +#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */ +#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */ +#define SNDRV_PCM_INFO_NO_REWINDS 0x20000000 /* hardware can only support monotonic changes of appl_ptr */ +#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */ +#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */ + +#if (__BITS_PER_LONG == 32 && defined(__USE_TIME_BITS64)) || defined __KERNEL__ +#define __SND_STRUCT_TIME64 +#endif + +typedef int __bitwise snd_pcm_state_t; +#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */ +#define SNDRV_PCM_STATE_SETUP ((__force snd_pcm_state_t) 1) /* stream has a setup */ +#define SNDRV_PCM_STATE_PREPARED ((__force snd_pcm_state_t) 2) /* stream is ready to start */ +#define SNDRV_PCM_STATE_RUNNING ((__force snd_pcm_state_t) 3) /* stream is running */ +#define SNDRV_PCM_STATE_XRUN ((__force snd_pcm_state_t) 4) /* stream reached an xrun */ +#define SNDRV_PCM_STATE_DRAINING ((__force snd_pcm_state_t) 5) /* stream is draining */ +#define SNDRV_PCM_STATE_PAUSED ((__force snd_pcm_state_t) 6) /* stream is paused */ +#define SNDRV_PCM_STATE_SUSPENDED ((__force snd_pcm_state_t) 7) /* hardware is suspended */ +#define SNDRV_PCM_STATE_DISCONNECTED ((__force snd_pcm_state_t) 8) /* hardware is disconnected */ +#define SNDRV_PCM_STATE_LAST SNDRV_PCM_STATE_DISCONNECTED + +enum { + SNDRV_PCM_MMAP_OFFSET_DATA = 0x00000000, + SNDRV_PCM_MMAP_OFFSET_STATUS_OLD = 0x80000000, + SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD = 0x81000000, + SNDRV_PCM_MMAP_OFFSET_STATUS_NEW = 0x82000000, + SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW = 0x83000000, +#ifdef __SND_STRUCT_TIME64 + SNDRV_PCM_MMAP_OFFSET_STATUS = SNDRV_PCM_MMAP_OFFSET_STATUS_NEW, + SNDRV_PCM_MMAP_OFFSET_CONTROL = SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW, +#else + SNDRV_PCM_MMAP_OFFSET_STATUS = SNDRV_PCM_MMAP_OFFSET_STATUS_OLD, + SNDRV_PCM_MMAP_OFFSET_CONTROL = SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD, +#endif +}; + +union snd_pcm_sync_id { + unsigned char id[16]; + unsigned short id16[8]; + unsigned int id32[4]; +}; + +struct snd_pcm_info { + unsigned int device; /* RO/WR (control): device number */ + unsigned int subdevice; /* RO/WR (control): subdevice number */ + int stream; /* RO/WR (control): stream direction */ + int card; /* R: card number */ + unsigned char id[64]; /* ID (user selectable) */ + unsigned char name[80]; /* name of this device */ + unsigned char subname[32]; /* subdevice name */ + int dev_class; /* SNDRV_PCM_CLASS_* */ + int dev_subclass; /* SNDRV_PCM_SUBCLASS_* */ + unsigned int subdevices_count; + unsigned int subdevices_avail; + union snd_pcm_sync_id sync; /* hardware synchronization ID */ + unsigned char reserved[64]; /* reserved for future... 
*/ +}; + +typedef int snd_pcm_hw_param_t; +#define SNDRV_PCM_HW_PARAM_ACCESS 0 /* Access type */ +#define SNDRV_PCM_HW_PARAM_FORMAT 1 /* Format */ +#define SNDRV_PCM_HW_PARAM_SUBFORMAT 2 /* Subformat */ +#define SNDRV_PCM_HW_PARAM_FIRST_MASK SNDRV_PCM_HW_PARAM_ACCESS +#define SNDRV_PCM_HW_PARAM_LAST_MASK SNDRV_PCM_HW_PARAM_SUBFORMAT + +#define SNDRV_PCM_HW_PARAM_SAMPLE_BITS 8 /* Bits per sample */ +#define SNDRV_PCM_HW_PARAM_FRAME_BITS 9 /* Bits per frame */ +#define SNDRV_PCM_HW_PARAM_CHANNELS 10 /* Channels */ +#define SNDRV_PCM_HW_PARAM_RATE 11 /* Approx rate */ +#define SNDRV_PCM_HW_PARAM_PERIOD_TIME 12 /* Approx distance between + * interrupts in us + */ +#define SNDRV_PCM_HW_PARAM_PERIOD_SIZE 13 /* Approx frames between + * interrupts + */ +#define SNDRV_PCM_HW_PARAM_PERIOD_BYTES 14 /* Approx bytes between + * interrupts + */ +#define SNDRV_PCM_HW_PARAM_PERIODS 15 /* Approx interrupts per + * buffer + */ +#define SNDRV_PCM_HW_PARAM_BUFFER_TIME 16 /* Approx duration of buffer + * in us + */ +#define SNDRV_PCM_HW_PARAM_BUFFER_SIZE 17 /* Size of buffer in frames */ +#define SNDRV_PCM_HW_PARAM_BUFFER_BYTES 18 /* Size of buffer in bytes */ +#define SNDRV_PCM_HW_PARAM_TICK_TIME 19 /* Approx tick duration in us */ +#define SNDRV_PCM_HW_PARAM_FIRST_INTERVAL SNDRV_PCM_HW_PARAM_SAMPLE_BITS +#define SNDRV_PCM_HW_PARAM_LAST_INTERVAL SNDRV_PCM_HW_PARAM_TICK_TIME + +#define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */ +#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER (1<<1) /* export buffer */ +#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */ +#define SNDRV_PCM_HW_PARAMS_NO_DRAIN_SILENCE (1<<3) /* suppress drain with the filling + * of the silence samples + */ + +struct snd_interval { + unsigned int min, max; + unsigned int openmin:1, + openmax:1, + integer:1, + empty:1; +}; + +#define SNDRV_MASK_MAX 256 + +struct snd_mask { + __u32 bits[(SNDRV_MASK_MAX+31)/32]; +}; + +struct snd_pcm_hw_params { + unsigned int flags; + struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - + SNDRV_PCM_HW_PARAM_FIRST_MASK + 1]; + struct snd_mask mres[5]; /* reserved masks */ + struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL - + SNDRV_PCM_HW_PARAM_FIRST_INTERVAL + 1]; + struct snd_interval ires[9]; /* reserved intervals */ + unsigned int rmask; /* W: requested masks */ + unsigned int cmask; /* R: changed masks */ + unsigned int info; /* R: Info flags for returned setup */ + unsigned int msbits; /* R: used most significant bits (in sample bit-width) */ + unsigned int rate_num; /* R: rate numerator */ + unsigned int rate_den; /* R: rate denominator */ + snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */ + unsigned char reserved[64]; /* reserved for future */ +}; + +enum { + SNDRV_PCM_TSTAMP_NONE = 0, + SNDRV_PCM_TSTAMP_ENABLE, + SNDRV_PCM_TSTAMP_LAST = SNDRV_PCM_TSTAMP_ENABLE, +}; + +struct snd_pcm_sw_params { + int tstamp_mode; /* timestamp mode */ + unsigned int period_step; + unsigned int sleep_min; /* min ticks to sleep */ + snd_pcm_uframes_t avail_min; /* min avail frames for wakeup */ + snd_pcm_uframes_t xfer_align; /* obsolete: xfer size need to be a multiple */ + snd_pcm_uframes_t start_threshold; /* min hw_avail frames for automatic start */ + /* + * The following two thresholds alleviate playback buffer underruns; when + * hw_avail drops below the threshold, the respective action is triggered: + */ + snd_pcm_uframes_t stop_threshold; /* - stop playback */ + snd_pcm_uframes_t silence_threshold; /* - pre-fill buffer with silence */ + 
snd_pcm_uframes_t silence_size; /* max size of silence pre-fill; when >= boundary,
+ * fill played area with silence immediately */
+ snd_pcm_uframes_t boundary; /* pointers wrap point */
+ unsigned int proto; /* protocol version */
+ unsigned int tstamp_type; /* timestamp type (req. proto >= 2.0.12) */
+ unsigned char reserved[56]; /* reserved for future */
+};
+
+struct snd_pcm_channel_info {
+ unsigned int channel;
+ __kernel_off_t offset; /* mmap offset */
+ unsigned int first; /* offset to first sample in bits */
+ unsigned int step; /* samples distance in bits */
+};
+
+enum {
+ /*
+ * first definition for backwards compatibility only,
+ * maps to wallclock/link time for HDAudio playback and DEFAULT/DMA time for everything else
+ */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT = 0,
+
+ /* timestamp definitions */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT = 1, /* DMA time, reported as per hw_ptr */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK = 2, /* link time reported by sample or wallclock counter, reset on startup */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ABSOLUTE = 3, /* link time reported by sample or wallclock counter, not reset on startup */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ESTIMATED = 4, /* link time estimated indirectly */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED = 5, /* link time synchronized with system time */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LAST = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED
+};
+
+#ifndef __KERNEL__
+/* explicit padding avoids incompatibility between i386 and x86-64 */
+typedef struct { unsigned char pad[sizeof(time_t) - sizeof(int)]; } __time_pad;
+
+struct snd_pcm_status {
+ snd_pcm_state_t state; /* stream state */
+ __time_pad pad1; /* align to timespec */
+ struct timespec trigger_tstamp; /* time when stream was started/stopped/paused */
+ struct timespec tstamp; /* reference timestamp */
+ snd_pcm_uframes_t appl_ptr; /* appl ptr */
+ snd_pcm_uframes_t hw_ptr; /* hw ptr */
+ snd_pcm_sframes_t delay; /* current delay in frames */
+ snd_pcm_uframes_t avail; /* number of frames available */
+ snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */
+ snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */
+ snd_pcm_state_t suspended_state; /* suspended stream state */
+ __u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */
+ struct timespec audio_tstamp; /* sample counter, wall clock, PHC or on-demand sync'ed */
+ struct timespec driver_tstamp; /* useful in case reference system tstamp is reported with delay */
+ __u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */
+ unsigned char reserved[52-2*sizeof(struct timespec)]; /* must be filled with zero */
+};
+#endif
+
+/*
+ * For mmap operations, we need the 64-bit layout, both for compat mode,
+ * and for y2038 compatibility. For 64-bit applications, the two definitions
+ * are identical, so we keep the traditional version.
+ */
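The comment above is the crux of the time64 scheme: when __SND_STRUCT_TIME64 is in effect, the *64 structures and the _NEW mmap offsets are selected under the traditional names, so a y2038-safe 32-bit build and a 64-bit build compile the same source. A minimal userspace sketch of how a client consumes this, illustration only and not part of the patch, assuming an already-configured PCM file descriptor (pcm_fd is hypothetical, error handling trimmed):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sound/asound.h>

/* Map the read-only status page and the read-write control page of a
 * configured PCM stream. The SNDRV_PCM_MMAP_OFFSET_STATUS/CONTROL aliases
 * defined earlier pick the _NEW (time64) offsets whenever
 * __SND_STRUCT_TIME64 is defined, matching the structures mapped here. */
static int dump_pointers(int pcm_fd)
{
	long page = sysconf(_SC_PAGESIZE);
	struct snd_pcm_mmap_status *status;
	struct snd_pcm_mmap_control *control;

	status = mmap(NULL, page, PROT_READ, MAP_SHARED, pcm_fd,
		      SNDRV_PCM_MMAP_OFFSET_STATUS);
	control = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, pcm_fd,
		       SNDRV_PCM_MMAP_OFFSET_CONTROL);
	if (status == MAP_FAILED || control == MAP_FAILED)
		return -1;

	printf("state=%d hw_ptr=%lu appl_ptr=%lu\n", (int)status->state,
	       (unsigned long)status->hw_ptr,
	       (unsigned long)control->appl_ptr);
	return 0;
}

Drivers that cannot expose these pages make the same data available through the SNDRV_PCM_IOCTL_SYNC_PTR ioctl defined further down.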
+#ifdef __SND_STRUCT_TIME64
+#define __snd_pcm_mmap_status64 snd_pcm_mmap_status
+#define __snd_pcm_mmap_control64 snd_pcm_mmap_control
+#define __snd_pcm_sync_ptr64 snd_pcm_sync_ptr
+#ifdef __KERNEL__
+#define __snd_timespec64 __kernel_timespec
+#else
+#define __snd_timespec64 timespec
+#endif
+struct __snd_timespec {
+ __s32 tv_sec;
+ __s32 tv_nsec;
+};
+#else
+#define __snd_pcm_mmap_status snd_pcm_mmap_status
+#define __snd_pcm_mmap_control snd_pcm_mmap_control
+#define __snd_pcm_sync_ptr snd_pcm_sync_ptr
+#define __snd_timespec timespec
+struct __snd_timespec64 {
+ __s64 tv_sec;
+ __s64 tv_nsec;
+};
+
+#endif
+
+struct __snd_pcm_mmap_status {
+ snd_pcm_state_t state; /* RO: state - SNDRV_PCM_STATE_XXXX */
+ int pad1; /* Needed for 64 bit alignment */
+ snd_pcm_uframes_t hw_ptr; /* RO: hw ptr (0...boundary-1) */
+ struct __snd_timespec tstamp; /* Timestamp */
+ snd_pcm_state_t suspended_state; /* RO: suspended stream state */
+ struct __snd_timespec audio_tstamp; /* from sample counter or wall clock */
+};
+
+struct __snd_pcm_mmap_control {
+ snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
+ snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */
+};
+
+#define SNDRV_PCM_SYNC_PTR_HWSYNC (1<<0) /* execute hwsync */
+#define SNDRV_PCM_SYNC_PTR_APPL (1<<1) /* get appl_ptr from driver (r/w op) */
+#define SNDRV_PCM_SYNC_PTR_AVAIL_MIN (1<<2) /* get avail_min from driver */
+
+struct __snd_pcm_sync_ptr {
+ unsigned int flags;
+ union {
+ struct __snd_pcm_mmap_status status;
+ unsigned char reserved[64];
+ } s;
+ union {
+ struct __snd_pcm_mmap_control control;
+ unsigned char reserved[64];
+ } c;
+};
+
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
+typedef char __pad_before_uframe[sizeof(__u64) - sizeof(snd_pcm_uframes_t)];
+typedef char __pad_after_uframe[0];
+#endif
+
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
+typedef char __pad_before_uframe[0];
+typedef char __pad_after_uframe[sizeof(__u64) - sizeof(snd_pcm_uframes_t)];
+#endif
+
+struct __snd_pcm_mmap_status64 {
+ snd_pcm_state_t state; /* RO: state - SNDRV_PCM_STATE_XXXX */
+ __u32 pad1; /* Needed for 64 bit alignment */
+ __pad_before_uframe __pad1;
+ snd_pcm_uframes_t hw_ptr; /* RO: hw ptr (0...boundary-1) */
+ __pad_after_uframe __pad2;
+ struct __snd_timespec64 tstamp; /* Timestamp */
+ snd_pcm_state_t suspended_state;/* RO: suspended stream state */
+ __u32 pad3; /* Needed for 64 bit alignment */
+ struct __snd_timespec64 audio_tstamp; /* sample counter or wall clock */
+};
+
+struct __snd_pcm_mmap_control64 {
+ __pad_before_uframe __pad1;
+ snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
+ __pad_before_uframe __pad2; // This should be __pad_after_uframe, but binary
+ // backwards compatibility constraints prevent a fix.
+ + __pad_before_uframe __pad3; + snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */ + __pad_after_uframe __pad4; +}; + +struct __snd_pcm_sync_ptr64 { + __u32 flags; + __u32 pad1; + union { + struct __snd_pcm_mmap_status64 status; + unsigned char reserved[64]; + } s; + union { + struct __snd_pcm_mmap_control64 control; + unsigned char reserved[64]; + } c; +}; + +struct snd_xferi { + snd_pcm_sframes_t result; + void __user *buf; + snd_pcm_uframes_t frames; +}; + +struct snd_xfern { + snd_pcm_sframes_t result; + void __user * __user *bufs; + snd_pcm_uframes_t frames; +}; + +enum { + SNDRV_PCM_TSTAMP_TYPE_GETTIMEOFDAY = 0, /* gettimeofday equivalent */ + SNDRV_PCM_TSTAMP_TYPE_MONOTONIC, /* posix_clock_monotonic equivalent */ + SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW, /* monotonic_raw (no NTP) */ + SNDRV_PCM_TSTAMP_TYPE_LAST = SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW, +}; + +/* channel positions */ +enum { + SNDRV_CHMAP_UNKNOWN = 0, + SNDRV_CHMAP_NA, /* N/A, silent */ + SNDRV_CHMAP_MONO, /* mono stream */ + /* this follows the alsa-lib mixer channel value + 3 */ + SNDRV_CHMAP_FL, /* front left */ + SNDRV_CHMAP_FR, /* front right */ + SNDRV_CHMAP_RL, /* rear left */ + SNDRV_CHMAP_RR, /* rear right */ + SNDRV_CHMAP_FC, /* front center */ + SNDRV_CHMAP_LFE, /* LFE */ + SNDRV_CHMAP_SL, /* side left */ + SNDRV_CHMAP_SR, /* side right */ + SNDRV_CHMAP_RC, /* rear center */ + /* new definitions */ + SNDRV_CHMAP_FLC, /* front left center */ + SNDRV_CHMAP_FRC, /* front right center */ + SNDRV_CHMAP_RLC, /* rear left center */ + SNDRV_CHMAP_RRC, /* rear right center */ + SNDRV_CHMAP_FLW, /* front left wide */ + SNDRV_CHMAP_FRW, /* front right wide */ + SNDRV_CHMAP_FLH, /* front left high */ + SNDRV_CHMAP_FCH, /* front center high */ + SNDRV_CHMAP_FRH, /* front right high */ + SNDRV_CHMAP_TC, /* top center */ + SNDRV_CHMAP_TFL, /* top front left */ + SNDRV_CHMAP_TFR, /* top front right */ + SNDRV_CHMAP_TFC, /* top front center */ + SNDRV_CHMAP_TRL, /* top rear left */ + SNDRV_CHMAP_TRR, /* top rear right */ + SNDRV_CHMAP_TRC, /* top rear center */ + /* new definitions for UAC2 */ + SNDRV_CHMAP_TFLC, /* top front left center */ + SNDRV_CHMAP_TFRC, /* top front right center */ + SNDRV_CHMAP_TSL, /* top side left */ + SNDRV_CHMAP_TSR, /* top side right */ + SNDRV_CHMAP_LLFE, /* left LFE */ + SNDRV_CHMAP_RLFE, /* right LFE */ + SNDRV_CHMAP_BC, /* bottom center */ + SNDRV_CHMAP_BLC, /* bottom left center */ + SNDRV_CHMAP_BRC, /* bottom right center */ + SNDRV_CHMAP_LAST = SNDRV_CHMAP_BRC, +}; + +#define SNDRV_CHMAP_POSITION_MASK 0xffff +#define SNDRV_CHMAP_PHASE_INVERSE (0x01 << 16) +#define SNDRV_CHMAP_DRIVER_SPEC (0x02 << 16) + +#define SNDRV_PCM_IOCTL_PVERSION _IOR('A', 0x00, int) +#define SNDRV_PCM_IOCTL_INFO _IOR('A', 0x01, struct snd_pcm_info) +#define SNDRV_PCM_IOCTL_TSTAMP _IOW('A', 0x02, int) +#define SNDRV_PCM_IOCTL_TTSTAMP _IOW('A', 0x03, int) +#define SNDRV_PCM_IOCTL_USER_PVERSION _IOW('A', 0x04, int) +#define SNDRV_PCM_IOCTL_HW_REFINE _IOWR('A', 0x10, struct snd_pcm_hw_params) +#define SNDRV_PCM_IOCTL_HW_PARAMS _IOWR('A', 0x11, struct snd_pcm_hw_params) +#define SNDRV_PCM_IOCTL_HW_FREE _IO('A', 0x12) +#define SNDRV_PCM_IOCTL_SW_PARAMS _IOWR('A', 0x13, struct snd_pcm_sw_params) +#define SNDRV_PCM_IOCTL_STATUS _IOR('A', 0x20, struct snd_pcm_status) +#define SNDRV_PCM_IOCTL_DELAY _IOR('A', 0x21, snd_pcm_sframes_t) +#define SNDRV_PCM_IOCTL_HWSYNC _IO('A', 0x22) +#define __SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct __snd_pcm_sync_ptr) +#define __SNDRV_PCM_IOCTL_SYNC_PTR64 _IOWR('A', 
0x23, struct __snd_pcm_sync_ptr64) +#define SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct snd_pcm_sync_ptr) +#define SNDRV_PCM_IOCTL_STATUS_EXT _IOWR('A', 0x24, struct snd_pcm_status) +#define SNDRV_PCM_IOCTL_CHANNEL_INFO _IOR('A', 0x32, struct snd_pcm_channel_info) +#define SNDRV_PCM_IOCTL_PREPARE _IO('A', 0x40) +#define SNDRV_PCM_IOCTL_RESET _IO('A', 0x41) +#define SNDRV_PCM_IOCTL_START _IO('A', 0x42) +#define SNDRV_PCM_IOCTL_DROP _IO('A', 0x43) +#define SNDRV_PCM_IOCTL_DRAIN _IO('A', 0x44) +#define SNDRV_PCM_IOCTL_PAUSE _IOW('A', 0x45, int) +#define SNDRV_PCM_IOCTL_REWIND _IOW('A', 0x46, snd_pcm_uframes_t) +#define SNDRV_PCM_IOCTL_RESUME _IO('A', 0x47) +#define SNDRV_PCM_IOCTL_XRUN _IO('A', 0x48) +#define SNDRV_PCM_IOCTL_FORWARD _IOW('A', 0x49, snd_pcm_uframes_t) +#define SNDRV_PCM_IOCTL_WRITEI_FRAMES _IOW('A', 0x50, struct snd_xferi) +#define SNDRV_PCM_IOCTL_READI_FRAMES _IOR('A', 0x51, struct snd_xferi) +#define SNDRV_PCM_IOCTL_WRITEN_FRAMES _IOW('A', 0x52, struct snd_xfern) +#define SNDRV_PCM_IOCTL_READN_FRAMES _IOR('A', 0x53, struct snd_xfern) +#define SNDRV_PCM_IOCTL_LINK _IOW('A', 0x60, int) +#define SNDRV_PCM_IOCTL_UNLINK _IO('A', 0x61) + +/***************************************************************************** + * * + * MIDI v1.0 interface * + * * + *****************************************************************************/ + +/* + * Raw MIDI section - /dev/snd/midi?? + */ + +#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 4) + +enum { + SNDRV_RAWMIDI_STREAM_OUTPUT = 0, + SNDRV_RAWMIDI_STREAM_INPUT, + SNDRV_RAWMIDI_STREAM_LAST = SNDRV_RAWMIDI_STREAM_INPUT, +}; + +#define SNDRV_RAWMIDI_INFO_OUTPUT 0x00000001 +#define SNDRV_RAWMIDI_INFO_INPUT 0x00000002 +#define SNDRV_RAWMIDI_INFO_DUPLEX 0x00000004 +#define SNDRV_RAWMIDI_INFO_UMP 0x00000008 + +struct snd_rawmidi_info { + unsigned int device; /* RO/WR (control): device number */ + unsigned int subdevice; /* RO/WR (control): subdevice number */ + int stream; /* WR: stream */ + int card; /* R: card number */ + unsigned int flags; /* SNDRV_RAWMIDI_INFO_XXXX */ + unsigned char id[64]; /* ID (user selectable) */ + unsigned char name[80]; /* name of device */ + unsigned char subname[32]; /* name of active or selected subdevice */ + unsigned int subdevices_count; + unsigned int subdevices_avail; + unsigned char reserved[64]; /* reserved for future use */ +}; + +#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0) +#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0 +#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0) +#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0) +#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3 +#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3) + +#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16 + +struct snd_rawmidi_framing_tstamp { + /* For now, frame_type is always 0. Midi 2.0 is expected to add new + * types here. Applications are expected to skip unknown frame types. 
+ */ + __u8 frame_type; + __u8 length; /* number of valid bytes in data field */ + __u8 reserved[2]; + __u32 tv_nsec; /* nanoseconds */ + __u64 tv_sec; /* seconds */ + __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH]; +} __packed; + +struct snd_rawmidi_params { + int stream; + size_t buffer_size; /* queue size in bytes */ + size_t avail_min; /* minimum avail bytes for wakeup */ + unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */ + unsigned int mode; /* For input data only, frame incoming data */ + unsigned char reserved[12]; /* reserved for future use */ +}; + +#ifndef __KERNEL__ +struct snd_rawmidi_status { + int stream; + __time_pad pad1; + struct timespec tstamp; /* Timestamp */ + size_t avail; /* available bytes */ + size_t xruns; /* count of overruns since last status (in bytes) */ + unsigned char reserved[16]; /* reserved for future use */ +}; +#endif + +/* UMP EP info flags */ +#define SNDRV_UMP_EP_INFO_STATIC_BLOCKS 0x01 + +/* UMP EP Protocol / JRTS capability bits */ +#define SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK 0x0300 +#define SNDRV_UMP_EP_INFO_PROTO_MIDI1 0x0100 /* MIDI 1.0 */ +#define SNDRV_UMP_EP_INFO_PROTO_MIDI2 0x0200 /* MIDI 2.0 */ +#define SNDRV_UMP_EP_INFO_PROTO_JRTS_MASK 0x0003 +#define SNDRV_UMP_EP_INFO_PROTO_JRTS_TX 0x0001 /* JRTS Transmit */ +#define SNDRV_UMP_EP_INFO_PROTO_JRTS_RX 0x0002 /* JRTS Receive */ + +/* UMP Endpoint information */ +struct snd_ump_endpoint_info { + int card; /* card number */ + int device; /* device number */ + unsigned int flags; /* additional info */ + unsigned int protocol_caps; /* protocol capabilities */ + unsigned int protocol; /* current protocol */ + unsigned int num_blocks; /* # of function blocks */ + unsigned short version; /* UMP major/minor version */ + unsigned short family_id; /* MIDI device family ID */ + unsigned short model_id; /* MIDI family model ID */ + unsigned int manufacturer_id; /* MIDI manufacturer ID */ + unsigned char sw_revision[4]; /* software revision */ + unsigned short padding; + unsigned char name[128]; /* endpoint name string */ + unsigned char product_id[128]; /* unique product id string */ + unsigned char reserved[32]; +} __packed; + +/* UMP direction */ +#define SNDRV_UMP_DIR_INPUT 0x01 +#define SNDRV_UMP_DIR_OUTPUT 0x02 +#define SNDRV_UMP_DIR_BIDIRECTION 0x03 + +/* UMP block info flags */ +#define SNDRV_UMP_BLOCK_IS_MIDI1 (1U << 0) /* MIDI 1.0 port w/o restrict */ +#define SNDRV_UMP_BLOCK_IS_LOWSPEED (1U << 1) /* 31.25Kbps B/W MIDI1 port */ + +/* UMP block user-interface hint */ +#define SNDRV_UMP_BLOCK_UI_HINT_UNKNOWN 0x00 +#define SNDRV_UMP_BLOCK_UI_HINT_RECEIVER 0x01 +#define SNDRV_UMP_BLOCK_UI_HINT_SENDER 0x02 +#define SNDRV_UMP_BLOCK_UI_HINT_BOTH 0x03 + +/* UMP groups and blocks */ +#define SNDRV_UMP_MAX_GROUPS 16 +#define SNDRV_UMP_MAX_BLOCKS 32 + +/* UMP Block information */ +struct snd_ump_block_info { + int card; /* card number */ + int device; /* device number */ + unsigned char block_id; /* block ID (R/W) */ + unsigned char direction; /* UMP direction */ + unsigned char active; /* Activeness */ + unsigned char first_group; /* first group ID */ + unsigned char num_groups; /* number of groups */ + unsigned char midi_ci_version; /* MIDI-CI support version */ + unsigned char sysex8_streams; /* max number of sysex8 streams */ + unsigned char ui_hint; /* user interface hint */ + unsigned int flags; /* various info flags */ + unsigned char name[128]; /* block name string */ + unsigned char reserved[32]; +} __packed; + +#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, 
int) +#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info) +#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int) +#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params) +#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status) +#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int) +#define SNDRV_RAWMIDI_IOCTL_DRAIN _IOW('W', 0x31, int) +/* Additional ioctls for UMP rawmidi devices */ +#define SNDRV_UMP_IOCTL_ENDPOINT_INFO _IOR('W', 0x40, struct snd_ump_endpoint_info) +#define SNDRV_UMP_IOCTL_BLOCK_INFO _IOR('W', 0x41, struct snd_ump_block_info) + +/* + * Timer section - /dev/snd/timer + */ + +#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7) + +enum { + SNDRV_TIMER_CLASS_NONE = -1, + SNDRV_TIMER_CLASS_SLAVE = 0, + SNDRV_TIMER_CLASS_GLOBAL, + SNDRV_TIMER_CLASS_CARD, + SNDRV_TIMER_CLASS_PCM, + SNDRV_TIMER_CLASS_LAST = SNDRV_TIMER_CLASS_PCM, +}; + +/* slave timer classes */ +enum { + SNDRV_TIMER_SCLASS_NONE = 0, + SNDRV_TIMER_SCLASS_APPLICATION, + SNDRV_TIMER_SCLASS_SEQUENCER, /* alias */ + SNDRV_TIMER_SCLASS_OSS_SEQUENCER, /* alias */ + SNDRV_TIMER_SCLASS_LAST = SNDRV_TIMER_SCLASS_OSS_SEQUENCER, +}; + +/* global timers (device member) */ +#define SNDRV_TIMER_GLOBAL_SYSTEM 0 +#define SNDRV_TIMER_GLOBAL_RTC 1 /* unused */ +#define SNDRV_TIMER_GLOBAL_HPET 2 +#define SNDRV_TIMER_GLOBAL_HRTIMER 3 + +/* info flags */ +#define SNDRV_TIMER_FLG_SLAVE (1<<0) /* cannot be controlled */ + +struct snd_timer_id { + int dev_class; + int dev_sclass; + int card; + int device; + int subdevice; +}; + +struct snd_timer_ginfo { + struct snd_timer_id tid; /* requested timer ID */ + unsigned int flags; /* timer flags - SNDRV_TIMER_FLG_* */ + int card; /* card number */ + unsigned char id[64]; /* timer identification */ + unsigned char name[80]; /* timer name */ + unsigned long reserved0; /* reserved for future use */ + unsigned long resolution; /* average period resolution in ns */ + unsigned long resolution_min; /* minimal period resolution in ns */ + unsigned long resolution_max; /* maximal period resolution in ns */ + unsigned int clients; /* active timer clients */ + unsigned char reserved[32]; +}; + +struct snd_timer_gparams { + struct snd_timer_id tid; /* requested timer ID */ + unsigned long period_num; /* requested precise period duration (in seconds) - numerator */ + unsigned long period_den; /* requested precise period duration (in seconds) - denominator */ + unsigned char reserved[32]; +}; + +struct snd_timer_gstatus { + struct snd_timer_id tid; /* requested timer ID */ + unsigned long resolution; /* current period resolution in ns */ + unsigned long resolution_num; /* precise current period resolution (in seconds) - numerator */ + unsigned long resolution_den; /* precise current period resolution (in seconds) - denominator */ + unsigned char reserved[32]; +}; + +struct snd_timer_select { + struct snd_timer_id id; /* bind to timer ID */ + unsigned char reserved[32]; /* reserved */ +}; + +struct snd_timer_info { + unsigned int flags; /* timer flags - SNDRV_TIMER_FLG_* */ + int card; /* card number */ + unsigned char id[64]; /* timer identificator */ + unsigned char name[80]; /* timer name */ + unsigned long reserved0; /* reserved for future use */ + unsigned long resolution; /* average period resolution in ns */ + unsigned char reserved[64]; /* reserved */ +}; + +#define SNDRV_TIMER_PSFLG_AUTO (1<<0) /* auto start, otherwise one-shot */ +#define SNDRV_TIMER_PSFLG_EXCLUSIVE (1<<1) /* exclusive use, precise 
start/stop/pause/continue */
+#define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */
+
+struct snd_timer_params {
+ unsigned int flags; /* flags - SNDRV_TIMER_PSFLG_* */
+ unsigned int ticks; /* requested resolution in ticks */
+ unsigned int queue_size; /* total size of queue (32-1024) */
+ unsigned int reserved0; /* reserved, was: failure locations */
+ unsigned int filter; /* event filter (bitmask of SNDRV_TIMER_EVENT_*) */
+ unsigned char reserved[60]; /* reserved */
+};
+
+#ifndef __KERNEL__
+struct snd_timer_status {
+ struct timespec tstamp; /* Timestamp - last update */
+ unsigned int resolution; /* current period resolution in ns */
+ unsigned int lost; /* counter of master tick lost */
+ unsigned int overrun; /* count of read queue overruns */
+ unsigned int queue; /* used queue size */
+ unsigned char reserved[64]; /* reserved */
+};
+#endif
+
+#define SNDRV_TIMER_IOCTL_PVERSION _IOR('T', 0x00, int)
+#define SNDRV_TIMER_IOCTL_NEXT_DEVICE _IOWR('T', 0x01, struct snd_timer_id)
+#define SNDRV_TIMER_IOCTL_TREAD_OLD _IOW('T', 0x02, int)
+#define SNDRV_TIMER_IOCTL_GINFO _IOWR('T', 0x03, struct snd_timer_ginfo)
+#define SNDRV_TIMER_IOCTL_GPARAMS _IOW('T', 0x04, struct snd_timer_gparams)
+#define SNDRV_TIMER_IOCTL_GSTATUS _IOWR('T', 0x05, struct snd_timer_gstatus)
+#define SNDRV_TIMER_IOCTL_SELECT _IOW('T', 0x10, struct snd_timer_select)
+#define SNDRV_TIMER_IOCTL_INFO _IOR('T', 0x11, struct snd_timer_info)
+#define SNDRV_TIMER_IOCTL_PARAMS _IOW('T', 0x12, struct snd_timer_params)
+#define SNDRV_TIMER_IOCTL_STATUS _IOR('T', 0x14, struct snd_timer_status)
+/* The following four ioctls were changed in 1.0.9 due to a numbering conflict */
+#define SNDRV_TIMER_IOCTL_START _IO('T', 0xa0)
+#define SNDRV_TIMER_IOCTL_STOP _IO('T', 0xa1)
+#define SNDRV_TIMER_IOCTL_CONTINUE _IO('T', 0xa2)
+#define SNDRV_TIMER_IOCTL_PAUSE _IO('T', 0xa3)
+#define SNDRV_TIMER_IOCTL_TREAD64 _IOW('T', 0xa4, int)
+
+#if __BITS_PER_LONG == 64
+#define SNDRV_TIMER_IOCTL_TREAD SNDRV_TIMER_IOCTL_TREAD_OLD
+#else
+#define SNDRV_TIMER_IOCTL_TREAD ((sizeof(__kernel_long_t) >= sizeof(time_t)) ? \
+ SNDRV_TIMER_IOCTL_TREAD_OLD : \
+ SNDRV_TIMER_IOCTL_TREAD64)
+#endif
+
+struct snd_timer_read {
+ unsigned int resolution;
+ unsigned int ticks;
+};
+
+enum {
+ SNDRV_TIMER_EVENT_RESOLUTION = 0, /* val = resolution in ns */
+ SNDRV_TIMER_EVENT_TICK, /* val = ticks */
+ SNDRV_TIMER_EVENT_START, /* val = resolution in ns */
+ SNDRV_TIMER_EVENT_STOP, /* val = 0 */
+ SNDRV_TIMER_EVENT_CONTINUE, /* val = resolution in ns */
+ SNDRV_TIMER_EVENT_PAUSE, /* val = 0 */
+ SNDRV_TIMER_EVENT_EARLY, /* val = 0, early event */
+ SNDRV_TIMER_EVENT_SUSPEND, /* val = 0 */
+ SNDRV_TIMER_EVENT_RESUME, /* val = resolution in ns */
+ /* master timer events for slave timer instances */
+ SNDRV_TIMER_EVENT_MSTART = SNDRV_TIMER_EVENT_START + 10,
+ SNDRV_TIMER_EVENT_MSTOP = SNDRV_TIMER_EVENT_STOP + 10,
+ SNDRV_TIMER_EVENT_MCONTINUE = SNDRV_TIMER_EVENT_CONTINUE + 10,
+ SNDRV_TIMER_EVENT_MPAUSE = SNDRV_TIMER_EVENT_PAUSE + 10,
+ SNDRV_TIMER_EVENT_MSUSPEND = SNDRV_TIMER_EVENT_SUSPEND + 10,
+ SNDRV_TIMER_EVENT_MRESUME = SNDRV_TIMER_EVENT_RESUME + 10,
+};
+
+#ifndef __KERNEL__
+struct snd_timer_tread {
+ int event;
+ __time_pad pad1;
+ struct timespec tstamp;
+ unsigned int val;
+ __time_pad pad2;
+};
+#endif
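With SNDRV_TIMER_IOCTL_TREAD resolved as above, enabling "tread" mode makes read() on /dev/snd/timer return timestamped snd_timer_tread records instead of bare snd_timer_read pairs. A rough sketch of the flow, not part of the patch; the id field values are illustrative and the ioctl return codes are unchecked, so treat it as an outline rather than a reference client:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/asound.h>

/* Bind to the global system timer, request tread records, and print the
 * first batch of tick events. TREAD must be enabled before SELECT; the
 * kernel refuses to switch modes once a timer instance is attached. */
static int watch_system_timer(void)
{
	int fd = open("/dev/snd/timer", O_RDONLY);
	int tread = 1;
	struct snd_timer_select sel;
	struct snd_timer_params params;
	struct snd_timer_tread ev[16];
	ssize_t i, n;

	if (fd < 0)
		return -1;

	ioctl(fd, SNDRV_TIMER_IOCTL_TREAD, &tread);

	memset(&sel, 0, sizeof(sel));
	sel.id.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
	sel.id.device = SNDRV_TIMER_GLOBAL_SYSTEM;
	ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel);

	memset(&params, 0, sizeof(params));
	params.ticks = 1;				/* finest resolution */
	params.filter = 1 << SNDRV_TIMER_EVENT_TICK;	/* tick events only */
	ioctl(fd, SNDRV_TIMER_IOCTL_PARAMS, &params);
	ioctl(fd, SNDRV_TIMER_IOCTL_START);

	n = read(fd, ev, sizeof(ev));	/* blocks until events are queued */
	for (i = 0; i < n / (ssize_t)sizeof(ev[0]); i++)
		printf("event=%d val=%u\n", ev[i].event, ev[i].val);

	close(fd);
	return 0;
}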
+
+/****************************************************************************
+ *                                                                          *
+ *          Section for driver control interface - /dev/snd/control?       *
+ *                                                                          *
+ ****************************************************************************/
+
+#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 9)
+
+struct snd_ctl_card_info {
+ int card; /* card number */
+ int pad; /* reserved for future (was type) */
+ unsigned char id[16]; /* ID of card (user selectable) */
+ unsigned char driver[16]; /* Driver name */
+ unsigned char name[32]; /* Short name of soundcard */
+ unsigned char longname[80]; /* name + info text about soundcard */
+ unsigned char reserved_[16]; /* reserved for future (was ID of mixer) */
+ unsigned char mixername[80]; /* visual mixer identification */
+ unsigned char components[128]; /* card components / fine identification, delimited with one space (AC97 etc..) */
+};
+
+typedef int __bitwise snd_ctl_elem_type_t;
+#define SNDRV_CTL_ELEM_TYPE_NONE ((__force snd_ctl_elem_type_t) 0) /* invalid */
+#define SNDRV_CTL_ELEM_TYPE_BOOLEAN ((__force snd_ctl_elem_type_t) 1) /* boolean type */
+#define SNDRV_CTL_ELEM_TYPE_INTEGER ((__force snd_ctl_elem_type_t) 2) /* integer type */
+#define SNDRV_CTL_ELEM_TYPE_ENUMERATED ((__force snd_ctl_elem_type_t) 3) /* enumerated type */
+#define SNDRV_CTL_ELEM_TYPE_BYTES ((__force snd_ctl_elem_type_t) 4) /* byte array */
+#define SNDRV_CTL_ELEM_TYPE_IEC958 ((__force snd_ctl_elem_type_t) 5) /* IEC958 (S/PDIF) setup */
+#define SNDRV_CTL_ELEM_TYPE_INTEGER64 ((__force snd_ctl_elem_type_t) 6) /* 64-bit integer type */
+#define SNDRV_CTL_ELEM_TYPE_LAST SNDRV_CTL_ELEM_TYPE_INTEGER64
+
+typedef int __bitwise snd_ctl_elem_iface_t;
+#define SNDRV_CTL_ELEM_IFACE_CARD ((__force snd_ctl_elem_iface_t) 0) /* global control */
+#define SNDRV_CTL_ELEM_IFACE_HWDEP ((__force snd_ctl_elem_iface_t) 1) /* hardware dependent device */
+#define SNDRV_CTL_ELEM_IFACE_MIXER ((__force snd_ctl_elem_iface_t) 2) /* virtual mixer device */
+#define SNDRV_CTL_ELEM_IFACE_PCM ((__force snd_ctl_elem_iface_t) 3) /* PCM device */
+#define SNDRV_CTL_ELEM_IFACE_RAWMIDI ((__force snd_ctl_elem_iface_t) 4) /* RawMidi device */
+#define SNDRV_CTL_ELEM_IFACE_TIMER ((__force snd_ctl_elem_iface_t) 5) /* timer device */
+#define SNDRV_CTL_ELEM_IFACE_SEQUENCER ((__force snd_ctl_elem_iface_t) 6) /* sequencer client */
+#define SNDRV_CTL_ELEM_IFACE_LAST SNDRV_CTL_ELEM_IFACE_SEQUENCER
+
+#define SNDRV_CTL_ELEM_ACCESS_READ (1<<0)
+#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
+#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
+#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
+/* (1 << 3) is unused.
*/ +#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */ +#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */ +#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) +#define SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND (1<<6) /* TLV command is possible */ +#define SNDRV_CTL_ELEM_ACCESS_INACTIVE (1<<8) /* control does actually nothing, but may be updated */ +#define SNDRV_CTL_ELEM_ACCESS_LOCK (1<<9) /* write lock */ +#define SNDRV_CTL_ELEM_ACCESS_OWNER (1<<10) /* write lock owner */ +#define SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK (1<<28) /* kernel use a TLV callback */ +#define SNDRV_CTL_ELEM_ACCESS_USER (1<<29) /* user space element */ +/* bits 30 and 31 are obsoleted (for indirect access) */ + +/* for further details see the ACPI and PCI power management specification */ +#define SNDRV_CTL_POWER_D0 0x0000 /* full On */ +#define SNDRV_CTL_POWER_D1 0x0100 /* partial On */ +#define SNDRV_CTL_POWER_D2 0x0200 /* partial On */ +#define SNDRV_CTL_POWER_D3 0x0300 /* Off */ +#define SNDRV_CTL_POWER_D3hot (SNDRV_CTL_POWER_D3|0x0000) /* Off, with power */ +#define SNDRV_CTL_POWER_D3cold (SNDRV_CTL_POWER_D3|0x0001) /* Off, without power */ + +#define SNDRV_CTL_ELEM_ID_NAME_MAXLEN 44 + +struct snd_ctl_elem_id { + unsigned int numid; /* numeric identifier, zero = invalid */ + snd_ctl_elem_iface_t iface; /* interface identifier */ + unsigned int device; /* device/client number */ + unsigned int subdevice; /* subdevice (substream) number */ + unsigned char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* ASCII name of item */ + unsigned int index; /* index of item */ +}; + +struct snd_ctl_elem_list { + unsigned int offset; /* W: first element ID to get */ + unsigned int space; /* W: count of element IDs to get */ + unsigned int used; /* R: count of element IDs set */ + unsigned int count; /* R: count of all elements */ + struct snd_ctl_elem_id __user *pids; /* R: IDs */ + unsigned char reserved[50]; +}; + +struct snd_ctl_elem_info { + struct snd_ctl_elem_id id; /* W: element ID */ + snd_ctl_elem_type_t type; /* R: value type - SNDRV_CTL_ELEM_TYPE_* */ + unsigned int access; /* R: value access (bitmask) - SNDRV_CTL_ELEM_ACCESS_* */ + unsigned int count; /* count of values */ + __kernel_pid_t owner; /* owner's PID of this control */ + union { + struct { + long min; /* R: minimum value */ + long max; /* R: maximum value */ + long step; /* R: step (0 variable) */ + } integer; + struct { + long long min; /* R: minimum value */ + long long max; /* R: maximum value */ + long long step; /* R: step (0 variable) */ + } integer64; + struct { + unsigned int items; /* R: number of items */ + unsigned int item; /* W: item number */ + char name[64]; /* R: value name */ + __u64 names_ptr; /* W: names list (ELEM_ADD only) */ + unsigned int names_length; + } enumerated; + unsigned char reserved[128]; + } value; + unsigned char reserved[64]; +}; + +struct snd_ctl_elem_value { + struct snd_ctl_elem_id id; /* W: element ID */ + unsigned int indirect: 1; /* W: indirect access - obsoleted */ + union { + union { + long value[128]; + long *value_ptr; /* obsoleted */ + } integer; + union { + long long value[64]; + long long *value_ptr; /* obsoleted */ + } integer64; + union { + unsigned int item[128]; + unsigned int *item_ptr; /* obsoleted */ + } enumerated; + union { + unsigned char data[512]; + unsigned char *data_ptr; /* obsoleted */ + } bytes; + struct snd_aes_iec958 iec958; + } value; /* RO */ + unsigned char reserved[128]; +}; + +struct snd_ctl_tlv { 
+ unsigned int numid; /* control element numeric identification */
+ unsigned int length; /* in bytes aligned to 4 */
+ unsigned int tlv[]; /* first TLV */
+};
+
+#define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int)
+#define SNDRV_CTL_IOCTL_CARD_INFO _IOR('U', 0x01, struct snd_ctl_card_info)
+#define SNDRV_CTL_IOCTL_ELEM_LIST _IOWR('U', 0x10, struct snd_ctl_elem_list)
+#define SNDRV_CTL_IOCTL_ELEM_INFO _IOWR('U', 0x11, struct snd_ctl_elem_info)
+#define SNDRV_CTL_IOCTL_ELEM_READ _IOWR('U', 0x12, struct snd_ctl_elem_value)
+#define SNDRV_CTL_IOCTL_ELEM_WRITE _IOWR('U', 0x13, struct snd_ctl_elem_value)
+#define SNDRV_CTL_IOCTL_ELEM_LOCK _IOW('U', 0x14, struct snd_ctl_elem_id)
+#define SNDRV_CTL_IOCTL_ELEM_UNLOCK _IOW('U', 0x15, struct snd_ctl_elem_id)
+#define SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS _IOWR('U', 0x16, int)
+#define SNDRV_CTL_IOCTL_ELEM_ADD _IOWR('U', 0x17, struct snd_ctl_elem_info)
+#define SNDRV_CTL_IOCTL_ELEM_REPLACE _IOWR('U', 0x18, struct snd_ctl_elem_info)
+#define SNDRV_CTL_IOCTL_ELEM_REMOVE _IOWR('U', 0x19, struct snd_ctl_elem_id)
+#define SNDRV_CTL_IOCTL_TLV_READ _IOWR('U', 0x1a, struct snd_ctl_tlv)
+#define SNDRV_CTL_IOCTL_TLV_WRITE _IOWR('U', 0x1b, struct snd_ctl_tlv)
+#define SNDRV_CTL_IOCTL_TLV_COMMAND _IOWR('U', 0x1c, struct snd_ctl_tlv)
+#define SNDRV_CTL_IOCTL_HWDEP_NEXT_DEVICE _IOWR('U', 0x20, int)
+#define SNDRV_CTL_IOCTL_HWDEP_INFO _IOR('U', 0x21, struct snd_hwdep_info)
+#define SNDRV_CTL_IOCTL_PCM_NEXT_DEVICE _IOR('U', 0x30, int)
+#define SNDRV_CTL_IOCTL_PCM_INFO _IOWR('U', 0x31, struct snd_pcm_info)
+#define SNDRV_CTL_IOCTL_PCM_PREFER_SUBDEVICE _IOW('U', 0x32, int)
+#define SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE _IOWR('U', 0x40, int)
+#define SNDRV_CTL_IOCTL_RAWMIDI_INFO _IOWR('U', 0x41, struct snd_rawmidi_info)
+#define SNDRV_CTL_IOCTL_RAWMIDI_PREFER_SUBDEVICE _IOW('U', 0x42, int)
+#define SNDRV_CTL_IOCTL_UMP_NEXT_DEVICE _IOWR('U', 0x43, int)
+#define SNDRV_CTL_IOCTL_UMP_ENDPOINT_INFO _IOWR('U', 0x44, struct snd_ump_endpoint_info)
+#define SNDRV_CTL_IOCTL_UMP_BLOCK_INFO _IOWR('U', 0x45, struct snd_ump_block_info)
+#define SNDRV_CTL_IOCTL_POWER _IOWR('U', 0xd0, int)
+#define SNDRV_CTL_IOCTL_POWER_STATE _IOR('U', 0xd1, int)
+
+/*
+ * Read interface.
+ */
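The read interface below is the notification side of the control API: after SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS, each read() on the control device yields the snd_ctl_event records defined next. A hedged sketch of a consumer, not part of this change; the card-0 device node and the output format are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/asound.h>

/* Report element changes on card 0. Element removal is signalled by
 * mask == SNDRV_CTL_EVENT_MASK_REMOVE (all bits set), so compare for
 * equality before testing individual bits. */
static int watch_ctl_events(void)
{
	int fd = open("/dev/snd/controlC0", O_RDONLY);
	int subscribe = 1;
	struct snd_ctl_event ev;

	if (fd < 0 || ioctl(fd, SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS, &subscribe) < 0)
		return -1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type != SNDRV_CTL_EVENT_ELEM)
			continue;
		if (ev.data.elem.mask == SNDRV_CTL_EVENT_MASK_REMOVE)
			printf("removed: numid=%u\n", ev.data.elem.id.numid);
		else if (ev.data.elem.mask & SNDRV_CTL_EVENT_MASK_VALUE)
			printf("changed: numid=%u '%s'\n",
			       ev.data.elem.id.numid,
			       (const char *)ev.data.elem.id.name);
	}
	close(fd);
	return 0;
}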
+
+enum sndrv_ctl_event_type {
+ SNDRV_CTL_EVENT_ELEM = 0,
+ SNDRV_CTL_EVENT_LAST = SNDRV_CTL_EVENT_ELEM,
+};
+
+#define SNDRV_CTL_EVENT_MASK_VALUE (1<<0) /* element value was changed */
+#define SNDRV_CTL_EVENT_MASK_INFO (1<<1) /* element info was changed */
+#define SNDRV_CTL_EVENT_MASK_ADD (1<<2) /* element was added */
+#define SNDRV_CTL_EVENT_MASK_TLV (1<<3) /* element TLV tree was changed */
+#define SNDRV_CTL_EVENT_MASK_REMOVE (~0U) /* element was removed */
+
+struct snd_ctl_event {
+ int type; /* event type - SNDRV_CTL_EVENT_* */
+ union {
+ struct {
+ unsigned int mask;
+ struct snd_ctl_elem_id id;
+ } elem;
+ unsigned char data8[60];
+ } data;
+};
+
+/*
+ * Control names
+ */
+
+#define SNDRV_CTL_NAME_NONE ""
+#define SNDRV_CTL_NAME_PLAYBACK "Playback "
+#define SNDRV_CTL_NAME_CAPTURE "Capture "
+
+#define SNDRV_CTL_NAME_IEC958_NONE ""
+#define SNDRV_CTL_NAME_IEC958_SWITCH "Switch"
+#define SNDRV_CTL_NAME_IEC958_VOLUME "Volume"
+#define SNDRV_CTL_NAME_IEC958_DEFAULT "Default"
+#define SNDRV_CTL_NAME_IEC958_MASK "Mask"
+#define SNDRV_CTL_NAME_IEC958_CON_MASK "Con Mask"
+#define SNDRV_CTL_NAME_IEC958_PRO_MASK "Pro Mask"
+#define SNDRV_CTL_NAME_IEC958_PCM_STREAM "PCM Stream"
+#define SNDRV_CTL_NAME_IEC958(expl,direction,what) "IEC958 " expl SNDRV_CTL_NAME_##direction SNDRV_CTL_NAME_IEC958_##what
+
+#endif /* _UAPI__SOUND_ASOUND_H */
diff --git a/tools/perf/trace/beauty/mount_flags.sh b/tools/perf/trace/beauty/mount_flags.sh index 730099a9a67c..ff578f7b451b 100755 --- a/tools/perf/trace/beauty/mount_flags.sh +++ b/tools/perf/trace/beauty/mount_flags.sh @@ -1,15 +1,15 @@ #!/bin/sh # SPDX-License-Identifier: LGPL-2.1 -[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ +[ $# -eq 1 ] && beauty_uapi_linux_dir=$1 || beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ printf "static const char *mount_flags[] = {\n" regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*' -grep -E $regex ${header_dir}/mount.h | grep -E -v '(MSK|VERBOSE|MGC_VAL)\>' | \ +grep -E $regex ${beauty_uapi_linux_dir}/mount.h | grep -E -v '(MSK|VERBOSE|MGC_VAL)\>' | \ sed -r "s/$regex/\2 \2 \1/g" | sort -n | \ xargs printf "\t[%s ?
(ilog2(%s) + 1) : 0] = \"%s\",\n" regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*' -grep -E $regex ${header_dir}/mount.h | \ +grep -E $regex ${beauty_uapi_linux_dir}/mount.h | \ sed -r "s/$regex/\2 \1/g" | \ xargs printf "\t[%s + 1] = \"%s\",\n" printf "};\n" diff --git a/tools/perf/trace/beauty/move_mount_flags.sh b/tools/perf/trace/beauty/move_mount_flags.sh index ce5e632d1448..c0dde9020bc3 100755 --- a/tools/perf/trace/beauty/move_mount_flags.sh +++ b/tools/perf/trace/beauty/move_mount_flags.sh @@ -2,12 +2,12 @@ # SPDX-License-Identifier: LGPL-2.1 if [ $# -ne 1 ] ; then - linux_header_dir=tools/include/uapi/linux + beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ else - linux_header_dir=$1 + beauty_uapi_linux_dir=$1 fi -linux_mount=${linux_header_dir}/mount.h +linux_mount=${beauty_uapi_linux_dir}/mount.h printf "static const char *move_mount_flags[] = {\n" regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MOVE_MOUNT_([^_]+[[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*' diff --git a/tools/perf/trace/beauty/prctl.c b/tools/perf/trace/beauty/prctl.c index 6fe5ad5f5d3a..7d1aa9fd03da 100644 --- a/tools/perf/trace/beauty/prctl.c +++ b/tools/perf/trace/beauty/prctl.c @@ -7,7 +7,7 @@ #include "trace/beauty/beauty.h" #include <linux/kernel.h> -#include <uapi/linux/prctl.h> +#include <linux/prctl.h> #include "trace/beauty/generated/prctl_option_array.c" diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh index 9455d9672f14..e049f5e9c011 100755 --- a/tools/perf/trace/beauty/prctl_option.sh +++ b/tools/perf/trace/beauty/prctl_option.sh @@ -1,18 +1,18 @@ #!/bin/sh # SPDX-License-Identifier: LGPL-2.1 -[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ +[ $# -eq 1 ] && beauty_uapi_linux_dir=$1 || beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ printf "static const char *prctl_options[] = {\n" regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*/.*)?$' -grep -E $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \ +grep -E $regex ${beauty_uapi_linux_dir}/prctl.h | grep -v PR_SET_PTRACER | \ sed -E "s%$regex%\2 \1%g" | \ sort -n | xargs printf "\t[%s] = \"%s\",\n" printf "};\n" printf "static const char *prctl_set_mm_options[] = {\n" regex='^#[[:space:]]+define[[:space:]]+PR_SET_MM_(\w+)[[:space:]]*([[:digit:]]+).*' -grep -E $regex ${header_dir}/prctl.h | \ +grep -E $regex ${beauty_uapi_linux_dir}/prctl.h | \ sed -r "s/$regex/\2 \1/g" | \ sort -n | xargs printf "\t[%s] = \"%s\",\n" printf "};\n" diff --git a/tools/perf/trace/beauty/rename_flags.sh b/tools/perf/trace/beauty/rename_flags.sh index 94bf7f45d28e..702411dd7a1c 100755 --- a/tools/perf/trace/beauty/rename_flags.sh +++ b/tools/perf/trace/beauty/rename_flags.sh @@ -2,7 +2,7 @@ # Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> # SPDX-License-Identifier: LGPL-2.1 -[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/perf/trace/beauty/include/uapi/linux/ fs_header=${header_dir}/fs.h diff --git a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh index e0803b957593..572939a12884 100755 --- a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh +++ b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh @@ -1,9 +1,9 @@ #!/bin/sh # SPDX-License-Identifier: LGPL-2.1 -[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/ +[ 
$# -eq 1 ] && beauty_uapi_sound_dir=$1 || beauty_uapi_sound_dir=tools/perf/trace/beauty/include/uapi/sound/ printf "static const char *sndrv_ctl_ioctl_cmds[] = {\n" -grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $header_dir/asound.h | \ +grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $beauty_uapi_sound_dir/asound.h | \ sed -r 's/^#define +SNDRV_CTL_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.U., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g' printf "};\n" diff --git a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh index 7a464a7bf913..33afae9a1c07 100755 --- a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh +++ b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh @@ -1,9 +1,9 @@ #!/bin/sh # SPDX-License-Identifier: LGPL-2.1 -[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/ +[ $# -eq 1 ] && beauty_uapi_sound_dir=$1 || beauty_uapi_sound_dir=tools/perf/trace/beauty/include/uapi/sound/ printf "static const char *sndrv_pcm_ioctl_cmds[] = {\n" -grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $header_dir/asound.h | \ +grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $beauty_uapi_sound_dir/asound.h | \ sed -r 's/^#define +SNDRV_PCM_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.A., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g' printf "};\n" diff --git a/tools/perf/trace/beauty/statx.c b/tools/perf/trace/beauty/statx.c index dc5943a6352d..24843e614b93 100644 --- a/tools/perf/trace/beauty/statx.c +++ b/tools/perf/trace/beauty/statx.c @@ -6,73 +6,20 @@ */ #include "trace/beauty/beauty.h" -#include <linux/kernel.h> #include <sys/types.h> -#include <uapi/linux/fcntl.h> -#include <uapi/linux/stat.h> +#include <linux/log2.h> -size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg) +static size_t statx__scnprintf_mask(unsigned long mask, char *bf, size_t size, bool show_prefix) { - bool show_prefix = arg->show_string_prefix; - const char *prefix = "AT_"; - int printed = 0, flags = arg->val; - - if (flags == 0) - return scnprintf(bf, size, "%s%s", show_prefix ? "AT_STATX_" : "", "SYNC_AS_STAT"); -#define P_FLAG(n) \ - if (flags & AT_##n) { \ - printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ - flags &= ~AT_##n; \ - } - - P_FLAG(SYMLINK_NOFOLLOW); - P_FLAG(REMOVEDIR); - P_FLAG(SYMLINK_FOLLOW); - P_FLAG(NO_AUTOMOUNT); - P_FLAG(EMPTY_PATH); - P_FLAG(STATX_FORCE_SYNC); - P_FLAG(STATX_DONT_SYNC); - -#undef P_FLAG - - if (flags) - printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); - - return printed; + #include "trace/beauty/generated/statx_mask_array.c" + static DEFINE_STRARRAY(statx_mask, "STATX_"); + return strarray__scnprintf_flags(&strarray__statx_mask, bf, size, show_prefix, mask); } size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_arg *arg) { bool show_prefix = arg->show_string_prefix; - const char *prefix = "STATX_"; - int printed = 0, flags = arg->val; - -#define P_FLAG(n) \ - if (flags & STATX_##n) { \ - printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ - flags &= ~STATX_##n; \ - } - - P_FLAG(TYPE); - P_FLAG(MODE); - P_FLAG(NLINK); - P_FLAG(UID); - P_FLAG(GID); - P_FLAG(ATIME); - P_FLAG(MTIME); - P_FLAG(CTIME); - P_FLAG(INO); - P_FLAG(SIZE); - P_FLAG(BLOCKS); - P_FLAG(BTIME); - P_FLAG(MNT_ID); - P_FLAG(DIOALIGN); - P_FLAG(MNT_ID_UNIQUE); - -#undef P_FLAG - - if (flags) - printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); + int mask = arg->val; - return printed; + return statx__scnprintf_mask(mask, bf, size, show_prefix); } diff --git a/tools/perf/trace/beauty/statx_mask.sh b/tools/perf/trace/beauty/statx_mask.sh new file mode 100755 index 000000000000..18c802ed0c71 --- /dev/null +++ b/tools/perf/trace/beauty/statx_mask.sh @@ -0,0 +1,23 @@ +#!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 + +if [ $# -ne 1 ] ; then + beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ +else + beauty_uapi_linux_dir=$1 +fi + +linux_stat=${beauty_uapi_linux_dir}/stat.h + +printf "static const char *statx_mask[] = {\n" +regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+STATX_([^_]+[[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*' +# STATX_BASIC_STATS its a bitmask formed by the mask in the normal stat struct +# STATX_ALL is another bitmask and deprecated +# STATX_ATTR_*: Attributes to be found in stx_attributes and masked in stx_attributes_mask +grep -E $regex ${linux_stat} | \ + grep -v STATX_ALL | \ + grep -v STATX_BASIC_STATS | \ + grep -v '\<STATX_ATTR_' | \ + sed -r "s/$regex/\2 \1/g" | \ + xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n" +printf "};\n" diff --git a/tools/perf/trace/beauty/sync_file_range.c b/tools/perf/trace/beauty/sync_file_range.c index 1c425f04047d..3e8f50ff4fc7 100644 --- a/tools/perf/trace/beauty/sync_file_range.c +++ b/tools/perf/trace/beauty/sync_file_range.c @@ -7,7 +7,16 @@ #include "trace/beauty/beauty.h" #include <linux/log2.h> -#include <uapi/linux/fs.h> +#include <linux/fs.h> + +#ifndef SYNC_FILE_RANGE_WRITE_AND_WAIT +#define SYNC_FILE_RANGE_WAIT_BEFORE 1 +#define SYNC_FILE_RANGE_WRITE 2 +#define SYNC_FILE_RANGE_WAIT_AFTER 4 +#define SYNC_FILE_RANGE_WRITE_AND_WAIT (SYNC_FILE_RANGE_WRITE | \ + SYNC_FILE_RANGE_WAIT_BEFORE | \ + SYNC_FILE_RANGE_WAIT_AFTER) +#endif static size_t sync_file_range__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix) { diff --git a/tools/perf/trace/beauty/sync_file_range.sh b/tools/perf/trace/beauty/sync_file_range.sh index 90bf633be879..b1084c4cab63 100755 --- a/tools/perf/trace/beauty/sync_file_range.sh +++ b/tools/perf/trace/beauty/sync_file_range.sh @@ -2,7 +2,7 @@ # SPDX-License-Identifier: LGPL-2.1 if [ $# -ne 1 ] ; then - linux_header_dir=tools/include/uapi/linux + linux_header_dir=tools/perf/trace/beauty/include/uapi/linux/ else linux_header_dir=$1 fi diff --git a/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh b/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh index 87dc68c7de0c..d8e927dd2bb7 100755 --- a/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh +++ b/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh @@ -3,12 +3,12 @@ # (C) 2019, Arnaldo Carvalho de Melo <acme@redhat.com> if [ $# -ne 1 ] ; then - arch_x86_header_dir=tools/arch/x86/include/asm/ + beauty_arch_asm_dir=tools/perf/trace/beauty/arch/x86/include/asm/ else - arch_x86_header_dir=$1 + beauty_arch_asm_dir=$1 fi -x86_irq_vectors=${arch_x86_header_dir}/irq_vectors.h +x86_irq_vectors=${beauty_arch_asm_dir}/irq_vectors.h # FIRST_EXTERNAL_VECTOR is not that useful, find what is its number # and then replace whatever is using it and that is useful, which at diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh index b39cfb3720b8..12a30a9a8e0c 100755 --- a/tools/perf/trace/beauty/usbdevfs_ioctl.sh +++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh @@ -1,21 +1,21 @@ #!/bin/sh # SPDX-License-Identifier: LGPL-2.1 -[ $# -eq 1 ] && header_dir=$1 || 
header_dir=tools/include/uapi/linux/ +[ $# -eq 1 ] && beauty_uapi_linux_dir=$1 || beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux/ # also as: # #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) printf "static const char *usbdevfs_ioctl_cmds[] = {\n" regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" -grep -E "$regex" ${header_dir}/usbdevice_fs.h | grep -E -v 'USBDEVFS_\w+32[[:space:]]' | \ +grep -E "$regex" ${beauty_uapi_linux_dir}/usbdevice_fs.h | grep -E -v 'USBDEVFS_\w+32[[:space:]]' | \ sed -r "s/$regex/\4 \1/g" | \ sort | xargs printf "\t[%s] = \"%s\",\n" printf "};\n\n" printf "#if 0\n" printf "static const char *usbdevfs_ioctl_32_cmds[] = {\n" regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" -grep -E $regex ${header_dir}/usbdevice_fs.h | grep -E 'USBDEVFS_\w+32[[:space:]]' | \ +grep -E $regex ${beauty_uapi_linux_dir}/usbdevice_fs.h | grep -E 'USBDEVFS_\w+32[[:space:]]' | \ sed -r "s/$regex/\2 \1/g" | \ sort | xargs printf "\t[%s] = \"%s\",\n" printf "};\n" diff --git a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh index 2dd0a3b1f55a..e4f395e7650a 100755 --- a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh +++ b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh @@ -1,18 +1,18 @@ #!/bin/sh # SPDX-License-Identifier: LGPL-2.1 -[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ +[ $# -eq 1 ] && beauty_uapi_linux_dir=$1 || beauty_uapi_linux_dir=tools/perf/trace/beauty/include/uapi/linux printf "static const char *vhost_virtio_ioctl_cmds[] = {\n" regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*' -grep -E $regex ${header_dir}/vhost.h | \ +grep -E $regex ${beauty_uapi_linux_dir}/vhost.h | \ sed -r "s/$regex/\2 \1/g" | \ sort | xargs printf "\t[%s] = \"%s\",\n" printf "};\n" printf "static const char *vhost_virtio_ioctl_read_cmds[] = {\n" regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?R\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*' -grep -E $regex ${header_dir}/vhost.h | \ +grep -E $regex ${beauty_uapi_linux_dir}/vhost.h | \ sed -r "s/$regex/\2 \1/g" | \ sort | xargs printf "\t[%s] = \"%s\",\n" printf "};\n" diff --git a/tools/perf/trace/beauty/x86_arch_prctl.sh b/tools/perf/trace/beauty/x86_arch_prctl.sh index b1596df251f0..b714ffa3cb7a 100755 --- a/tools/perf/trace/beauty/x86_arch_prctl.sh +++ b/tools/perf/trace/beauty/x86_arch_prctl.sh @@ -2,9 +2,9 @@ # Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> # SPDX-License-Identifier: LGPL-2.1 -[ $# -eq 1 ] && x86_header_dir=$1 || x86_header_dir=tools/arch/x86/include/uapi/asm/ +[ $# -eq 1 ] && beauty_x86_arch_asm_uapi_dir=$1 || beauty_x86_arch_asm_uapi_dir=tools/perf/trace/beauty/arch/x86/include/uapi/asm/ -prctl_arch_header=${x86_header_dir}/prctl.h +prctl_arch_header=${beauty_x86_arch_asm_uapi_dir}/prctl.h print_range () { idx=$1 diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index 603d11283cbd..19503e838738 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c @@ -203,7 +203,7 @@ void ui_browser__refresh_dimensions(struct ui_browser *browser) void ui_browser__handle_resize(struct ui_browser *browser) { ui__refresh_dimensions(false); - 
ui_browser__show(browser, browser->title, ui_helpline__current); + ui_browser__show(browser, browser->title ?: "", ui_helpline__current); ui_browser__refresh(browser); } @@ -287,7 +287,8 @@ int ui_browser__show(struct ui_browser *browser, const char *title, mutex_lock(&ui__lock); __ui_browser__show_title(browser, title); - browser->title = title; + free(browser->title); + browser->title = strdup(title); zfree(&browser->helpline); va_start(ap, helpline); @@ -304,6 +305,7 @@ void ui_browser__hide(struct ui_browser *browser) mutex_lock(&ui__lock); ui_helpline__pop(); zfree(&browser->helpline); + zfree(&browser->title); mutex_unlock(&ui__lock); } diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h index 510ce4554050..6e98d5f8f71c 100644 --- a/tools/perf/ui/browser.h +++ b/tools/perf/ui/browser.h @@ -21,7 +21,7 @@ struct ui_browser { u8 extra_title_lines; int current_color; void *priv; - const char *title; + char *title; char *helpline; const char *no_samples_msg; void (*refresh_dimensions)(struct ui_browser *browser); diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build index 7a1d5ddaf688..2608b5da3167 100644 --- a/tools/perf/ui/browsers/Build +++ b/tools/perf/ui/browsers/Build @@ -1,4 +1,5 @@ perf-y += annotate.o +perf-y += annotate-data.o perf-y += hists.o perf-y += map.o perf-y += scripts.o diff --git a/tools/perf/ui/browsers/annotate-data.c b/tools/perf/ui/browsers/annotate-data.c new file mode 100644 index 000000000000..8d6bf08d371d --- /dev/null +++ b/tools/perf/ui/browsers/annotate-data.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <inttypes.h> +#include <string.h> +#include <linux/zalloc.h> +#include <sys/ttydefaults.h> + +#include "ui/browser.h" +#include "ui/helpline.h" +#include "ui/keysyms.h" +#include "ui/ui.h" +#include "util/annotate.h" +#include "util/annotate-data.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/sort.h" + +struct annotated_data_browser { + struct ui_browser b; + struct list_head entries; + int nr_events; +}; + +struct browser_entry { + struct list_head node; + struct annotated_member *data; + struct type_hist_entry *hists; + int indent; +}; + +static struct annotated_data_browser *get_browser(struct ui_browser *uib) +{ + return container_of(uib, struct annotated_data_browser, b); +} + +static void update_hist_entry(struct type_hist_entry *dst, + struct type_hist_entry *src) +{ + dst->nr_samples += src->nr_samples; + dst->period += src->period; +} + +static int get_member_overhead(struct annotated_data_type *adt, + struct browser_entry *entry, + struct evsel *leader) +{ + struct annotated_member *member = entry->data; + int i, k; + + for (i = 0; i < member->size; i++) { + struct type_hist *h; + struct evsel *evsel; + int offset = member->offset + i; + + for_each_group_evsel(evsel, leader) { + h = adt->histograms[evsel->core.idx]; + k = evsel__group_idx(evsel); + update_hist_entry(&entry->hists[k], &h->addr[offset]); + } + } + return 0; +} + +static int add_child_entries(struct annotated_data_browser *browser, + struct annotated_data_type *adt, + struct annotated_member *member, + struct evsel *evsel, int indent) +{ + struct annotated_member *pos; + struct browser_entry *entry; + int nr_entries = 0; + + entry = zalloc(sizeof(*entry)); + if (entry == NULL) + return -1; + + entry->hists = calloc(browser->nr_events, sizeof(*entry->hists)); + if (entry->hists == NULL) { + free(entry); + return -1; + } + + entry->data = member; + entry->indent = indent; + if (get_member_overhead(adt, entry, 
evsel) < 0) { + free(entry); + return -1; + } + + list_add_tail(&entry->node, &browser->entries); + nr_entries++; + + list_for_each_entry(pos, &member->children, node) { + int nr = add_child_entries(browser, adt, pos, evsel, indent + 1); + + if (nr < 0) + return nr; + + nr_entries += nr; + } + + /* add an entry for the closing bracket ("}") */ + if (!list_empty(&member->children)) { + entry = zalloc(sizeof(*entry)); + if (entry == NULL) + return -1; + + entry->indent = indent; + list_add_tail(&entry->node, &browser->entries); + nr_entries++; + } + + return nr_entries; +} + +static int annotated_data_browser__collect_entries(struct annotated_data_browser *browser) +{ + struct hist_entry *he = browser->b.priv; + struct annotated_data_type *adt = he->mem_type; + struct evsel *evsel = hists_to_evsel(he->hists); + + INIT_LIST_HEAD(&browser->entries); + browser->b.entries = &browser->entries; + browser->b.nr_entries = add_child_entries(browser, adt, &adt->self, + evsel, /*indent=*/0); + return 0; +} + +static void annotated_data_browser__delete_entries(struct annotated_data_browser *browser) +{ + struct browser_entry *pos, *tmp; + + list_for_each_entry_safe(pos, tmp, &browser->entries, node) { + list_del_init(&pos->node); + zfree(&pos->hists); + free(pos); + } +} + +static unsigned int browser__refresh(struct ui_browser *uib) +{ + return ui_browser__list_head_refresh(uib); +} + +static int browser__show(struct ui_browser *uib) +{ + struct hist_entry *he = uib->priv; + struct annotated_data_type *adt = he->mem_type; + struct annotated_data_browser *browser = get_browser(uib); + const char *help = "Press 'h' for help on key bindings"; + char title[256]; + + snprintf(title, sizeof(title), "Annotate type: '%s' (%d samples)", + adt->self.type_name, he->stat.nr_events); + + if (ui_browser__show(uib, title, help) < 0) + return -1; + + /* second line header */ + ui_browser__gotorc_title(uib, 0, 0); + ui_browser__set_color(uib, HE_COLORSET_ROOT); + + if (symbol_conf.show_total_period) + strcpy(title, "Period"); + else if (symbol_conf.show_nr_samples) + strcpy(title, "Samples"); + else + strcpy(title, "Percent"); + + ui_browser__printf(uib, "%*s %10s %10s %10s %s", + 11 * (browser->nr_events - 1), "", + title, "Offset", "Size", "Field"); + ui_browser__write_nstring(uib, "", uib->width); + return 0; +} + +static void browser__write_overhead(struct ui_browser *uib, + struct type_hist *total, + struct type_hist_entry *hist, int row) +{ + u64 period = hist->period; + double percent = total->period ? 
(100.0 * period / total->period) : 0; + bool current = ui_browser__is_current_entry(uib, row); + int nr_samples = 0; + + ui_browser__set_percent_color(uib, percent, current); + + if (symbol_conf.show_total_period) + ui_browser__printf(uib, " %10" PRIu64, period); + else if (symbol_conf.show_nr_samples) + ui_browser__printf(uib, " %10d", nr_samples); + else + ui_browser__printf(uib, " %10.2f", percent); + + ui_browser__set_percent_color(uib, 0, current); +} + +static void browser__write(struct ui_browser *uib, void *entry, int row) +{ + struct annotated_data_browser *browser = get_browser(uib); + struct browser_entry *be = entry; + struct annotated_member *member = be->data; + struct hist_entry *he = uib->priv; + struct annotated_data_type *adt = he->mem_type; + struct evsel *leader = hists_to_evsel(he->hists); + struct evsel *evsel; + + if (member == NULL) { + bool current = ui_browser__is_current_entry(uib, row); + + /* print the closing bracket */ + ui_browser__set_percent_color(uib, 0, current); + ui_browser__write_nstring(uib, "", 11 * browser->nr_events); + ui_browser__printf(uib, " %10s %10s %*s};", + "", "", be->indent * 4, ""); + ui_browser__write_nstring(uib, "", uib->width); + return; + } + + /* print the number */ + for_each_group_evsel(evsel, leader) { + struct type_hist *h = adt->histograms[evsel->core.idx]; + int idx = evsel__group_idx(evsel); + + browser__write_overhead(uib, h, &be->hists[idx], row); + } + + /* print type info */ + if (be->indent == 0 && !member->var_name) { + ui_browser__printf(uib, " %10d %10d %s%s", + member->offset, member->size, + member->type_name, + list_empty(&member->children) ? ";" : " {"); + } else { + ui_browser__printf(uib, " %10d %10d %*s%s\t%s%s", + member->offset, member->size, + be->indent * 4, "", member->type_name, + member->var_name ?: "", + list_empty(&member->children) ? ";" : " {"); + } + /* fill the rest */ + ui_browser__write_nstring(uib, "", uib->width); +} + +static int annotated_data_browser__run(struct annotated_data_browser *browser, + struct evsel *evsel __maybe_unused, + struct hist_browser_timer *hbt) +{ + int delay_secs = hbt ? 
hbt->refresh : 0; + int key; + + if (browser__show(&browser->b) < 0) + return -1; + + while (1) { + key = ui_browser__run(&browser->b, delay_secs); + + switch (key) { + case K_TIMER: + if (hbt) + hbt->timer(hbt->arg); + continue; + case K_F1: + case 'h': + ui_browser__help_window(&browser->b, + "UP/DOWN/PGUP\n" + "PGDN/SPACE Navigate\n" + "</> Move to prev/next symbol\n" + "q/ESC/CTRL+C Exit\n\n"); + continue; + case K_LEFT: + case '<': + case '>': + case K_ESC: + case 'q': + case CTRL('c'): + goto out; + default: + continue; + } + } +out: + ui_browser__hide(&browser->b); + return key; +} + +int hist_entry__annotate_data_tui(struct hist_entry *he, struct evsel *evsel, + struct hist_browser_timer *hbt) +{ + struct annotated_data_browser browser = { + .b = { + .refresh = browser__refresh, + .seek = ui_browser__list_head_seek, + .write = browser__write, + .priv = he, + .extra_title_lines = 1, + }, + .nr_events = 1, + }; + int ret; + + ui_helpline__push("Press ESC to exit"); + + if (evsel__is_group_event(evsel)) + browser.nr_events = evsel->core.nr_members; + + ret = annotated_data_browser__collect_entries(&browser); + if (ret == 0) + ret = annotated_data_browser__run(&browser, evsel, hbt); + + annotated_data_browser__delete_entries(&browser); + + return ret; +} diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index ec5e21932876..ea986430241e 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -49,7 +49,7 @@ static int ui_browser__jumps_percent_color(struct ui_browser *browser, int nr, b if (current && (!browser->use_navkeypressed || browser->navkeypressed)) return HE_COLORSET_SELECTED; - if (nr == notes->max_jump_sources) + if (nr == notes->src->max_jump_sources) return HE_COLORSET_TOP; if (nr > 1) return HE_COLORSET_MEDIUM; @@ -186,7 +186,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) * name right after the '<' token and probably treating this like a * 'call' instruction. 
*/ - target = notes->src->offsets[cursor->ops.target.offset]; + target = annotated_source__get_line(notes->src, cursor->ops.target.offset); if (target == NULL) { ui_helpline__printf("WARN: jump target inconsistency, press 'o', notes->offsets[%#x] = NULL\n", cursor->ops.target.offset); @@ -205,13 +205,13 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS); __ui_browser__line_arrow(browser, - pcnt_width + 2 + notes->widths.addr + width, + pcnt_width + 2 + notes->src->widths.addr + width, from, to); diff = is_fused(ab, cursor); if (diff > 0) { ui_browser__mark_fused(browser, - pcnt_width + 3 + notes->widths.addr + width, + pcnt_width + 3 + notes->src->widths.addr + width, from - diff, diff, to > from); } } @@ -438,7 +438,7 @@ static int sym_title(struct symbol *sym, struct map *map, char *title, size_t sz, int percent_type) { return snprintf(title, sz, "%s %s [Percent: %s]", sym->name, - map__dso(map)->long_name, + dso__long_name(map__dso(map)), percent_type_str(percent_type)); } @@ -967,23 +967,23 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, return -1; dso = map__dso(ms->map); - if (dso->annotate_warned) + if (dso__annotate_warned(dso)) return -1; - if (not_annotated) { + if (not_annotated || !sym->annotate2) { err = symbol__annotate2(ms, evsel, &browser.arch); if (err) { char msg[BUFSIZ]; - dso->annotate_warned = true; + dso__set_annotate_warned(dso); symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); ui__error("Couldn't annotate %s:\n%s", sym->name, msg); - goto out_free_offsets; + return -1; } } ui_helpline__push("Press ESC to exit"); - browser.b.width = notes->src->max_line_len; + browser.b.width = notes->src->widths.max_line_len; browser.b.nr_entries = notes->src->nr_entries; browser.b.entries = ¬es->src->source, browser.b.width += 18; /* Percentage */ @@ -996,8 +996,5 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, if(not_annotated) annotated_source__purge(notes->src); -out_free_offsets: - if(not_annotated) - zfree(¬es->src->offsets); return ret; } diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 0c02b3a8e121..b7219df51236 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -38,6 +38,7 @@ #include "../ui.h" #include "map.h" #include "annotate.h" +#include "annotate-data.h" #include "srcline.h" #include "string2.h" #include "units.h" @@ -2488,7 +2489,7 @@ add_annotate_opt(struct hist_browser *browser __maybe_unused, { struct dso *dso; - if (!ms->map || (dso = map__dso(ms->map)) == NULL || dso->annotate_warned) + if (!ms->map || (dso = map__dso(ms->map)) == NULL || dso__annotate_warned(dso)) return 0; if (!ms->sym) @@ -2506,6 +2507,32 @@ add_annotate_opt(struct hist_browser *browser __maybe_unused, } static int +do_annotate_type(struct hist_browser *browser, struct popup_action *act) +{ + struct hist_entry *he = browser->he_selection; + + hist_entry__annotate_data_tui(he, act->evsel, browser->hbt); + ui_browser__handle_resize(&browser->b); + return 0; +} + +static int +add_annotate_type_opt(struct hist_browser *browser, + struct popup_action *act, char **optstr, + struct hist_entry *he) +{ + if (he == NULL || he->mem_type == NULL || he->mem_type->histograms == NULL) + return 0; + + if (asprintf(optstr, "Annotate type %s", he->mem_type->self.type_name) < 0) + return 0; + + act->evsel = hists_to_evsel(browser->hists); + act->fn = do_annotate_type; + return 1; +} + +static int 
do_zoom_thread(struct hist_browser *browser, struct popup_action *act) { struct thread *thread = act->thread; @@ -2581,7 +2608,7 @@ static int hists_browser__zoom_map(struct hist_browser *browser, struct map *map } else { struct dso *dso = map__dso(map); ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s DSO\"", - __map__is_kernel(map) ? "the Kernel" : dso->short_name); + __map__is_kernel(map) ? "the Kernel" : dso__short_name(dso)); browser->hists->dso_filter = dso; perf_hpp__set_elide(HISTC_DSO, true); pstack__push(browser->pstack, &browser->hists->dso_filter); @@ -2607,7 +2634,7 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act, if (asprintf(optstr, "Zoom %s %s DSO (use the 'k' hotkey to zoom directly into the kernel)", browser->hists->dso_filter ? "out of" : "into", - __map__is_kernel(map) ? "the Kernel" : map__dso(map)->short_name) < 0) + __map__is_kernel(map) ? "the Kernel" : dso__short_name(map__dso(map))) < 0) return 0; act->ms.map = map; @@ -3083,7 +3110,7 @@ do_hotkey: // key came straight from options ui__popup_menu() if (!browser->selection || !browser->selection->map || !map__dso(browser->selection->map) || - map__dso(browser->selection->map)->annotate_warned) { + dso__annotate_warned(map__dso(browser->selection->map))) { continue; } @@ -3307,6 +3334,10 @@ do_hotkey: // key came straight from options ui__popup_menu() browser->he_selection->ip); } skip_annotation: + nr_options += add_annotate_type_opt(browser, + &actions[nr_options], + &options[nr_options], + browser->he_selection); nr_options += add_thread_opt(browser, &actions[nr_options], &options[nr_options], thread); nr_options += add_dso_opt(browser, &actions[nr_options], diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c index 3d1b958d8832..fba55175a935 100644 --- a/tools/perf/ui/browsers/map.c +++ b/tools/perf/ui/browsers/map.c @@ -76,7 +76,7 @@ static int map_browser__run(struct map_browser *browser) { int key; - if (ui_browser__show(&browser->b, map__dso(browser->map)->long_name, + if (ui_browser__show(&browser->b, dso__long_name(map__dso(browser->map)), "Press ESC to exit, %s / to search", verbose > 0 ? "" : "restart with -v to use") < 0) return -1; @@ -106,7 +106,7 @@ int map__browse(struct map *map) { struct map_browser mb = { .b = { - .entries = &map__dso(map)->symbols, + .entries = dso__symbols(map__dso(map)), .refresh = ui_browser__rb_tree_refresh, .seek = ui_browser__rb_tree_seek, .write = map_browser__write, diff --git a/tools/perf/ui/browsers/res_sample.c b/tools/perf/ui/browsers/res_sample.c index 7cb2d6678039..5f60e515b12e 100644 --- a/tools/perf/ui/browsers/res_sample.c +++ b/tools/perf/ui/browsers/res_sample.c @@ -83,7 +83,7 @@ int res_sample_browse(struct res_sample *res_samples, int num_res, r->tid ? "--tid " : "", r->tid ? (sprintf(tidbuf, "%d", r->tid), tidbuf) : "", extra_format, - rstype == A_ASM ? "-F +insn --xed" : + rstype == A_ASM ? "-F +disasm" : rstype == A_SOURCE ? "-F +srcline,+srccode" : "", symbol_conf.inline_name ? 
"--inline" : "", "--show-lost-events ", diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c index 50d45054ed6c..e437d7889de6 100644 --- a/tools/perf/ui/browsers/scripts.c +++ b/tools/perf/ui/browsers/scripts.c @@ -107,7 +107,7 @@ static int list_scripts(char *script_name, bool *custom, if (evsel) attr_to_script(scriptc.extra_format, &evsel->core.attr); add_script_option("Show individual samples", "", &scriptc); - add_script_option("Show individual samples with assembler", "-F +insn --xed", + add_script_option("Show individual samples with assembler", "-F +disasm", &scriptc); add_script_option("Show individual samples with source", "-F +srcline,+srccode", &scriptc); diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c index 394861245fd3..93ce3d47e47e 100644 --- a/tools/perf/ui/gtk/annotate.c +++ b/tools/perf/ui/gtk/annotate.c @@ -28,21 +28,29 @@ static const char *const col_names[] = { static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym, struct disasm_line *dl, int evidx) { + struct annotation *notes; struct sym_hist *symhist; + struct sym_hist_entry *entry; double percent = 0.0; const char *markup; int ret = 0; + u64 nr_samples = 0; strcpy(buf, ""); if (dl->al.offset == (s64) -1) return 0; - symhist = annotation__histogram(symbol__annotation(sym), evidx); - if (!symbol_conf.event_group && !symhist->addr[dl->al.offset].nr_samples) + notes = symbol__annotation(sym); + symhist = annotation__histogram(notes, evidx); + entry = annotated_source__hist_entry(notes->src, evidx, dl->al.offset); + if (entry) + nr_samples = entry->nr_samples; + + if (!symbol_conf.event_group && nr_samples == 0) return 0; - percent = 100.0 * symhist->addr[dl->al.offset].nr_samples / symhist->nr_samples; + percent = 100.0 * nr_samples / symhist->nr_samples; markup = perf_gtk__get_percent_color(percent); if (markup) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 2bf959d08354..685ba2a54fd8 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -25,7 +25,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, const char *fmt, int len, - hpp_snprint_fn print_fn, bool fmt_percent) + hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype) { int ret; struct hists *hists = he->hists; @@ -33,7 +33,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, char *buf = hpp->buf; size_t size = hpp->size; - if (fmt_percent) { + if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) { double percent = 0.0; u64 total = hists__total_period(hists); @@ -41,8 +41,16 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, percent = 100.0 * get_field(he) / total; ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent); - } else + } else if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) { + double average = 0; + + if (he->stat.nr_events) + average = 1.0 * get_field(he) / he->stat.nr_events; + + ret = hpp__call_print_fn(hpp, print_fn, fmt, len, average); + } else { ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he)); + } if (evsel__is_group_event(evsel)) { int prev_idx, idx_delta; @@ -54,6 +62,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, list_for_each_entry(pair, &he->pairs.head, pairs.node) { u64 period = get_field(pair); u64 total = hists__total_period(pair->hists); + int nr_samples = pair->stat.nr_events; if (!total) continue; @@ -66,7 +75,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, * zero-fill group members in the middle which 
* have no sample */ - if (fmt_percent) { + if (fmtype != PERF_HPP_FMT_TYPE__RAW) { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 0.0); } else { @@ -75,9 +84,14 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, } } - if (fmt_percent) { + if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 100.0 * period / total); + } else if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) { + double avg = nr_samples ? (period / nr_samples) : 0; + + ret += hpp__call_print_fn(hpp, print_fn, fmt, + len, avg); } else { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, period); @@ -92,7 +106,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, /* * zero-fill group members at last which have no sample */ - if (fmt_percent) { + if (fmtype != PERF_HPP_FMT_TYPE__RAW) { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 0.0); } else { @@ -114,33 +128,35 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, - const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent) + const char *fmtstr, hpp_snprint_fn print_fn, + enum perf_hpp_fmt_type fmtype) { int len = fmt->user_len ?: fmt->len; if (symbol_conf.field_sep) { return __hpp__fmt(hpp, he, get_field, fmtstr, 1, - print_fn, fmt_percent); + print_fn, fmtype); } - if (fmt_percent) + if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) len -= 2; /* 2 for a space and a % sign */ else len -= 1; - return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent); + return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmtype); } int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, - const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent) + const char *fmtstr, hpp_snprint_fn print_fn, + enum perf_hpp_fmt_type fmtype) { if (!symbol_conf.cumulate_callchain) { int len = fmt->user_len ?: fmt->len; return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A"); } - return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent); + return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype); } static int field_cmp(u64 field_a, u64 field_b) @@ -350,7 +366,7 @@ static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ - hpp_color_scnprintf, true); \ + hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT); \ } #define __HPP_ENTRY_PERCENT_FN(_type, _field) \ @@ -358,7 +374,7 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ - hpp_entry_scnprintf, true); \ + hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT); \ } #define __HPP_SORT_FN(_type, _field) \ @@ -378,7 +394,7 @@ static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ - hpp_color_scnprintf, true); \ + hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT); \ } #define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ @@ -386,7 +402,7 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ - hpp_entry_scnprintf, true); \ + hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT); \ } #define 
__HPP_SORT_ACC_FN(_type, _field) \ @@ -406,7 +422,7 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \ - hpp_entry_scnprintf, false); \ + hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW); \ } #define __HPP_SORT_RAW_FN(_type, _field) \ @@ -416,6 +432,26 @@ static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ return __hpp__sort(a, b, he_get_raw_##_field); \ } +#define __HPP_ENTRY_AVERAGE_FN(_type, _field) \ +static u64 he_get_##_field(struct hist_entry *he) \ +{ \ + return he->stat._field; \ +} \ + \ +static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ + struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.1f", \ + hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__AVERAGE); \ +} + +#define __HPP_SORT_AVERAGE_FN(_type, _field) \ +static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct hist_entry *a, struct hist_entry *b) \ +{ \ + return __hpp__sort(a, b, he_get_##_field); \ +} + #define HPP_PERCENT_FNS(_type, _field) \ __HPP_COLOR_PERCENT_FN(_type, _field) \ @@ -431,6 +467,10 @@ __HPP_SORT_ACC_FN(_type, _field) __HPP_ENTRY_RAW_FN(_type, _field) \ __HPP_SORT_RAW_FN(_type, _field) +#define HPP_AVERAGE_FNS(_type, _field) \ +__HPP_ENTRY_AVERAGE_FN(_type, _field) \ +__HPP_SORT_AVERAGE_FN(_type, _field) + HPP_PERCENT_FNS(overhead, period) HPP_PERCENT_FNS(overhead_sys, period_sys) HPP_PERCENT_FNS(overhead_us, period_us) @@ -441,6 +481,10 @@ HPP_PERCENT_ACC_FNS(overhead_acc, period) HPP_RAW_FNS(samples, nr_events) HPP_RAW_FNS(period, period) +HPP_AVERAGE_FNS(weight1, weight1) +HPP_AVERAGE_FNS(weight2, weight2) +HPP_AVERAGE_FNS(weight3, weight3) + static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused, struct hist_entry *a __maybe_unused, struct hist_entry *b __maybe_unused) @@ -510,7 +554,10 @@ struct perf_hpp_fmt perf_hpp__format[] = { HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US), HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC), HPP__PRINT_FNS("Samples", samples, SAMPLES), - HPP__PRINT_FNS("Period", period, PERIOD) + HPP__PRINT_FNS("Period", period, PERIOD), + HPP__PRINT_FNS("Weight1", weight1, WEIGHT1), + HPP__PRINT_FNS("Weight2", weight2, WEIGHT2), + HPP__PRINT_FNS("Weight3", weight3, WEIGHT3), }; struct perf_hpp_list perf_hpp_list = { @@ -526,6 +573,7 @@ struct perf_hpp_list perf_hpp_list = { #undef HPP_PERCENT_FNS #undef HPP_PERCENT_ACC_FNS #undef HPP_RAW_FNS +#undef HPP_AVERAGE_FNS #undef __HPP_HEADER_FN #undef __HPP_WIDTH_FN @@ -534,9 +582,11 @@ struct perf_hpp_list perf_hpp_list = { #undef __HPP_COLOR_ACC_PERCENT_FN #undef __HPP_ENTRY_ACC_PERCENT_FN #undef __HPP_ENTRY_RAW_FN +#undef __HPP_ENTRY_AVERAGE_FN #undef __HPP_SORT_FN #undef __HPP_SORT_ACC_FN #undef __HPP_SORT_RAW_FN +#undef __HPP_SORT_AVERAGE_FN static void fmt_free(struct perf_hpp_fmt *fmt) { @@ -785,6 +835,12 @@ void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists) fmt->len = 12; break; + case PERF_HPP__WEIGHT1: + case PERF_HPP__WEIGHT2: + case PERF_HPP__WEIGHT3: + fmt->len = 8; + break; + default: break; } diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 8027f450fa3e..da64efd8718f 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -12,6 +12,7 @@ perf-y += config.o perf-y += copyfile.o perf-y += ctype.o perf-y += db-export.o +perf-y += disasm.o perf-y += env.o perf-y += event.o perf-y += evlist.o @@ -32,6 
+33,7 @@ perf-y += perf_regs.o perf-y += perf-regs-arch/ perf-y += path.o perf-y += print_binary.o +perf-y += print_insn.o perf-y += rlimit.o perf-y += argv_split.o perf-y += rbtree.o @@ -71,6 +73,7 @@ perf-y += ordered-events.o perf-y += namespaces.o perf-y += comm.o perf-y += thread.o +perf-y += threads.o perf-y += thread_map.o perf-y += parse-events-flex.o perf-y += parse-events-bison.o @@ -138,6 +141,7 @@ perf-y += term.o perf-y += help-unknown-cmd.o perf-y += dlfilter.o perf-y += mem-events.o +perf-y += mem-info.o perf-y += vsprintf.o perf-y += units.o perf-y += time-utils.o @@ -386,3 +390,17 @@ $(OUTPUT)util/vsprintf.o: ../lib/vsprintf.c FORCE $(OUTPUT)util/list_sort.o: ../lib/list_sort.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) + +ifdef SHELLCHECK + SHELL_TESTS := generate-cmdlist.sh + TEST_LOGS := $(SHELL_TESTS:%=%.shellcheck_log) +else + SHELL_TESTS := + TEST_LOGS := +endif + +$(OUTPUT)%.shellcheck_log: % + $(call rule_mkdir) + $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false) + +perf-y += $(TEST_LOGS) diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c index f22b4f18271c..965da6c0b542 100644 --- a/tools/perf/util/annotate-data.c +++ b/tools/perf/util/annotate-data.c @@ -8,18 +8,214 @@ #include <stdio.h> #include <stdlib.h> #include <inttypes.h> +#include <linux/zalloc.h> +#include "annotate.h" #include "annotate-data.h" #include "debuginfo.h" #include "debug.h" #include "dso.h" +#include "dwarf-regs.h" #include "evsel.h" #include "evlist.h" #include "map.h" #include "map_symbol.h" +#include "sort.h" #include "strbuf.h" #include "symbol.h" #include "symbol_conf.h" +#include "thread.h" + +/* register number of the stack pointer */ +#define X86_REG_SP 7 + +static void delete_var_types(struct die_var_type *var_types); + +enum type_state_kind { + TSR_KIND_INVALID = 0, + TSR_KIND_TYPE, + TSR_KIND_PERCPU_BASE, + TSR_KIND_CONST, + TSR_KIND_POINTER, + TSR_KIND_CANARY, +}; + +#define pr_debug_dtp(fmt, ...) 
\ +do { \ + if (debug_type_profile) \ + pr_info(fmt, ##__VA_ARGS__); \ + else \ + pr_debug3(fmt, ##__VA_ARGS__); \ +} while (0) + +static void pr_debug_type_name(Dwarf_Die *die, enum type_state_kind kind) +{ + struct strbuf sb; + char *str; + Dwarf_Word size = 0; + + if (!debug_type_profile && verbose < 3) + return; + + switch (kind) { + case TSR_KIND_INVALID: + pr_info("\n"); + return; + case TSR_KIND_PERCPU_BASE: + pr_info(" percpu base\n"); + return; + case TSR_KIND_CONST: + pr_info(" constant\n"); + return; + case TSR_KIND_POINTER: + pr_info(" pointer"); + /* it also prints the type info */ + break; + case TSR_KIND_CANARY: + pr_info(" stack canary\n"); + return; + case TSR_KIND_TYPE: + default: + break; + } + + dwarf_aggregate_size(die, &size); + + strbuf_init(&sb, 32); + die_get_typename_from_type(die, &sb); + str = strbuf_detach(&sb, NULL); + pr_info(" type='%s' size=%#lx (die:%#lx)\n", + str, (long)size, (long)dwarf_dieoffset(die)); + free(str); +} + +static void pr_debug_location(Dwarf_Die *die, u64 pc, int reg) +{ + ptrdiff_t off = 0; + Dwarf_Attribute attr; + Dwarf_Addr base, start, end; + Dwarf_Op *ops; + size_t nops; + + if (!debug_type_profile && verbose < 3) + return; + + if (dwarf_attr(die, DW_AT_location, &attr) == NULL) + return; + + while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) { + if (reg != DWARF_REG_PC && end < pc) + continue; + if (reg != DWARF_REG_PC && start > pc) + break; + + pr_info(" variable location: "); + switch (ops->atom) { + case DW_OP_reg0 ... DW_OP_reg31: + pr_info("reg%d\n", ops->atom - DW_OP_reg0); + break; + case DW_OP_breg0 ... DW_OP_breg31: + pr_info("base=reg%d, offset=%#lx\n", + ops->atom - DW_OP_breg0, (long)ops->number); + break; + case DW_OP_regx: + pr_info("reg%ld\n", (long)ops->number); + break; + case DW_OP_bregx: + pr_info("base=reg%ld, offset=%#lx\n", + (long)ops->number, (long)ops->number2); + break; + case DW_OP_fbreg: + pr_info("use frame base, offset=%#lx\n", (long)ops->number); + break; + case DW_OP_addr: + pr_info("address=%#lx\n", (long)ops->number); + break; + default: + pr_info("unknown: code=%#x, number=%#lx\n", + ops->atom, (long)ops->number); + break; + } + break; + } +} + +/* + * Type information in a register, valid when @ok is true. + * The @caller_saved registers are invalidated after a function call. + */ +struct type_state_reg { + Dwarf_Die type; + u32 imm_value; + bool ok; + bool caller_saved; + u8 kind; +}; + +/* Type information in a stack location, dynamically allocated */ +struct type_state_stack { + struct list_head list; + Dwarf_Die type; + int offset; + int size; + bool compound; + u8 kind; +}; + +/* FIXME: This should be arch-dependent */ +#define TYPE_STATE_MAX_REGS 16 + +/* + * State table to maintain type info in each register and stack location. + * It'll be updated when a new variable is allocated or type info is moved + * to a new location (register or stack). As it'd be used with the + * shortest path of basic blocks, it only maintains a single table. 
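+ * For example, on x86 "mov %rax, %rbx" copies the type state tracked for + * rax to the regs[] slot of rbx, while "mov %rax, -0x10(%rbp)" moves it + * to the stack_vars entry at that stack offset.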
+ */ +struct type_state { + /* state of general purpose registers */ + struct type_state_reg regs[TYPE_STATE_MAX_REGS]; + /* state of stack location */ + struct list_head stack_vars; + /* return value register */ + int ret_reg; + /* stack pointer register */ + int stack_reg; +}; + +static bool has_reg_type(struct type_state *state, int reg) +{ + return (unsigned)reg < ARRAY_SIZE(state->regs); +} + +static void init_type_state(struct type_state *state, struct arch *arch) +{ + memset(state, 0, sizeof(*state)); + INIT_LIST_HEAD(&state->stack_vars); + + if (arch__is(arch, "x86")) { + state->regs[0].caller_saved = true; + state->regs[1].caller_saved = true; + state->regs[2].caller_saved = true; + state->regs[4].caller_saved = true; + state->regs[5].caller_saved = true; + state->regs[8].caller_saved = true; + state->regs[9].caller_saved = true; + state->regs[10].caller_saved = true; + state->regs[11].caller_saved = true; + state->ret_reg = 0; + state->stack_reg = X86_REG_SP; + } +} + +static void exit_type_state(struct type_state *state) +{ + struct type_state_stack *stack, *tmp; + + list_for_each_entry_safe(stack, tmp, &state->stack_vars, list) { + list_del(&stack->list); + free(stack); + } +} /* * Compare type name and size to maintain them in a tree. @@ -116,8 +312,8 @@ static void delete_members(struct annotated_member *member) list_for_each_entry_safe(child, tmp, &member->children, node) { list_del(&child->node); delete_members(child); - free(child->type_name); - free(child->var_name); + zfree(&child->type_name); + zfree(&child->var_name); free(child); } } @@ -141,7 +337,7 @@ static struct annotated_data_type *dso__findnew_data_type(struct dso *dso, /* Check existing nodes in dso->data_types tree */ key.self.type_name = type_name; key.self.size = size; - node = rb_find(&key, &dso->data_types, data_type_cmp); + node = rb_find(&key, dso__data_types(dso), data_type_cmp); if (node) { result = rb_entry(node, struct annotated_data_type, node); free(type_name); @@ -162,7 +358,7 @@ static struct annotated_data_type *dso__findnew_data_type(struct dso *dso, if (symbol_conf.annotate_data_member) add_member_types(result, type_die); - rb_add(&result->node, &dso->data_types, data_type_less); + rb_add(&result->node, dso__data_types(dso), data_type_less); return result; } @@ -192,38 +388,52 @@ static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die) } /* The type info will be saved in @type_die */ -static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset) +static int check_variable(struct data_loc_info *dloc, Dwarf_Die *var_die, + Dwarf_Die *type_die, int reg, int offset, bool is_fbreg) { Dwarf_Word size; + bool is_pointer = true; + + if (reg == DWARF_REG_PC) + is_pointer = false; + else if (reg == dloc->fbreg || is_fbreg) + is_pointer = false; + else if (arch__is(dloc->arch, "x86") && reg == X86_REG_SP) + is_pointer = false; /* Get the type of the variable */ if (die_get_real_type(var_die, type_die) == NULL) { - pr_debug("variable has no type\n"); + pr_debug_dtp("variable has no type\n"); ann_data_stat.no_typeinfo++; return -1; } /* - * It expects a pointer type for a memory access. - * Convert to a real type it points to. + * Usually it expects a pointer type for a memory access. + * Convert to a real type it points to. But global variables + * and local variables are accessed directly without a pointer. 
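+ * For example, a global variable found by a PC-relative address or a + * local variable at a frame-base offset already has its own type, while + * an access like 0x10(%rax) needs the pointer type in rax dereferenced + * first.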
*/ - if (dwarf_tag(type_die) != DW_TAG_pointer_type || - die_get_real_type(type_die, type_die) == NULL) { - pr_debug("no pointer or no type\n"); - ann_data_stat.no_typeinfo++; - return -1; + if (is_pointer) { + if ((dwarf_tag(type_die) != DW_TAG_pointer_type && + dwarf_tag(type_die) != DW_TAG_array_type) || + die_get_real_type(type_die, type_die) == NULL) { + pr_debug_dtp("no pointer or no type\n"); + ann_data_stat.no_typeinfo++; + return -1; + } } /* Get the size of the actual type */ if (dwarf_aggregate_size(type_die, &size) < 0) { - pr_debug("type size is unknown\n"); + pr_debug_dtp("type size is unknown\n"); ann_data_stat.invalid_size++; return -1; } /* Minimal sanity check */ if ((unsigned)offset >= size) { - pr_debug("offset: %d is bigger than size: %" PRIu64 "\n", offset, size); + pr_debug_dtp("offset: %d is bigger than size: %"PRIu64"\n", + offset, size); ann_data_stat.bad_offset++; return -1; } @@ -231,37 +441,1307 @@ static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset) return 0; } +static struct type_state_stack *find_stack_state(struct type_state *state, + int offset) +{ + struct type_state_stack *stack; + + list_for_each_entry(stack, &state->stack_vars, list) { + if (offset == stack->offset) + return stack; + + if (stack->compound && stack->offset < offset && + offset < stack->offset + stack->size) + return stack; + } + return NULL; +} + +static void set_stack_state(struct type_state_stack *stack, int offset, u8 kind, + Dwarf_Die *type_die) +{ + int tag; + Dwarf_Word size; + + if (dwarf_aggregate_size(type_die, &size) < 0) + size = 0; + + tag = dwarf_tag(type_die); + + stack->type = *type_die; + stack->size = size; + stack->offset = offset; + stack->kind = kind; + + switch (tag) { + case DW_TAG_structure_type: + case DW_TAG_union_type: + stack->compound = (kind != TSR_KIND_POINTER); + break; + default: + stack->compound = false; + break; + } +} + +static struct type_state_stack *findnew_stack_state(struct type_state *state, + int offset, u8 kind, + Dwarf_Die *type_die) +{ + struct type_state_stack *stack = find_stack_state(state, offset); + + if (stack) { + set_stack_state(stack, offset, kind, type_die); + return stack; + } + + stack = malloc(sizeof(*stack)); + if (stack) { + set_stack_state(stack, offset, kind, type_die); + list_add(&stack->list, &state->stack_vars); + } + return stack; +} + +/* Maintain a cache for quick global variable lookup */ +struct global_var_entry { + struct rb_node node; + char *name; + u64 start; + u64 end; + u64 die_offset; +}; + +static int global_var_cmp(const void *_key, const struct rb_node *node) +{ + const u64 addr = (uintptr_t)_key; + struct global_var_entry *gvar; + + gvar = rb_entry(node, struct global_var_entry, node); + + if (gvar->start <= addr && addr < gvar->end) + return 0; + return gvar->start > addr ? 
-1 : 1; +} + +static bool global_var_less(struct rb_node *node_a, const struct rb_node *node_b) +{ + struct global_var_entry *gvar_a, *gvar_b; + + gvar_a = rb_entry(node_a, struct global_var_entry, node); + gvar_b = rb_entry(node_b, struct global_var_entry, node); + + return gvar_a->start < gvar_b->start; +} + +static struct global_var_entry *global_var__find(struct data_loc_info *dloc, u64 addr) +{ + struct dso *dso = map__dso(dloc->ms->map); + struct rb_node *node; + + node = rb_find((void *)(uintptr_t)addr, dso__global_vars(dso), global_var_cmp); + if (node == NULL) + return NULL; + + return rb_entry(node, struct global_var_entry, node); +} + +static bool global_var__add(struct data_loc_info *dloc, u64 addr, + const char *name, Dwarf_Die *type_die) +{ + struct dso *dso = map__dso(dloc->ms->map); + struct global_var_entry *gvar; + Dwarf_Word size; + + if (dwarf_aggregate_size(type_die, &size) < 0) + return false; + + gvar = malloc(sizeof(*gvar)); + if (gvar == NULL) + return false; + + gvar->name = name ? strdup(name) : NULL; + if (name && gvar->name == NULL) { + free(gvar); + return false; + } + + gvar->start = addr; + gvar->end = addr + size; + gvar->die_offset = dwarf_dieoffset(type_die); + + rb_add(&gvar->node, dso__global_vars(dso), global_var_less); + return true; +} + +void global_var_type__tree_delete(struct rb_root *root) +{ + struct global_var_entry *gvar; + + while (!RB_EMPTY_ROOT(root)) { + struct rb_node *node = rb_first(root); + + rb_erase(node, root); + gvar = rb_entry(node, struct global_var_entry, node); + zfree(&gvar->name); + free(gvar); + } +} + +static bool get_global_var_info(struct data_loc_info *dloc, u64 addr, + const char **var_name, int *var_offset) +{ + struct addr_location al; + struct symbol *sym; + u64 mem_addr; + + /* Kernel symbols might be relocated */ + mem_addr = addr + map__reloc(dloc->ms->map); + + addr_location__init(&al); + sym = thread__find_symbol_fb(dloc->thread, dloc->cpumode, + mem_addr, &al); + if (sym) { + *var_name = sym->name; + /* Calculate type offset from the start of the variable */ + *var_offset = mem_addr - map__unmap_ip(al.map, sym->start); + } else { + *var_name = NULL; + } + addr_location__exit(&al); + if (*var_name == NULL) + return false; + + return true; +} + +static void global_var__collect(struct data_loc_info *dloc) +{ + Dwarf *dwarf = dloc->di->dbg; + Dwarf_Off off, next_off; + Dwarf_Die cu_die, type_die; + size_t header_size; + + /* Iterate over all CUs and collect global variables that have no location in a register. 
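+ * Each variable found is cached in the dso's global_vars tree with its + * address range, so later lookups can use global_var__find() instead of + * walking the DWARF info again.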
*/ + off = 0; + while (dwarf_nextcu(dwarf, off, &next_off, &header_size, + NULL, NULL, NULL) == 0) { + struct die_var_type *var_types = NULL; + struct die_var_type *pos; + + if (dwarf_offdie(dwarf, off + header_size, &cu_die) == NULL) { + off = next_off; + continue; + } + + die_collect_global_vars(&cu_die, &var_types); + + for (pos = var_types; pos; pos = pos->next) { + const char *var_name = NULL; + int var_offset = 0; + + if (pos->reg != -1) + continue; + + if (!dwarf_offdie(dwarf, pos->die_off, &type_die)) + continue; + + if (!get_global_var_info(dloc, pos->addr, &var_name, + &var_offset)) + continue; + + if (var_offset != 0) + continue; + + global_var__add(dloc, pos->addr, var_name, &type_die); + } + + delete_var_types(var_types); + + off = next_off; + } +} + +static bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc, + u64 ip, u64 var_addr, int *var_offset, + Dwarf_Die *type_die) +{ + u64 pc; + int offset; + const char *var_name = NULL; + struct global_var_entry *gvar; + struct dso *dso = map__dso(dloc->ms->map); + Dwarf_Die var_die; + + if (RB_EMPTY_ROOT(dso__global_vars(dso))) + global_var__collect(dloc); + + gvar = global_var__find(dloc, var_addr); + if (gvar) { + if (!dwarf_offdie(dloc->di->dbg, gvar->die_offset, type_die)) + return false; + + *var_offset = var_addr - gvar->start; + return true; + } + + /* Try to get the variable by address first */ + if (die_find_variable_by_addr(cu_die, var_addr, &var_die, &offset) && + check_variable(dloc, &var_die, type_die, DWARF_REG_PC, offset, + /*is_fbreg=*/false) == 0) { + var_name = dwarf_diename(&var_die); + *var_offset = offset; + goto ok; + } + + if (!get_global_var_info(dloc, var_addr, &var_name, var_offset)) + return false; + + pc = map__rip_2objdump(dloc->ms->map, ip); + + /* Try to get the name of global variable */ + if (die_find_variable_at(cu_die, var_name, pc, &var_die) && + check_variable(dloc, &var_die, type_die, DWARF_REG_PC, *var_offset, + /*is_fbreg=*/false) == 0) + goto ok; + + return false; + +ok: + /* The address should point to the start of the variable */ + global_var__add(dloc, var_addr - *var_offset, var_name, type_die); + return true; +} + +/** + * update_var_state - Update type state using given variables + * @state: type state table + * @dloc: data location info + * @addr: instruction address to match with variable + * @insn_offset: instruction offset (for debug) + * @var_types: list of variables with type info + * + * This function fills the @state table using @var_types info. Each variable + * is used only at the given location and updates an entry in the table. 
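+ * For example, a variable located in a register updates the matching + * regs[] slot, while one at a frame-base offset updates (or creates) a + * stack_vars entry for that offset.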
+ */ +static void update_var_state(struct type_state *state, struct data_loc_info *dloc, + u64 addr, u64 insn_offset, struct die_var_type *var_types) +{ + Dwarf_Die mem_die; + struct die_var_type *var; + int fbreg = dloc->fbreg; + int fb_offset = 0; + + if (dloc->fb_cfa) { + if (die_get_cfa(dloc->di->dbg, addr, &fbreg, &fb_offset) < 0) + fbreg = -1; + } + + for (var = var_types; var != NULL; var = var->next) { + if (var->addr != addr) + continue; + /* Get the type DIE using the offset */ + if (!dwarf_offdie(dloc->di->dbg, var->die_off, &mem_die)) + continue; + + if (var->reg == DWARF_REG_FB) { + findnew_stack_state(state, var->offset, TSR_KIND_TYPE, + &mem_die); + + pr_debug_dtp("var [%"PRIx64"] -%#x(stack)", + insn_offset, -var->offset); + pr_debug_type_name(&mem_die, TSR_KIND_TYPE); + } else if (var->reg == fbreg) { + findnew_stack_state(state, var->offset - fb_offset, + TSR_KIND_TYPE, &mem_die); + + pr_debug_dtp("var [%"PRIx64"] -%#x(stack)", + insn_offset, -var->offset + fb_offset); + pr_debug_type_name(&mem_die, TSR_KIND_TYPE); + } else if (has_reg_type(state, var->reg) && var->offset == 0) { + struct type_state_reg *reg; + + reg = &state->regs[var->reg]; + reg->type = mem_die; + reg->kind = TSR_KIND_TYPE; + reg->ok = true; + + pr_debug_dtp("var [%"PRIx64"] reg%d", + insn_offset, var->reg); + pr_debug_type_name(&mem_die, TSR_KIND_TYPE); + } + } +} + +static void update_insn_state_x86(struct type_state *state, + struct data_loc_info *dloc, Dwarf_Die *cu_die, + struct disasm_line *dl) +{ + struct annotated_insn_loc loc; + struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE]; + struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET]; + struct type_state_reg *tsr; + Dwarf_Die type_die; + u32 insn_offset = dl->al.offset; + int fbreg = dloc->fbreg; + int fboff = 0; + + if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0) + return; + + if (ins__is_call(&dl->ins)) { + struct symbol *func = dl->ops.target.sym; + + if (func == NULL) + return; + + /* __fentry__ will preserve all registers */ + if (!strcmp(func->name, "__fentry__")) + return; + + pr_debug_dtp("call [%x] %s\n", insn_offset, func->name); + + /* Otherwise invalidate caller-saved registers after call */ + for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) { + if (state->regs[i].caller_saved) + state->regs[i].ok = false; + } + + /* Update register with the return type (if any) */ + if (die_find_func_rettype(cu_die, func->name, &type_die)) { + tsr = &state->regs[state->ret_reg]; + tsr->type = type_die; + tsr->kind = TSR_KIND_TYPE; + tsr->ok = true; + + pr_debug_dtp("call [%x] return -> reg%d", + insn_offset, state->ret_reg); + pr_debug_type_name(&type_die, tsr->kind); + } + return; + } + + if (!strncmp(dl->ins.name, "add", 3)) { + u64 imm_value = -1ULL; + int offset; + const char *var_name = NULL; + struct map_symbol *ms = dloc->ms; + u64 ip = ms->sym->start + dl->al.offset; + + if (!has_reg_type(state, dst->reg1)) + return; + + tsr = &state->regs[dst->reg1]; + + if (src->imm) + imm_value = src->offset; + else if (has_reg_type(state, src->reg1) && + state->regs[src->reg1].kind == TSR_KIND_CONST) + imm_value = state->regs[src->reg1].imm_value; + else if (src->reg1 == DWARF_REG_PC) { + u64 var_addr = annotate_calc_pcrel(dloc->ms, ip, + src->offset, dl); + + if (get_global_var_info(dloc, var_addr, + &var_name, &offset) && + !strcmp(var_name, "this_cpu_off") && + tsr->kind == TSR_KIND_CONST) { + tsr->kind = TSR_KIND_PERCPU_BASE; + imm_value = tsr->imm_value; + } + } + else + return; + + if (tsr->kind != TSR_KIND_PERCPU_BASE) + 
return; + + if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset, + &type_die) && offset == 0) { + /* + * This is not a pointer type, but it should be treated + * as a pointer. + */ + tsr->type = type_die; + tsr->kind = TSR_KIND_POINTER; + tsr->ok = true; + + pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d", + insn_offset, imm_value, dst->reg1); + pr_debug_type_name(&tsr->type, tsr->kind); + } + return; + } + + if (strncmp(dl->ins.name, "mov", 3)) + return; + + if (dloc->fb_cfa) { + u64 ip = dloc->ms->sym->start + dl->al.offset; + u64 pc = map__rip_2objdump(dloc->ms->map, ip); + + if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0) + fbreg = -1; + } + + /* Case 1. register to register or segment:offset to register transfers */ + if (!src->mem_ref && !dst->mem_ref) { + if (!has_reg_type(state, dst->reg1)) + return; + + tsr = &state->regs[dst->reg1]; + if (dso__kernel(map__dso(dloc->ms->map)) && + src->segment == INSN_SEG_X86_GS && src->imm) { + u64 ip = dloc->ms->sym->start + dl->al.offset; + u64 var_addr; + int offset; + + /* + * In kernel, %gs points to a per-cpu region for the + * current CPU. Access with a constant offset should + * be treated as a global variable access. + */ + var_addr = src->offset; + + if (var_addr == 40) { + tsr->kind = TSR_KIND_CANARY; + tsr->ok = true; + + pr_debug_dtp("mov [%x] stack canary -> reg%d\n", + insn_offset, dst->reg1); + return; + } + + if (!get_global_var_type(cu_die, dloc, ip, var_addr, + &offset, &type_die) || + !die_get_member_type(&type_die, offset, &type_die)) { + tsr->ok = false; + return; + } + + tsr->type = type_die; + tsr->kind = TSR_KIND_TYPE; + tsr->ok = true; + + pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d", + insn_offset, var_addr, dst->reg1); + pr_debug_type_name(&tsr->type, tsr->kind); + return; + } + + if (src->imm) { + tsr->kind = TSR_KIND_CONST; + tsr->imm_value = src->offset; + tsr->ok = true; + + pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n", + insn_offset, tsr->imm_value, dst->reg1); + return; + } + + if (!has_reg_type(state, src->reg1) || + !state->regs[src->reg1].ok) { + tsr->ok = false; + return; + } + + tsr->type = state->regs[src->reg1].type; + tsr->kind = state->regs[src->reg1].kind; + tsr->ok = true; + + pr_debug_dtp("mov [%x] reg%d -> reg%d", + insn_offset, src->reg1, dst->reg1); + pr_debug_type_name(&tsr->type, tsr->kind); + } + /* Case 2. 
memory to register transfers */ + if (src->mem_ref && !dst->mem_ref) { + int sreg = src->reg1; + + if (!has_reg_type(state, dst->reg1)) + return; + + tsr = &state->regs[dst->reg1]; + +retry: + /* Check stack variables with offset */ + if (sreg == fbreg) { + struct type_state_stack *stack; + int offset = src->offset - fboff; + + stack = find_stack_state(state, offset); + if (stack == NULL) { + tsr->ok = false; + return; + } else if (!stack->compound) { + tsr->type = stack->type; + tsr->kind = stack->kind; + tsr->ok = true; + } else if (die_get_member_type(&stack->type, + offset - stack->offset, + &type_die)) { + tsr->type = type_die; + tsr->kind = TSR_KIND_TYPE; + tsr->ok = true; + } else { + tsr->ok = false; + return; + } + + pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d", + insn_offset, -offset, dst->reg1); + pr_debug_type_name(&tsr->type, tsr->kind); + } + /* And then dereference the pointer if it has one */ + else if (has_reg_type(state, sreg) && state->regs[sreg].ok && + state->regs[sreg].kind == TSR_KIND_TYPE && + die_deref_ptr_type(&state->regs[sreg].type, + src->offset, &type_die)) { + tsr->type = type_die; + tsr->kind = TSR_KIND_TYPE; + tsr->ok = true; + + pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d", + insn_offset, src->offset, sreg, dst->reg1); + pr_debug_type_name(&tsr->type, tsr->kind); + } + /* Or check if it's a global variable */ + else if (sreg == DWARF_REG_PC) { + struct map_symbol *ms = dloc->ms; + u64 ip = ms->sym->start + dl->al.offset; + u64 addr; + int offset; + + addr = annotate_calc_pcrel(ms, ip, src->offset, dl); + + if (!get_global_var_type(cu_die, dloc, ip, addr, &offset, + &type_die) || + !die_get_member_type(&type_die, offset, &type_die)) { + tsr->ok = false; + return; + } + + tsr->type = type_die; + tsr->kind = TSR_KIND_TYPE; + tsr->ok = true; + + pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d", + insn_offset, addr, dst->reg1); + pr_debug_type_name(&type_die, tsr->kind); + } + /* And check percpu access with base register */ + else if (has_reg_type(state, sreg) && + state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) { + u64 ip = dloc->ms->sym->start + dl->al.offset; + u64 var_addr = src->offset; + int offset; + + if (src->multi_regs) { + int reg2 = (sreg == src->reg1) ? src->reg2 : src->reg1; + + if (has_reg_type(state, reg2) && state->regs[reg2].ok && + state->regs[reg2].kind == TSR_KIND_CONST) + var_addr += state->regs[reg2].imm_value; + } + + /* + * In kernel, %gs points to a per-cpu region for the + * current CPU. Access with a constant offset should + * be treated as a global variable access. 
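+ * For example, when the base register holds the per-cpu base, an access + * like 0x34740(reg) resolves to the global per-cpu variable at that + * offset.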
+ */ + if (get_global_var_type(cu_die, dloc, ip, var_addr, + &offset, &type_die) && + die_get_member_type(&type_die, offset, &type_die)) { + tsr->type = type_die; + tsr->kind = TSR_KIND_TYPE; + tsr->ok = true; + + if (src->multi_regs) { + pr_debug_dtp("mov [%x] percpu %#x(reg%d,reg%d) -> reg%d", + insn_offset, src->offset, src->reg1, + src->reg2, dst->reg1); + } else { + pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d", + insn_offset, src->offset, sreg, dst->reg1); + } + pr_debug_type_name(&tsr->type, tsr->kind); + } else { + tsr->ok = false; + } + } + /* And then dereference the calculated pointer if it has one */ + else if (has_reg_type(state, sreg) && state->regs[sreg].ok && + state->regs[sreg].kind == TSR_KIND_POINTER && + die_get_member_type(&state->regs[sreg].type, + src->offset, &type_die)) { + tsr->type = type_die; + tsr->kind = TSR_KIND_TYPE; + tsr->ok = true; + + pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d", + insn_offset, src->offset, sreg, dst->reg1); + pr_debug_type_name(&tsr->type, tsr->kind); + } + /* Or try another register if any */ + else if (src->multi_regs && sreg == src->reg1 && + src->reg1 != src->reg2) { + sreg = src->reg2; + goto retry; + } + else { + int offset; + const char *var_name = NULL; + + /* it might be a per-cpu variable (in kernel) access */ + if (src->offset < 0) { + if (get_global_var_info(dloc, (s64)src->offset, + &var_name, &offset) && + !strcmp(var_name, "__per_cpu_offset")) { + tsr->kind = TSR_KIND_PERCPU_BASE; + + pr_debug_dtp("mov [%x] percpu base reg%d\n", + insn_offset, dst->reg1); + } + } + + tsr->ok = false; + } + } + /* Case 3. register to memory transfers */ + if (!src->mem_ref && dst->mem_ref) { + if (!has_reg_type(state, src->reg1) || + !state->regs[src->reg1].ok) + return; + + /* Check stack variables with offset */ + if (dst->reg1 == fbreg) { + struct type_state_stack *stack; + int offset = dst->offset - fboff; + + tsr = &state->regs[src->reg1]; + + stack = find_stack_state(state, offset); + if (stack) { + /* + * The source register is likely to hold the type + * of a member if it's a compound type. Do not + * update the stack variable type since we can + * get the member type later by using + * die_get_member_type(). + */ + if (!stack->compound) + set_stack_state(stack, offset, tsr->kind, + &tsr->type); + } else { + findnew_stack_state(state, offset, tsr->kind, + &tsr->type); + } + + pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)", + insn_offset, src->reg1, -offset); + pr_debug_type_name(&tsr->type, tsr->kind); + } + /* + * Ignore other transfers since it'd set a value in a struct + * and won't change the type. + */ + } + /* Case 4. memory to memory transfers (not handled for now) */ +} + +/** + * update_insn_state - Update type state for an instruction + * @state: type state table + * @dloc: data location info + * @cu_die: compile unit debug entry + * @dl: disasm line for the instruction + * + * This function updates the @state table for the target operand of the + * instruction at @dl if it transfers the type like MOV on x86. Since it + * tracks the type, it doesn't care about instructions that only change + * values, like ADD/SUB/MUL/DIV and INC/DEC. + * + * Note that ops->reg2 is only available when both mem_ref and multi_regs + * are true. 
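+ * For example, "mov 0x10(%rax), %rbx" transfers the type of the member + * at offset 0x10 of the type pointed to by rax into the entry for rbx.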
+ */ +static void update_insn_state(struct type_state *state, struct data_loc_info *dloc, + Dwarf_Die *cu_die, struct disasm_line *dl) +{ + if (arch__is(dloc->arch, "x86")) + update_insn_state_x86(state, dloc, cu_die, dl); +} + +/* + * Prepend this_blocks (from the outer scope) to full_blocks, removing + * the duplicated disasm line. + */ +static void prepend_basic_blocks(struct list_head *this_blocks, + struct list_head *full_blocks) +{ + struct annotated_basic_block *first_bb, *last_bb; + + last_bb = list_last_entry(this_blocks, typeof(*last_bb), list); + first_bb = list_first_entry(full_blocks, typeof(*first_bb), list); + + if (list_empty(full_blocks)) + goto out; + + /* Last insn in this_blocks should be the same as the first insn in full_blocks */ + if (last_bb->end != first_bb->begin) { + pr_debug("prepend basic blocks: mismatched disasm line %"PRIx64" -> %"PRIx64"\n", + last_bb->end->al.offset, first_bb->begin->al.offset); + goto out; + } + + /* Does the basic block have only one disasm_line? */ + if (last_bb->begin == last_bb->end) { + list_del(&last_bb->list); + free(last_bb); + goto out; + } + + /* Point to the insn before the last when adding this block to full_blocks */ + last_bb->end = list_prev_entry(last_bb->end, al.node); + +out: + list_splice(this_blocks, full_blocks); +} + +static void delete_basic_blocks(struct list_head *basic_blocks) +{ + struct annotated_basic_block *bb, *tmp; + + list_for_each_entry_safe(bb, tmp, basic_blocks, list) { + list_del(&bb->list); + free(bb); + } +} + +/* Make sure all variables have a valid start address */ +static void fixup_var_address(struct die_var_type *var_types, u64 addr) +{ + while (var_types) { + /* + * Some variables have no address range, meaning they're always + * available in the whole scope. Let's adjust the start + * address to the start of the scope. + */ + if (var_types->addr == 0) + var_types->addr = addr; + + var_types = var_types->next; + } +} + +static void delete_var_types(struct die_var_type *var_types) +{ + while (var_types) { + struct die_var_type *next = var_types->next; + + free(var_types); + var_types = next; + } +} + +/* should match is_stack_canary() in util/annotate.c */ +static void setup_stack_canary(struct data_loc_info *dloc) +{ + if (arch__is(dloc->arch, "x86")) { + dloc->op->segment = INSN_SEG_X86_GS; + dloc->op->imm = true; + dloc->op->offset = 40; + } +} + +/* + * We're at the target address; check if it has a matching type. + * It returns 1 if found, 0 if not, or -1 if not found but there's no + * need to repeat the search. The last case is for per-cpu variables, + * which are similar to global variables, so no additional info is needed. + */ +static int check_matching_type(struct type_state *state, + struct data_loc_info *dloc, + Dwarf_Die *cu_die, Dwarf_Die *type_die) +{ + Dwarf_Word size; + u32 insn_offset = dloc->ip - dloc->ms->sym->start; + int reg = dloc->op->reg1; + + pr_debug_dtp("chk [%x] reg%d offset=%#x ok=%d kind=%d", + insn_offset, reg, dloc->op->offset, + state->regs[reg].ok, state->regs[reg].kind); + + if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_TYPE) { + int tag = dwarf_tag(&state->regs[reg].type); + + /* + * Normal registers should hold a pointer (or array) to + * dereference a memory location. 
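+ * For example, for "mov 0x10(%rax), %rbx" the type tracked for rax must + * be a pointer (or array) whose target type covers offset 0x10.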
+ */ + if (tag != DW_TAG_pointer_type && tag != DW_TAG_array_type) { + if (dloc->op->offset < 0 && reg != state->stack_reg) + goto check_kernel; + + pr_debug_dtp("\n"); + return -1; + } + + pr_debug_dtp("\n"); + + /* Remove the pointer and get the target type */ + if (die_get_real_type(&state->regs[reg].type, type_die) == NULL) + return -1; + + dloc->type_offset = dloc->op->offset; + + /* Get the size of the actual type */ + if (dwarf_aggregate_size(type_die, &size) < 0 || + (unsigned)dloc->type_offset >= size) + return -1; + + return 1; + } + + if (reg == dloc->fbreg) { + struct type_state_stack *stack; + + pr_debug_dtp(" fbreg\n"); + + stack = find_stack_state(state, dloc->type_offset); + if (stack == NULL) + return 0; + + if (stack->kind == TSR_KIND_CANARY) { + setup_stack_canary(dloc); + return -1; + } + + if (stack->kind != TSR_KIND_TYPE) + return 0; + + *type_die = stack->type; + /* Update the type offset from the start of the slot */ + dloc->type_offset -= stack->offset; + + return 1; + } + + if (dloc->fb_cfa) { + struct type_state_stack *stack; + u64 pc = map__rip_2objdump(dloc->ms->map, dloc->ip); + int fbreg, fboff; + + pr_debug_dtp(" cfa\n"); + + if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0) + fbreg = -1; + + if (reg != fbreg) + return 0; + + stack = find_stack_state(state, dloc->type_offset - fboff); + if (stack == NULL) + return 0; + + if (stack->kind == TSR_KIND_CANARY) { + setup_stack_canary(dloc); + return -1; + } + + if (stack->kind != TSR_KIND_TYPE) + return 0; + + *type_die = stack->type; + /* Update the type offset from the start of the slot */ + dloc->type_offset -= fboff + stack->offset; + + return 1; + } + + if (state->regs[reg].kind == TSR_KIND_PERCPU_BASE) { + u64 var_addr = dloc->op->offset; + int var_offset; + + pr_debug_dtp(" percpu var\n"); + + if (dloc->op->multi_regs) { + int reg2 = dloc->op->reg2; + + if (dloc->op->reg2 == reg) + reg2 = dloc->op->reg1; + + if (has_reg_type(state, reg2) && state->regs[reg2].ok && + state->regs[reg2].kind == TSR_KIND_CONST) + var_addr += state->regs[reg2].imm_value; + } + + if (get_global_var_type(cu_die, dloc, dloc->ip, var_addr, + &var_offset, type_die)) { + dloc->type_offset = var_offset; + return 1; + } + /* No need to retry per-cpu (global) variables */ + return -1; + } + + if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_POINTER) { + pr_debug_dtp(" percpu ptr\n"); + + /* + * It's actually a pointer, but the address was calculated using + * some arithmetic, so it points to the actual type already. + */ + *type_die = state->regs[reg].type; + + dloc->type_offset = dloc->op->offset; + + /* Get the size of the actual type */ + if (dwarf_aggregate_size(type_die, &size) < 0 || + (unsigned)dloc->type_offset >= size) + return -1; + + return 1; + } + + if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_CANARY) { + pr_debug_dtp(" stack canary\n"); + + /* + * This is a saved value of the stack canary, which will be handled + * in the outer logic when it returns failure here. Pretend it's + * from the stack canary directly. 
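+ * (on x86 this means faking a "%gs:40" access, see setup_stack_canary())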
+ */ + setup_stack_canary(dloc); + + return -1; + } + +check_kernel: + if (dso__kernel(map__dso(dloc->ms->map))) { + u64 addr; + int offset; + + /* Direct this-cpu access like "%gs:0x34740" */ + if (dloc->op->segment == INSN_SEG_X86_GS && dloc->op->imm && + arch__is(dloc->arch, "x86")) { + pr_debug_dtp(" this-cpu var\n"); + + addr = dloc->op->offset; + + if (get_global_var_type(cu_die, dloc, dloc->ip, addr, + &offset, type_die)) { + dloc->type_offset = offset; + return 1; + } + return -1; + } + + /* Access to global variable like "-0x7dcf0500(,%rdx,8)" */ + if (dloc->op->offset < 0 && reg != state->stack_reg) { + addr = (s64) dloc->op->offset; + + if (get_global_var_type(cu_die, dloc, dloc->ip, addr, + &offset, type_die)) { + pr_debug_dtp(" global var\n"); + + dloc->type_offset = offset; + return 1; + } + pr_debug_dtp(" negative offset\n"); + return -1; + } + } + + pr_debug_dtp("\n"); + return 0; +} + +/* Iterate instructions in basic blocks and update type table */ +static int find_data_type_insn(struct data_loc_info *dloc, + struct list_head *basic_blocks, + struct die_var_type *var_types, + Dwarf_Die *cu_die, Dwarf_Die *type_die) +{ + struct type_state state; + struct symbol *sym = dloc->ms->sym; + struct annotation *notes = symbol__annotation(sym); + struct annotated_basic_block *bb; + int ret = 0; + + init_type_state(&state, dloc->arch); + + list_for_each_entry(bb, basic_blocks, list) { + struct disasm_line *dl = bb->begin; + + BUG_ON(bb->begin->al.offset == -1 || bb->end->al.offset == -1); + + pr_debug_dtp("bb: [%"PRIx64" - %"PRIx64"]\n", + bb->begin->al.offset, bb->end->al.offset); + + list_for_each_entry_from(dl, ¬es->src->source, al.node) { + u64 this_ip = sym->start + dl->al.offset; + u64 addr = map__rip_2objdump(dloc->ms->map, this_ip); + + /* Skip comment or debug info lines */ + if (dl->al.offset == -1) + continue; + + /* Update variable type at this address */ + update_var_state(&state, dloc, addr, dl->al.offset, var_types); + + if (this_ip == dloc->ip) { + ret = check_matching_type(&state, dloc, + cu_die, type_die); + goto out; + } + + /* Update type table after processing the instruction */ + update_insn_state(&state, dloc, cu_die, dl); + if (dl == bb->end) + break; + } + } + +out: + exit_type_state(&state); + return ret; +} + +/* + * Construct a list of basic blocks for each scope with variables and try to find + * the data type by updating a type state table through instructions. 
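+ * Starting from the inner-most scope, basic blocks are collected from + * the scope start down to the target instruction and replayed in order + * until the type at the target location is determined.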
+ */ +static int find_data_type_block(struct data_loc_info *dloc, + Dwarf_Die *cu_die, Dwarf_Die *scopes, + int nr_scopes, Dwarf_Die *type_die) +{ + LIST_HEAD(basic_blocks); + struct die_var_type *var_types = NULL; + u64 src_ip, dst_ip, prev_dst_ip; + int ret = -1; + + /* TODO: other architecture support */ + if (!arch__is(dloc->arch, "x86")) + return -1; + + prev_dst_ip = dst_ip = dloc->ip; + for (int i = nr_scopes - 1; i >= 0; i--) { + Dwarf_Addr base, start, end; + LIST_HEAD(this_blocks); + int found; + + if (dwarf_ranges(&scopes[i], 0, &base, &start, &end) < 0) + break; + + pr_debug_dtp("scope: [%d/%d] (die:%lx)\n", + i + 1, nr_scopes, (long)dwarf_dieoffset(&scopes[i])); + src_ip = map__objdump_2rip(dloc->ms->map, start); + +again: + /* Get basic blocks for this scope */ + if (annotate_get_basic_blocks(dloc->ms->sym, src_ip, dst_ip, + &this_blocks) < 0) { + /* Try previous block if they are not connected */ + if (prev_dst_ip != dst_ip) { + dst_ip = prev_dst_ip; + goto again; + } + + pr_debug_dtp("cannot find a basic block from %"PRIx64" to %"PRIx64"\n", + src_ip - dloc->ms->sym->start, + dst_ip - dloc->ms->sym->start); + continue; + } + prepend_basic_blocks(&this_blocks, &basic_blocks); + + /* Get variable info for this scope and add to var_types list */ + die_collect_vars(&scopes[i], &var_types); + fixup_var_address(var_types, start); + + /* Find from start of this scope to the target instruction */ + found = find_data_type_insn(dloc, &basic_blocks, var_types, + cu_die, type_die); + if (found > 0) { + char buf[64]; + + if (dloc->op->multi_regs) + snprintf(buf, sizeof(buf), "reg%d, reg%d", + dloc->op->reg1, dloc->op->reg2); + else + snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1); + + pr_debug_dtp("found by insn track: %#x(%s) type-offset=%#x\n", + dloc->op->offset, buf, dloc->type_offset); + pr_debug_type_name(type_die, TSR_KIND_TYPE); + ret = 0; + break; + } + + if (found < 0) + break; + + /* Go up to the next scope and find blocks to the start */ + prev_dst_ip = dst_ip; + dst_ip = src_ip; + } + + delete_basic_blocks(&basic_blocks); + delete_var_types(var_types); + return ret; +} + /* The result will be saved in @type_die */ -static int find_data_type_die(struct debuginfo *di, u64 pc, - int reg, int offset, Dwarf_Die *type_die) +static int find_data_type_die(struct data_loc_info *dloc, Dwarf_Die *type_die) { + struct annotated_op_loc *loc = dloc->op; Dwarf_Die cu_die, var_die; Dwarf_Die *scopes = NULL; + int reg, offset; int ret = -1; int i, nr_scopes; + int fbreg = -1; + int fb_offset = 0; + bool is_fbreg = false; + u64 pc; + char buf[64]; + + if (dloc->op->multi_regs) + snprintf(buf, sizeof(buf), "reg%d, reg%d", dloc->op->reg1, dloc->op->reg2); + else if (dloc->op->reg1 == DWARF_REG_PC) + snprintf(buf, sizeof(buf), "PC"); + else + snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1); + + pr_debug_dtp("-----------------------------------------------------------\n"); + pr_debug_dtp("find data type for %#x(%s) at %s+%#"PRIx64"\n", + dloc->op->offset, buf, dloc->ms->sym->name, + dloc->ip - dloc->ms->sym->start); + + /* + * IP is a relative instruction address from the start of the map, as + * it can be randomized/relocated, it needs to translate to PC which is + * a file address for DWARF processing. 
+ */ + pc = map__rip_2objdump(dloc->ms->map, dloc->ip); /* Get a compile_unit for this address */ - if (!find_cu_die(di, pc, &cu_die)) { - pr_debug("cannot find CU for address %" PRIx64 "\n", pc); + if (!find_cu_die(dloc->di, pc, &cu_die)) { + pr_debug_dtp("cannot find CU for address %"PRIx64"\n", pc); ann_data_stat.no_cuinfo++; return -1; } + reg = loc->reg1; + offset = loc->offset; + + pr_debug_dtp("CU for %s (die:%#lx)\n", + dwarf_diename(&cu_die), (long)dwarf_dieoffset(&cu_die)); + + if (reg == DWARF_REG_PC) { + if (get_global_var_type(&cu_die, dloc, dloc->ip, dloc->var_addr, + &offset, type_die)) { + dloc->type_offset = offset; + + pr_debug_dtp("found by addr=%#"PRIx64" type_offset=%#x\n", + dloc->var_addr, offset); + pr_debug_type_name(type_die, TSR_KIND_TYPE); + ret = 0; + goto out; + } + } + /* Get a list of nested scopes - i.e. (inlined) functions and blocks. */ nr_scopes = die_get_scopes(&cu_die, pc, &scopes); + if (reg != DWARF_REG_PC && dwarf_hasattr(&scopes[0], DW_AT_frame_base)) { + Dwarf_Attribute attr; + Dwarf_Block block; + + /* Check if the 'reg' is assigned as frame base register */ + if (dwarf_attr(&scopes[0], DW_AT_frame_base, &attr) != NULL && + dwarf_formblock(&attr, &block) == 0 && block.length == 1) { + switch (*block.data) { + case DW_OP_reg0 ... DW_OP_reg31: + fbreg = dloc->fbreg = *block.data - DW_OP_reg0; + break; + case DW_OP_call_frame_cfa: + dloc->fb_cfa = true; + if (die_get_cfa(dloc->di->dbg, pc, &fbreg, + &fb_offset) < 0) + fbreg = -1; + break; + default: + break; + } + + pr_debug_dtp("frame base: cfa=%d fbreg=%d\n", + dloc->fb_cfa, fbreg); + } + } + +retry: + is_fbreg = (reg == fbreg); + if (is_fbreg) + offset = loc->offset - fb_offset; + /* Search from the inner-most scope to the outer */ for (i = nr_scopes - 1; i >= 0; i--) { - /* Look up variables/parameters in this scope */ - if (!die_find_variable_by_reg(&scopes[i], pc, reg, &var_die)) - continue; + if (reg == DWARF_REG_PC) { + if (!die_find_variable_by_addr(&scopes[i], dloc->var_addr, + &var_die, &offset)) + continue; + } else { + /* Look up variables/parameters in this scope */ + if (!die_find_variable_by_reg(&scopes[i], pc, reg, + &offset, is_fbreg, &var_die)) + continue; + } /* Found a variable, see if it's correct */ - ret = check_variable(&var_die, type_die, offset); + ret = check_variable(dloc, &var_die, type_die, reg, offset, is_fbreg); + if (ret == 0) { + pr_debug_dtp("found \"%s\" in scope=%d/%d (die: %#lx) ", + dwarf_diename(&var_die), i+1, nr_scopes, + (long)dwarf_dieoffset(&scopes[i])); + if (reg == DWARF_REG_PC) { + pr_debug_dtp("addr=%#"PRIx64" type_offset=%#x\n", + dloc->var_addr, offset); + } else if (reg == DWARF_REG_FB || is_fbreg) { + pr_debug_dtp("stack_offset=%#x type_offset=%#x\n", + fb_offset, offset); + } else { + pr_debug_dtp("type_offset=%#x\n", offset); + } + pr_debug_location(&var_die, pc, reg); + pr_debug_type_name(type_die, TSR_KIND_TYPE); + } else { + pr_debug_dtp("check variable \"%s\" failed (die: %#lx)\n", + dwarf_diename(&var_die), + (long)dwarf_dieoffset(&var_die)); + pr_debug_location(&var_die, pc, reg); + pr_debug_type_name(type_die, TSR_KIND_TYPE); + } + dloc->type_offset = offset; goto out; } - if (ret < 0) + + if (loc->multi_regs && reg == loc->reg1 && loc->reg1 != loc->reg2) { + reg = loc->reg2; + goto retry; + } + + if (reg != DWARF_REG_PC) { + ret = find_data_type_block(dloc, &cu_die, scopes, + nr_scopes, type_die); + if (ret == 0) { + ann_data_stat.insn_track++; + goto out; + } + } + + if (ret < 0) { + pr_debug_dtp("no variable found\n"); 
ann_data_stat.no_var++;
+ }
out:
free(scopes);
@@ -270,43 +1750,45 @@ out:
/**
* find_data_type - Return a data type at the location
- * @ms: map and symbol at the location
- * @ip: instruction address of the memory access
- * @reg: register that holds the base address
- * @offset: offset from the base address
+ * @dloc: data location
*
* This function searches the debug information of the binary to get the data
- * type it accesses. The exact location is expressed by (ip, reg, offset).
+ * type it accesses. The exact location is expressed by (ip, reg, offset)
+ * for pointer variables or (ip, addr) for global variables. Note that for
+ * global variables, @dloc->type_offset may be updated after finding the
+ * start of the variable. If it cannot find a global variable by address,
+ * it tries to find a declaration of the variable using var_name. In that
+ * case, @dloc->offset won't be updated.
+ *
* It returns %NULL if not found.
*/
-struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
- int reg, int offset)
+struct annotated_data_type *find_data_type(struct data_loc_info *dloc)
{
struct annotated_data_type *result = NULL;
- struct dso *dso = map__dso(ms->map);
- struct debuginfo *di;
+ struct dso *dso = map__dso(dloc->ms->map);
Dwarf_Die type_die;
- u64 pc;
- di = debuginfo__new(dso->long_name);
- if (di == NULL) {
- pr_debug("cannot get the debug info\n");
+ dloc->di = debuginfo__new(dso__long_name(dso));
+ if (dloc->di == NULL) {
+ pr_debug_dtp("cannot get the debug info\n");
return NULL;
}
/*
- * IP is a relative instruction address from the start of the map, as
- * it can be randomized/relocated, it needs to translate to PC which is
- * a file address for DWARF processing.
+ * The type offset is the same as the instruction offset by default.
+ * But when finding a global variable, the offset won't be valid.
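+ *
+ * For instance (illustrative only): for "mov 0x10(%rax),%rbx" the
+ * initial type_offset is 0x10, while for a global access such as
+ * "mov 0x1234(%rip),%rbx" it is replaced by the offset into the
+ * variable once find_data_type_die() resolves it.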
*/ - pc = map__rip_2objdump(ms->map, ip); - if (find_data_type_die(di, pc, reg, offset, &type_die) < 0) + dloc->type_offset = dloc->op->offset; + + dloc->fbreg = -1; + + if (find_data_type_die(dloc, &type_die) < 0) goto out; result = dso__findnew_data_type(dso, &type_die); out: - debuginfo__delete(di); + debuginfo__delete(dloc->di); return result; } @@ -318,7 +1800,6 @@ static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_en sz += sizeof(struct type_hist_entry) * adt->self.size; /* Allocate a table of pointers for each event */ - adt->nr_histograms = nr_entries; adt->histograms = calloc(nr_entries, sizeof(*adt->histograms)); if (adt->histograms == NULL) return -ENOMEM; @@ -332,20 +1813,24 @@ static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_en if (adt->histograms[i] == NULL) goto err; } + + adt->nr_histograms = nr_entries; return 0; err: while (--i >= 0) - free(adt->histograms[i]); - free(adt->histograms); + zfree(&(adt->histograms[i])); + zfree(&adt->histograms); return -ENOMEM; } static void delete_data_type_histograms(struct annotated_data_type *adt) { for (int i = 0; i < adt->nr_histograms; i++) - free(adt->histograms[i]); - free(adt->histograms); + zfree(&(adt->histograms[i])); + + zfree(&adt->histograms); + adt->nr_histograms = 0; } void annotated_data_type__tree_delete(struct rb_root *root) @@ -359,7 +1844,7 @@ void annotated_data_type__tree_delete(struct rb_root *root) pos = rb_entry(node, struct annotated_data_type, node); delete_members(&pos->self); delete_data_type_histograms(pos); - free(pos->self.type_name); + zfree(&pos->self.type_name); free(pos); } } @@ -403,3 +1888,115 @@ int annotated_data_type__update_samples(struct annotated_data_type *adt, h->addr[offset].period += period; return 0; } + +static void print_annotated_data_header(struct hist_entry *he, struct evsel *evsel) +{ + struct dso *dso = map__dso(he->ms.map); + int nr_members = 1; + int nr_samples = he->stat.nr_events; + int width = 7; + const char *val_hdr = "Percent"; + + if (evsel__is_group_event(evsel)) { + struct hist_entry *pair; + + list_for_each_entry(pair, &he->pairs.head, pairs.node) + nr_samples += pair->stat.nr_events; + } + + printf("Annotate type: '%s' in %s (%d samples):\n", + he->mem_type->self.type_name, dso__name(dso), nr_samples); + + if (evsel__is_group_event(evsel)) { + struct evsel *pos; + int i = 0; + + for_each_group_evsel(pos, evsel) + printf(" event[%d] = %s\n", i++, pos->name); + + nr_members = evsel->core.nr_members; + } + + if (symbol_conf.show_total_period) { + width = 11; + val_hdr = "Period"; + } else if (symbol_conf.show_nr_samples) { + width = 7; + val_hdr = "Samples"; + } + + printf("============================================================================\n"); + printf("%*s %10s %10s %s\n", (width + 1) * nr_members, val_hdr, + "offset", "size", "field"); +} + +static void print_annotated_data_value(struct type_hist *h, u64 period, int nr_samples) +{ + double percent = h->period ? 
(100.0 * period / h->period) : 0;
+ const char *color = get_percent_color(percent);
+
+ if (symbol_conf.show_total_period)
+ color_fprintf(stdout, color, " %11" PRIu64, period);
+ else if (symbol_conf.show_nr_samples)
+ color_fprintf(stdout, color, " %7d", nr_samples);
+ else
+ color_fprintf(stdout, color, " %7.2f", percent);
+}
+
+static void print_annotated_data_type(struct annotated_data_type *mem_type,
+ struct annotated_member *member,
+ struct evsel *evsel, int indent)
+{
+ struct annotated_member *child;
+ struct type_hist *h = mem_type->histograms[evsel->core.idx];
+ int i, nr_events = 1, samples = 0;
+ u64 period = 0;
+ int width = symbol_conf.show_total_period ? 11 : 7;
+
+ for (i = 0; i < member->size; i++) {
+ samples += h->addr[member->offset + i].nr_samples;
+ period += h->addr[member->offset + i].period;
+ }
+ print_annotated_data_value(h, period, samples);
+
+ if (evsel__is_group_event(evsel)) {
+ struct evsel *pos;
+
+ for_each_group_member(pos, evsel) {
+ h = mem_type->histograms[pos->core.idx];
+
+ samples = 0;
+ period = 0;
+ for (i = 0; i < member->size; i++) {
+ samples += h->addr[member->offset + i].nr_samples;
+ period += h->addr[member->offset + i].period;
+ }
+ print_annotated_data_value(h, period, samples);
+ }
+ nr_events = evsel->core.nr_members;
+ }
+
+ printf(" %10d %10d %*s%s\t%s",
+ member->offset, member->size, indent, "", member->type_name,
+ member->var_name ?: "");
+
+ if (!list_empty(&member->children))
+ printf(" {\n");
+
+ list_for_each_entry(child, &member->children, node)
+ print_annotated_data_type(mem_type, child, evsel, indent + 4);
+
+ if (!list_empty(&member->children))
+ printf("%*s}", (width + 1) * nr_events + 24 + indent, "");
+ printf(";\n");
+}
+
+int hist_entry__annotate_data_tty(struct hist_entry *he, struct evsel *evsel)
+{
+ print_annotated_data_header(he, evsel);
+ print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0);
+ printf("\n");
+
+ /* move to the next entry */
+ return '>';
+}
diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h
index 8e73096c01d1..0a57d9f5ee78 100644
--- a/tools/perf/util/annotate-data.h
+++ b/tools/perf/util/annotate-data.h
@@ -7,8 +7,13 @@
#include <linux/rbtree.h>
#include <linux/types.h>
+struct annotated_op_loc;
+struct debuginfo;
struct evsel;
+struct hist_browser_timer;
+struct hist_entry;
struct map_symbol;
+struct thread;
/**
* struct annotated_member - Type of member field
@@ -69,6 +74,41 @@ struct annotated_data_type {
};
extern struct annotated_data_type unknown_type;
+extern struct annotated_data_type stackop_type;
+extern struct annotated_data_type canary_type;
+
+/**
+ * struct data_loc_info - Data location information
+ * @arch: CPU architecture info
+ * @thread: Thread info
+ * @ms: Map and Symbol info
+ * @ip: Instruction address
+ * @var_addr: Data address (for global variables)
+ * @cpumode: CPU execution mode
+ * @op: Instruction operand location (regs and offset)
+ * @di: Debug info
+ * @fbreg: Frame base register
+ * @fb_cfa: Whether the frame needs to check CFA
+ * @type_offset: Final offset in the type
+ */
+struct data_loc_info {
+ /* These are input fields, should be filled by the caller */
+ struct arch *arch;
+ struct thread *thread;
+ struct map_symbol *ms;
+ u64 ip;
+ u64 var_addr;
+ u8 cpumode;
+ struct annotated_op_loc *op;
+
+ /* These are used internally */
+ struct debuginfo *di;
+ int fbreg;
+ bool fb_cfa;
+
+ /* This is for the result */
+ int type_offset;
+};
/**
* struct annotated_data_stat - Debug statistics
@@ -98,14 +138,14 @@
struct annotated_data_stat { int no_typeinfo; int invalid_size; int bad_offset; + int insn_track; }; extern struct annotated_data_stat ann_data_stat; #ifdef HAVE_DWARF_SUPPORT /* Returns data type at the location (ip, reg, offset) */ -struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip, - int reg, int offset); +struct annotated_data_type *find_data_type(struct data_loc_info *dloc); /* Update type access histogram at the given offset */ int annotated_data_type__update_samples(struct annotated_data_type *adt, @@ -115,11 +155,15 @@ int annotated_data_type__update_samples(struct annotated_data_type *adt, /* Release all data type information in the tree */ void annotated_data_type__tree_delete(struct rb_root *root); +/* Release all global variable information in the tree */ +void global_var_type__tree_delete(struct rb_root *root); + +int hist_entry__annotate_data_tty(struct hist_entry *he, struct evsel *evsel); + #else /* HAVE_DWARF_SUPPORT */ static inline struct annotated_data_type * -find_data_type(struct map_symbol *ms __maybe_unused, u64 ip __maybe_unused, - int reg __maybe_unused, int offset __maybe_unused) +find_data_type(struct data_loc_info *dloc __maybe_unused) { return NULL; } @@ -138,6 +182,28 @@ static inline void annotated_data_type__tree_delete(struct rb_root *root __maybe { } +static inline void global_var_type__tree_delete(struct rb_root *root __maybe_unused) +{ +} + +static inline int hist_entry__annotate_data_tty(struct hist_entry *he __maybe_unused, + struct evsel *evsel __maybe_unused) +{ + return -1; +} + #endif /* HAVE_DWARF_SUPPORT */ +#ifdef HAVE_SLANG_SUPPORT +int hist_entry__annotate_data_tui(struct hist_entry *he, struct evsel *evsel, + struct hist_browser_timer *hbt); +#else +static inline int hist_entry__annotate_data_tui(struct hist_entry *he __maybe_unused, + struct evsel *evsel __maybe_unused, + struct hist_browser_timer *hbt __maybe_unused) +{ + return -1; +} +#endif /* HAVE_SLANG_SUPPORT */ + #endif /* _PERF_ANNOTATE_DATA_H */ diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 9b70ab110ce7..1451caf25e77 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -16,6 +16,7 @@ #include "build-id.h" #include "color.h" #include "config.h" +#include "disasm.h" #include "dso.h" #include "env.h" #include "map.h" @@ -37,6 +38,8 @@ #include "util/sharded_mutex.h" #include "arch/common.h" #include "namespaces.h" +#include "thread.h" +#include "hashmap.h" #include <regex.h> #include <linux/bitops.h> #include <linux/kernel.h> @@ -62,796 +65,34 @@ /* global annotation options */ struct annotation_options annotate_opts; -static regex_t file_lineno; - -static struct ins_ops *ins__find(struct arch *arch, const char *name); -static void ins__sort(struct arch *arch); -static int disasm_line__parse(char *line, const char **namep, char **rawp); -static int call__scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name); -static int jump__scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name); - -struct arch { - const char *name; - struct ins *instructions; - size_t nr_instructions; - size_t nr_instructions_allocated; - struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name); - bool sorted_instructions; - bool initialized; - const char *insn_suffix; - void *priv; - unsigned int model; - unsigned int family; - int (*init)(struct arch *arch, char *cpuid); - bool (*ins_is_fused)(struct arch *arch, const char *ins1, - 
const char *ins2); - struct { - char comment_char; - char skip_functions_char; - char register_char; - char memory_ref_char; - } objdump; -}; - -static struct ins_ops call_ops; -static struct ins_ops dec_ops; -static struct ins_ops jump_ops; -static struct ins_ops mov_ops; -static struct ins_ops nop_ops; -static struct ins_ops lock_ops; -static struct ins_ops ret_ops; - /* Data type collection debug statistics */ struct annotated_data_stat ann_data_stat; LIST_HEAD(ann_insn_stat); -static int arch__grow_instructions(struct arch *arch) -{ - struct ins *new_instructions; - size_t new_nr_allocated; - - if (arch->nr_instructions_allocated == 0 && arch->instructions) - goto grow_from_non_allocated_table; - - new_nr_allocated = arch->nr_instructions_allocated + 128; - new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins)); - if (new_instructions == NULL) - return -1; - -out_update_instructions: - arch->instructions = new_instructions; - arch->nr_instructions_allocated = new_nr_allocated; - return 0; - -grow_from_non_allocated_table: - new_nr_allocated = arch->nr_instructions + 128; - new_instructions = calloc(new_nr_allocated, sizeof(struct ins)); - if (new_instructions == NULL) - return -1; - - memcpy(new_instructions, arch->instructions, arch->nr_instructions); - goto out_update_instructions; -} - -static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops) -{ - struct ins *ins; - - if (arch->nr_instructions == arch->nr_instructions_allocated && - arch__grow_instructions(arch)) - return -1; - - ins = &arch->instructions[arch->nr_instructions]; - ins->name = strdup(name); - if (!ins->name) - return -1; - - ins->ops = ops; - arch->nr_instructions++; - - ins__sort(arch); - return 0; -} - -#include "arch/arc/annotate/instructions.c" -#include "arch/arm/annotate/instructions.c" -#include "arch/arm64/annotate/instructions.c" -#include "arch/csky/annotate/instructions.c" -#include "arch/loongarch/annotate/instructions.c" -#include "arch/mips/annotate/instructions.c" -#include "arch/x86/annotate/instructions.c" -#include "arch/powerpc/annotate/instructions.c" -#include "arch/riscv64/annotate/instructions.c" -#include "arch/s390/annotate/instructions.c" -#include "arch/sparc/annotate/instructions.c" - -static struct arch architectures[] = { - { - .name = "arc", - .init = arc__annotate_init, - }, - { - .name = "arm", - .init = arm__annotate_init, - }, - { - .name = "arm64", - .init = arm64__annotate_init, +/* Pseudo data types */ +struct annotated_data_type stackop_type = { + .self = { + .type_name = (char *)"(stack operation)", + .children = LIST_HEAD_INIT(stackop_type.self.children), }, - { - .name = "csky", - .init = csky__annotate_init, - }, - { - .name = "mips", - .init = mips__annotate_init, - .objdump = { - .comment_char = '#', - }, - }, - { - .name = "x86", - .init = x86__annotate_init, - .instructions = x86__instructions, - .nr_instructions = ARRAY_SIZE(x86__instructions), - .insn_suffix = "bwlq", - .objdump = { - .comment_char = '#', - .register_char = '%', - .memory_ref_char = '(', - }, - }, - { - .name = "powerpc", - .init = powerpc__annotate_init, - }, - { - .name = "riscv64", - .init = riscv64__annotate_init, - }, - { - .name = "s390", - .init = s390__annotate_init, - .objdump = { - .comment_char = '#', - }, - }, - { - .name = "sparc", - .init = sparc__annotate_init, - .objdump = { - .comment_char = '#', - }, - }, - { - .name = "loongarch", - .init = loongarch__annotate_init, - .objdump = { - .comment_char = '#', - }, - }, -}; 
- -static void ins__delete(struct ins_operands *ops) -{ - if (ops == NULL) - return; - zfree(&ops->source.raw); - zfree(&ops->source.name); - zfree(&ops->target.raw); - zfree(&ops->target.name); -} - -static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name) -{ - return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw); -} - -int ins__scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name) -{ - if (ins->ops->scnprintf) - return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name); - - return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); -} - -bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2) -{ - if (!arch || !arch->ins_is_fused) - return false; - - return arch->ins_is_fused(arch, ins1, ins2); -} - -static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) -{ - char *endptr, *tok, *name; - struct map *map = ms->map; - struct addr_map_symbol target = { - .ms = { .map = map, }, - }; - - ops->target.addr = strtoull(ops->raw, &endptr, 16); - - name = strchr(endptr, '<'); - if (name == NULL) - goto indirect_call; - - name++; - - if (arch->objdump.skip_functions_char && - strchr(name, arch->objdump.skip_functions_char)) - return -1; - - tok = strchr(name, '>'); - if (tok == NULL) - return -1; - - *tok = '\0'; - ops->target.name = strdup(name); - *tok = '>'; - - if (ops->target.name == NULL) - return -1; -find_target: - target.addr = map__objdump_2mem(map, ops->target.addr); - - if (maps__find_ams(ms->maps, &target) == 0 && - map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr) - ops->target.sym = target.ms.sym; - - return 0; - -indirect_call: - tok = strchr(endptr, '*'); - if (tok != NULL) { - endptr++; - - /* Indirect call can use a non-rip register and offset: callq *0x8(%rbx). - * Do not parse such instruction. 
*/ - if (strstr(endptr, "(%r") == NULL) - ops->target.addr = strtoull(endptr, NULL, 16); - } - goto find_target; -} - -static int call__scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name) -{ - if (ops->target.sym) - return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name); - - if (ops->target.addr == 0) - return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); - - if (ops->target.name) - return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name); - - return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr); -} - -static struct ins_ops call_ops = { - .parse = call__parse, - .scnprintf = call__scnprintf, -}; - -bool ins__is_call(const struct ins *ins) -{ - return ins->ops == &call_ops || ins->ops == &s390_call_ops || ins->ops == &loongarch_call_ops; -} - -/* - * Prevents from matching commas in the comment section, e.g.: - * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast - * - * and skip comma as part of function arguments, e.g.: - * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc> - */ -static inline const char *validate_comma(const char *c, struct ins_operands *ops) -{ - if (ops->jump.raw_comment && c > ops->jump.raw_comment) - return NULL; - - if (ops->jump.raw_func_start && c > ops->jump.raw_func_start) - return NULL; - - return c; -} - -static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) -{ - struct map *map = ms->map; - struct symbol *sym = ms->sym; - struct addr_map_symbol target = { - .ms = { .map = map, }, - }; - const char *c = strchr(ops->raw, ','); - u64 start, end; - - ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char); - ops->jump.raw_func_start = strchr(ops->raw, '<'); - - c = validate_comma(c, ops); - - /* - * Examples of lines to parse for the _cpp_lex_token@@Base - * function: - * - * 1159e6c: jne 115aa32 <_cpp_lex_token@@Base+0xf92> - * 1159e8b: jne c469be <cpp_named_operator2name@@Base+0xa72> - * - * The first is a jump to an offset inside the same function, - * the second is to another function, i.e. that 0xa72 is an - * offset in the cpp_named_operator2name@@base function. - */ - /* - * skip over possible up to 2 operands to get to address, e.g.: - * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0> - */ - if (c++ != NULL) { - ops->target.addr = strtoull(c, NULL, 16); - if (!ops->target.addr) { - c = strchr(c, ','); - c = validate_comma(c, ops); - if (c++ != NULL) - ops->target.addr = strtoull(c, NULL, 16); - } - } else { - ops->target.addr = strtoull(ops->raw, NULL, 16); - } - - target.addr = map__objdump_2mem(map, ops->target.addr); - start = map__unmap_ip(map, sym->start); - end = map__unmap_ip(map, sym->end); - - ops->target.outside = target.addr < start || target.addr > end; - - /* - * FIXME: things like this in _cpp_lex_token (gcc's cc1 program): - - cpp_named_operator2name@@Base+0xa72 - - * Point to a place that is after the cpp_named_operator2name - * boundaries, i.e. in the ELF symbol table for cc1 - * cpp_named_operator2name is marked as being 32-bytes long, but it in - * fact is much larger than that, so we seem to need a symbols__find() - * routine that looks for >= current->start and < next_symbol->start, - * possibly just for C++ objects? - * - * For now lets just make some progress by marking jumps to outside the - * current function as call like. 
- * - * Actual navigation will come next, with further understanding of how - * the symbol searching and disassembly should be done. - */ - if (maps__find_ams(ms->maps, &target) == 0 && - map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr) - ops->target.sym = target.ms.sym; - - if (!ops->target.outside) { - ops->target.offset = target.addr - start; - ops->target.offset_avail = true; - } else { - ops->target.offset_avail = false; - } - - return 0; -} - -static int jump__scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name) -{ - const char *c; - - if (!ops->target.addr || ops->target.offset < 0) - return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); - - if (ops->target.outside && ops->target.sym != NULL) - return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name); - - c = strchr(ops->raw, ','); - c = validate_comma(c, ops); - - if (c != NULL) { - const char *c2 = strchr(c + 1, ','); - - c2 = validate_comma(c2, ops); - /* check for 3-op insn */ - if (c2 != NULL) - c = c2; - c++; - - /* mirror arch objdump's space-after-comma style */ - if (*c == ' ') - c++; - } - - return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name, - ins->name, c ? c - ops->raw : 0, ops->raw, - ops->target.offset); -} - -static void jump__delete(struct ins_operands *ops __maybe_unused) -{ - /* - * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the - * raw string, don't free them. - */ -} - -static struct ins_ops jump_ops = { - .free = jump__delete, - .parse = jump__parse, - .scnprintf = jump__scnprintf, }; -bool ins__is_jump(const struct ins *ins) -{ - return ins->ops == &jump_ops || ins->ops == &loongarch_jump_ops; -} - -static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep) -{ - char *endptr, *name, *t; - - if (strstr(raw, "(%rip)") == NULL) - return 0; - - *addrp = strtoull(comment, &endptr, 16); - if (endptr == comment) - return 0; - name = strchr(endptr, '<'); - if (name == NULL) - return -1; - - name++; - - t = strchr(name, '>'); - if (t == NULL) - return 0; - - *t = '\0'; - *namep = strdup(name); - *t = '>'; - - return 0; -} - -static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) -{ - ops->locked.ops = zalloc(sizeof(*ops->locked.ops)); - if (ops->locked.ops == NULL) - return 0; - - if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0) - goto out_free_ops; - - ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name); - - if (ops->locked.ins.ops == NULL) - goto out_free_ops; - - if (ops->locked.ins.ops->parse && - ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0) - goto out_free_ops; - - return 0; - -out_free_ops: - zfree(&ops->locked.ops); - return 0; -} - -static int lock__scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name) -{ - int printed; - - if (ops->locked.ins.ops == NULL) - return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); - - printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name); - return printed + ins__scnprintf(&ops->locked.ins, bf + printed, - size - printed, ops->locked.ops, max_ins_name); -} - -static void lock__delete(struct ins_operands *ops) -{ - struct ins *ins = &ops->locked.ins; - - if (ins->ops && ins->ops->free) - ins->ops->free(ops->locked.ops); - else - ins__delete(ops->locked.ops); - - zfree(&ops->locked.ops); - zfree(&ops->target.raw); - zfree(&ops->target.name); -} - -static struct 
ins_ops lock_ops = { - .free = lock__delete, - .parse = lock__parse, - .scnprintf = lock__scnprintf, -}; - -/* - * Check if the operand has more than one registers like x86 SIB addressing: - * 0x1234(%rax, %rbx, 8) - * - * But it doesn't care segment selectors like %gs:0x5678(%rcx), so just check - * the input string after 'memory_ref_char' if exists. - */ -static bool check_multi_regs(struct arch *arch, const char *op) -{ - int count = 0; - - if (arch->objdump.register_char == 0) - return false; - - if (arch->objdump.memory_ref_char) { - op = strchr(op, arch->objdump.memory_ref_char); - if (op == NULL) - return false; - } - - while ((op = strchr(op, arch->objdump.register_char)) != NULL) { - count++; - op++; - } - - return count > 1; -} - -static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) -{ - char *s = strchr(ops->raw, ','), *target, *comment, prev; - - if (s == NULL) - return -1; - - *s = '\0'; - - /* - * x86 SIB addressing has something like 0x8(%rax, %rcx, 1) - * then it needs to have the closing parenthesis. - */ - if (strchr(ops->raw, '(')) { - *s = ','; - s = strchr(ops->raw, ')'); - if (s == NULL || s[1] != ',') - return -1; - *++s = '\0'; - } - - ops->source.raw = strdup(ops->raw); - *s = ','; - - if (ops->source.raw == NULL) - return -1; - - ops->source.multi_regs = check_multi_regs(arch, ops->source.raw); - - target = skip_spaces(++s); - comment = strchr(s, arch->objdump.comment_char); - - if (comment != NULL) - s = comment - 1; - else - s = strchr(s, '\0') - 1; - - while (s > target && isspace(s[0])) - --s; - s++; - prev = *s; - *s = '\0'; - - ops->target.raw = strdup(target); - *s = prev; - - if (ops->target.raw == NULL) - goto out_free_source; - - ops->target.multi_regs = check_multi_regs(arch, ops->target.raw); - - if (comment == NULL) - return 0; - - comment = skip_spaces(comment); - comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name); - comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name); - - return 0; - -out_free_source: - zfree(&ops->source.raw); - return -1; -} - -static int mov__scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name) -{ - return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name, - ops->source.name ?: ops->source.raw, - ops->target.name ?: ops->target.raw); -} - -static struct ins_ops mov_ops = { - .parse = mov__parse, - .scnprintf = mov__scnprintf, -}; - -static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) -{ - char *target, *comment, *s, prev; - - target = s = ops->raw; - - while (s[0] != '\0' && !isspace(s[0])) - ++s; - prev = *s; - *s = '\0'; - - ops->target.raw = strdup(target); - *s = prev; - - if (ops->target.raw == NULL) - return -1; - - comment = strchr(s, arch->objdump.comment_char); - if (comment == NULL) - return 0; - - comment = skip_spaces(comment); - comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name); - - return 0; -} - -static int dec__scnprintf(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name) -{ - return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, - ops->target.name ?: ops->target.raw); -} - -static struct ins_ops dec_ops = { - .parse = dec__parse, - .scnprintf = dec__scnprintf, -}; - -static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, - struct ins_operands *ops __maybe_unused, int max_ins_name) 
-{ - return scnprintf(bf, size, "%-*s", max_ins_name, "nop"); -} - -static struct ins_ops nop_ops = { - .scnprintf = nop__scnprintf, -}; - -static struct ins_ops ret_ops = { - .scnprintf = ins__raw_scnprintf, +struct annotated_data_type canary_type = { + .self = { + .type_name = (char *)"(stack canary)", + .children = LIST_HEAD_INIT(canary_type.self.children), + }, }; -bool ins__is_ret(const struct ins *ins) +/* symbol histogram: key = offset << 16 | evsel->core.idx */ +static size_t sym_hist_hash(long key, void *ctx __maybe_unused) { - return ins->ops == &ret_ops; -} - -bool ins__is_lock(const struct ins *ins) -{ - return ins->ops == &lock_ops; -} - -static int ins__key_cmp(const void *name, const void *insp) -{ - const struct ins *ins = insp; - - return strcmp(name, ins->name); -} - -static int ins__cmp(const void *a, const void *b) -{ - const struct ins *ia = a; - const struct ins *ib = b; - - return strcmp(ia->name, ib->name); -} - -static void ins__sort(struct arch *arch) -{ - const int nmemb = arch->nr_instructions; - - qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp); -} - -static struct ins_ops *__ins__find(struct arch *arch, const char *name) -{ - struct ins *ins; - const int nmemb = arch->nr_instructions; - - if (!arch->sorted_instructions) { - ins__sort(arch); - arch->sorted_instructions = true; - } - - ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp); - if (ins) - return ins->ops; - - if (arch->insn_suffix) { - char tmp[32]; - char suffix; - size_t len = strlen(name); - - if (len == 0 || len >= sizeof(tmp)) - return NULL; - - suffix = name[len - 1]; - if (strchr(arch->insn_suffix, suffix) == NULL) - return NULL; - - strcpy(tmp, name); - tmp[len - 1] = '\0'; /* remove the suffix and check again */ - - ins = bsearch(tmp, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp); - } - return ins ? 
ins->ops : NULL; -} - -static struct ins_ops *ins__find(struct arch *arch, const char *name) -{ - struct ins_ops *ops = __ins__find(arch, name); - - if (!ops && arch->associate_instruction_ops) - ops = arch->associate_instruction_ops(arch, name); - - return ops; -} - -static int arch__key_cmp(const void *name, const void *archp) -{ - const struct arch *arch = archp; - - return strcmp(name, arch->name); -} - -static int arch__cmp(const void *a, const void *b) -{ - const struct arch *aa = a; - const struct arch *ab = b; - - return strcmp(aa->name, ab->name); -} - -static void arch__sort(void) -{ - const int nmemb = ARRAY_SIZE(architectures); - - qsort(architectures, nmemb, sizeof(struct arch), arch__cmp); -} - -static struct arch *arch__find(const char *name) -{ - const int nmemb = ARRAY_SIZE(architectures); - static bool sorted; - - if (!sorted) { - arch__sort(); - sorted = true; - } - - return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp); + return (key >> 16) + (key & 0xffff); } -bool arch__is(struct arch *arch, const char *name) +static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused) { - return !strcmp(arch->name, name); + return key1 == key2; } static struct annotated_source *annotated_source__new(void) @@ -866,40 +107,34 @@ static struct annotated_source *annotated_source__new(void) static __maybe_unused void annotated_source__delete(struct annotated_source *src) { + struct hashmap_entry *cur; + size_t bkt; + if (src == NULL) return; + + if (src->samples) { + hashmap__for_each_entry(src->samples, cur, bkt) + zfree(&cur->pvalue); + hashmap__free(src->samples); + } zfree(&src->histograms); free(src); } static int annotated_source__alloc_histograms(struct annotated_source *src, - size_t size, int nr_hists) + int nr_hists) { - size_t sizeof_sym_hist; - - /* - * Add buffer of one element for zero length symbol. - * When sample is taken from first instruction of - * zero length symbol, perf still resolves it and - * shows symbol name in perf report and allows to - * annotate it. - */ - if (size == 0) - size = 1; + src->nr_histograms = nr_hists; + src->histograms = calloc(nr_hists, sizeof(*src->histograms)); - /* Check for overflow when calculating sizeof_sym_hist */ - if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry)) + if (src->histograms == NULL) return -1; - sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry)); + src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL); + if (src->samples == NULL) + zfree(&src->histograms); - /* Check for overflow in zalloc argument */ - if (sizeof_sym_hist > SIZE_MAX / nr_hists) - return -1; - - src->sizeof_sym_hist = sizeof_sym_hist; - src->nr_histograms = nr_hists; - src->histograms = calloc(nr_hists, sizeof_sym_hist) ; return src->histograms ? 
0 : -1; } @@ -910,7 +145,8 @@ void symbol__annotate_zero_histograms(struct symbol *sym) annotation__lock(notes); if (notes->src != NULL) { memset(notes->src->histograms, 0, - notes->src->nr_histograms * notes->src->sizeof_sym_hist); + notes->src->nr_histograms * sizeof(*notes->src->histograms)); + hashmap__clear(notes->src->samples); } if (notes->branch && notes->branch->cycles_hist) { memset(notes->branch->cycles_hist, 0, @@ -974,8 +210,10 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms, struct perf_sample *sample) { struct symbol *sym = ms->sym; - unsigned offset; + long hash_key; + u64 offset; struct sym_hist *h; + struct sym_hist_entry *entry; pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr)); @@ -993,15 +231,26 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms, __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC); return -ENOMEM; } + + hash_key = offset << 16 | evidx; + if (!hashmap__find(src->samples, hash_key, &entry)) { + entry = zalloc(sizeof(*entry)); + if (entry == NULL) + return -ENOMEM; + + if (hashmap__add(src->samples, hash_key, entry) < 0) + return -ENOMEM; + } + h->nr_samples++; - h->addr[offset].nr_samples++; h->period += sample->period; - h->addr[offset].period += sample->period; + entry->nr_samples++; + entry->period += sample->period; pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n", sym->start, sym->name, addr, addr - sym->start, evidx, - h->addr[offset].nr_samples, h->addr[offset].period); + entry->nr_samples, entry->period); return 0; } @@ -1047,8 +296,7 @@ struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists) if (notes->src->histograms == NULL) { alloc_histograms: - annotated_source__alloc_histograms(notes->src, symbol__size(sym), - nr_hists); + annotated_source__alloc_histograms(notes->src, nr_hists); } return notes->src; @@ -1128,14 +376,33 @@ int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, return err; } +struct annotation_line *annotated_source__get_line(struct annotated_source *src, + s64 offset) +{ + struct annotation_line *al; + + list_for_each_entry(al, &src->source, node) { + if (al->offset == offset) + return al; + } + return NULL; +} + static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end) { + struct annotation_line *al; unsigned n_insn = 0; - u64 offset; - for (offset = start; offset <= end; offset++) { - if (notes->src->offsets[offset]) - n_insn++; + al = annotated_source__get_line(notes->src, start); + if (al == NULL) + return 0; + + list_for_each_entry_from(al, ¬es->src->source, node) { + if (al->offset == -1) + continue; + if ((u64)al->offset > end) + break; + n_insn++; } return n_insn; } @@ -1152,10 +419,10 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 { unsigned n_insn; unsigned int cover_insn = 0; - u64 offset; n_insn = annotation__count_insn(notes, start, end); if (n_insn && ch->num && ch->cycles) { + struct annotation_line *al; struct annotated_branch *branch; float ipc = n_insn / ((double)ch->cycles / (double)ch->num); @@ -1163,10 +430,16 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 if (ch->reset >= 0x7fff) return; - for (offset = start; offset <= end; offset++) { - struct annotation_line *al = notes->src->offsets[offset]; + al = annotated_source__get_line(notes->src, start); + if (al == NULL) + return; - if (al && al->cycles && 
al->cycles->ipc == 0.0) { + list_for_each_entry_from(al, ¬es->src->source, node) { + if (al->offset == -1) + continue; + if ((u64)al->offset > end) + break; + if (al->cycles && al->cycles->ipc == 0.0) { al->cycles->ipc = ipc; cover_insn++; } @@ -1202,7 +475,7 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) if (ch && ch->cycles) { struct annotation_line *al; - al = notes->src->offsets[offset]; + al = annotated_source__get_line(notes->src, offset); if (al && al->cycles == NULL) { al->cycles = zalloc(sizeof(*al->cycles)); if (al->cycles == NULL) { @@ -1225,7 +498,9 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) struct cyc_hist *ch = ¬es->branch->cycles_hist[offset]; if (ch && ch->cycles) { - struct annotation_line *al = notes->src->offsets[offset]; + struct annotation_line *al; + + al = annotated_source__get_line(notes->src, offset); if (al) zfree(&al->cycles); } @@ -1248,142 +523,6 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *samp return symbol__inc_addr_samples(&he->ms, evsel, ip, sample); } -static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms) -{ - dl->ins.ops = ins__find(arch, dl->ins.name); - - if (!dl->ins.ops) - return; - - if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0) - dl->ins.ops = NULL; -} - -static int disasm_line__parse(char *line, const char **namep, char **rawp) -{ - char tmp, *name = skip_spaces(line); - - if (name[0] == '\0') - return -1; - - *rawp = name + 1; - - while ((*rawp)[0] != '\0' && !isspace((*rawp)[0])) - ++*rawp; - - tmp = (*rawp)[0]; - (*rawp)[0] = '\0'; - *namep = strdup(name); - - if (*namep == NULL) - goto out; - - (*rawp)[0] = tmp; - *rawp = strim(*rawp); - - return 0; - -out: - return -1; -} - -struct annotate_args { - struct arch *arch; - struct map_symbol ms; - struct evsel *evsel; - struct annotation_options *options; - s64 offset; - char *line; - int line_nr; - char *fileloc; -}; - -static void annotation_line__init(struct annotation_line *al, - struct annotate_args *args, - int nr) -{ - al->offset = args->offset; - al->line = strdup(args->line); - al->line_nr = args->line_nr; - al->fileloc = args->fileloc; - al->data_nr = nr; -} - -static void annotation_line__exit(struct annotation_line *al) -{ - zfree_srcline(&al->path); - zfree(&al->line); - zfree(&al->cycles); -} - -static size_t disasm_line_size(int nr) -{ - struct annotation_line *al; - - return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr)); -} - -/* - * Allocating the disasm annotation line data with - * following structure: - * - * ------------------------------------------- - * struct disasm_line | struct annotation_line - * ------------------------------------------- - * - * We have 'struct annotation_line' member as last member - * of 'struct disasm_line' to have an easy access. 
- */ -static struct disasm_line *disasm_line__new(struct annotate_args *args) -{ - struct disasm_line *dl = NULL; - int nr = 1; - - if (evsel__is_group_event(args->evsel)) - nr = args->evsel->core.nr_members; - - dl = zalloc(disasm_line_size(nr)); - if (!dl) - return NULL; - - annotation_line__init(&dl->al, args, nr); - if (dl->al.line == NULL) - goto out_delete; - - if (args->offset != -1) { - if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0) - goto out_free_line; - - disasm_line__init_ins(dl, args->arch, &args->ms); - } - - return dl; - -out_free_line: - zfree(&dl->al.line); -out_delete: - free(dl); - return NULL; -} - -void disasm_line__free(struct disasm_line *dl) -{ - if (dl->ins.ops && dl->ins.ops->free) - dl->ins.ops->free(&dl->ops); - else - ins__delete(&dl->ops); - zfree(&dl->ins.name); - annotation_line__exit(&dl->al); - free(dl); -} - -int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name) -{ - if (raw || !dl->ins.ops) - return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw); - - return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name); -} void annotation__exit(struct annotation *notes) { @@ -1443,8 +582,7 @@ bool annotation__trylock(struct annotation *notes) return mutex_trylock(mutex); } - -static void annotation_line__add(struct annotation_line *al, struct list_head *head) +void annotation_line__add(struct annotation_line *al, struct list_head *head) { list_add_tail(&al->node, head); } @@ -1654,684 +792,25 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start return 0; } -/* - * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw) - * which looks like following - * - * 0000000000415500 <_init>: - * 415500: sub $0x8,%rsp - * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8> - * 41550b: test %rax,%rax - * 41550e: je 415515 <_init+0x15> - * 415510: callq 416e70 <__gmon_start__@plt> - * 415515: add $0x8,%rsp - * 415519: retq - * - * it will be parsed and saved into struct disasm_line as - * <offset> <name> <ops.raw> - * - * The offset will be a relative offset from the start of the symbol and -1 - * means that it's not a disassembly line so should be treated differently. - * The ops.raw part will be parsed further according to type of the instruction. - */ -static int symbol__parse_objdump_line(struct symbol *sym, - struct annotate_args *args, - char *parsed_line, int *line_nr, char **fileloc) -{ - struct map *map = args->ms.map; - struct annotation *notes = symbol__annotation(sym); - struct disasm_line *dl; - char *tmp; - s64 line_ip, offset = -1; - regmatch_t match[2]; - - /* /filename:linenr ? Save line number and ignore. */ - if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) { - *line_nr = atoi(parsed_line + match[1].rm_so); - free(*fileloc); - *fileloc = strdup(parsed_line); - return 0; - } - - /* Process hex address followed by ':'. 
*/ - line_ip = strtoull(parsed_line, &tmp, 16); - if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') { - u64 start = map__rip_2objdump(map, sym->start), - end = map__rip_2objdump(map, sym->end); - - offset = line_ip - start; - if ((u64)line_ip < start || (u64)line_ip >= end) - offset = -1; - else - parsed_line = tmp + 1; - } - - args->offset = offset; - args->line = parsed_line; - args->line_nr = *line_nr; - args->fileloc = *fileloc; - args->ms.sym = sym; - - dl = disasm_line__new(args); - (*line_nr)++; - - if (dl == NULL) - return -1; - - if (!disasm_line__has_local_offset(dl)) { - dl->ops.target.offset = dl->ops.target.addr - - map__rip_2objdump(map, sym->start); - dl->ops.target.offset_avail = true; - } - - /* kcore has no symbols, so add the call target symbol */ - if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) { - struct addr_map_symbol target = { - .addr = dl->ops.target.addr, - .ms = { .map = map, }, - }; - - if (!maps__find_ams(args->ms.maps, &target) && - target.ms.sym->start == target.al_addr) - dl->ops.target.sym = target.ms.sym; - } - - annotation_line__add(&dl->al, ¬es->src->source); - return 0; -} - -static __attribute__((constructor)) void symbol__init_regexpr(void) -{ - regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED); -} - -static void delete_last_nop(struct symbol *sym) -{ - struct annotation *notes = symbol__annotation(sym); - struct list_head *list = ¬es->src->source; - struct disasm_line *dl; - - while (!list_empty(list)) { - dl = list_entry(list->prev, struct disasm_line, al.node); - - if (dl->ins.ops) { - if (dl->ins.ops != &nop_ops) - return; - } else { - if (!strstr(dl->al.line, " nop ") && - !strstr(dl->al.line, " nopl ") && - !strstr(dl->al.line, " nopw ")) - return; - } - - list_del_init(&dl->al.node); - disasm_line__free(dl); - } -} - -int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen) -{ - struct dso *dso = map__dso(ms->map); - - BUG_ON(buflen == 0); - - if (errnum >= 0) { - str_error_r(errnum, buf, buflen); - return 0; - } - - switch (errnum) { - case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: { - char bf[SBUILD_ID_SIZE + 15] = " with build id "; - char *build_id_msg = NULL; - - if (dso->has_build_id) { - build_id__sprintf(&dso->bid, bf + 15); - build_id_msg = bf; - } - scnprintf(buf, buflen, - "No vmlinux file%s\nwas found in the path.\n\n" - "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n" - "Please use:\n\n" - " perf buildid-cache -vu vmlinux\n\n" - "or:\n\n" - " --vmlinux vmlinux\n", build_id_msg ?: ""); - } - break; - case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF: - scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation"); - break; - case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP: - scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions."); - break; - case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING: - scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization."); - break; - case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE: - scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name); - break; - case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF: - scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.", - dso->long_name); - break; - default: - scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum); - break; - } - - return 0; -} - -static int dso__disassemble_filename(struct dso *dso, char *filename, 
size_t filename_size) -{ - char linkname[PATH_MAX]; - char *build_id_filename; - char *build_id_path = NULL; - char *pos; - int len; - - if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && - !dso__is_kcore(dso)) - return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX; - - build_id_filename = dso__build_id_filename(dso, NULL, 0, false); - if (build_id_filename) { - __symbol__join_symfs(filename, filename_size, build_id_filename); - free(build_id_filename); - } else { - if (dso->has_build_id) - return ENOMEM; - goto fallback; - } - - build_id_path = strdup(filename); - if (!build_id_path) - return ENOMEM; - - /* - * old style build-id cache has name of XX/XXXXXXX.. while - * new style has XX/XXXXXXX../{elf,kallsyms,vdso}. - * extract the build-id part of dirname in the new style only. - */ - pos = strrchr(build_id_path, '/'); - if (pos && strlen(pos) < SBUILD_ID_SIZE - 2) - dirname(build_id_path); - - if (dso__is_kcore(dso)) - goto fallback; - - len = readlink(build_id_path, linkname, sizeof(linkname) - 1); - if (len < 0) - goto fallback; - - linkname[len] = '\0'; - if (strstr(linkname, DSO__NAME_KALLSYMS) || - access(filename, R_OK)) { -fallback: - /* - * If we don't have build-ids or the build-id file isn't in the - * cache, or is just a kallsyms file, well, lets hope that this - * DSO is the same as when 'perf record' ran. - */ - if (dso->kernel && dso->long_name[0] == '/') - snprintf(filename, filename_size, "%s", dso->long_name); - else - __symbol__join_symfs(filename, filename_size, dso->long_name); - - mutex_lock(&dso->lock); - if (access(filename, R_OK) && errno == ENOENT && dso->nsinfo) { - char *new_name = dso__filename_with_chroot(dso, filename); - if (new_name) { - strlcpy(filename, new_name, filename_size); - free(new_name); - } - } - mutex_unlock(&dso->lock); - } - - free(build_id_path); - return 0; -} - -#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) -#define PACKAGE "perf" -#include <bfd.h> -#include <dis-asm.h> -#include <bpf/bpf.h> -#include <bpf/btf.h> -#include <bpf/libbpf.h> -#include <linux/btf.h> -#include <tools/dis-asm-compat.h> - -static int symbol__disassemble_bpf(struct symbol *sym, - struct annotate_args *args) -{ - struct annotation *notes = symbol__annotation(sym); - struct bpf_prog_linfo *prog_linfo = NULL; - struct bpf_prog_info_node *info_node; - int len = sym->end - sym->start; - disassembler_ftype disassemble; - struct map *map = args->ms.map; - struct perf_bpil *info_linear; - struct disassemble_info info; - struct dso *dso = map__dso(map); - int pc = 0, count, sub_id; - struct btf *btf = NULL; - char tpath[PATH_MAX]; - size_t buf_size; - int nr_skip = 0; - char *buf; - bfd *bfdf; - int ret; - FILE *s; - - if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO) - return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE; - - pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, - sym->name, sym->start, sym->end - sym->start); - - memset(tpath, 0, sizeof(tpath)); - perf_exe(tpath, sizeof(tpath)); - - bfdf = bfd_openr(tpath, NULL); - if (bfdf == NULL) - abort(); - - if (!bfd_check_format(bfdf, bfd_object)) - abort(); - - s = open_memstream(&buf, &buf_size); - if (!s) { - ret = errno; - goto out; - } - init_disassemble_info_compat(&info, s, - (fprintf_ftype) fprintf, - fprintf_styled); - info.arch = bfd_get_arch(bfdf); - info.mach = bfd_get_mach(bfdf); - - info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, - dso->bpf_prog.id); - if (!info_node) { - ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF; - goto out; - } - info_linear = 
info_node->info_linear; - sub_id = dso->bpf_prog.sub_id; - - info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns); - info.buffer_length = info_linear->info.jited_prog_len; - - if (info_linear->info.nr_line_info) - prog_linfo = bpf_prog_linfo__new(&info_linear->info); - - if (info_linear->info.btf_id) { - struct btf_node *node; - - node = perf_env__find_btf(dso->bpf_prog.env, - info_linear->info.btf_id); - if (node) - btf = btf__new((__u8 *)(node->data), - node->data_size); - } - - disassemble_init_for_target(&info); - -#ifdef DISASM_FOUR_ARGS_SIGNATURE - disassemble = disassembler(info.arch, - bfd_big_endian(bfdf), - info.mach, - bfdf); -#else - disassemble = disassembler(bfdf); -#endif - if (disassemble == NULL) - abort(); - - fflush(s); - do { - const struct bpf_line_info *linfo = NULL; - struct disasm_line *dl; - size_t prev_buf_size; - const char *srcline; - u64 addr; - - addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id]; - count = disassemble(pc, &info); - - if (prog_linfo) - linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo, - addr, sub_id, - nr_skip); - - if (linfo && btf) { - srcline = btf__name_by_offset(btf, linfo->line_off); - nr_skip++; - } else - srcline = NULL; - - fprintf(s, "\n"); - prev_buf_size = buf_size; - fflush(s); - - if (!annotate_opts.hide_src_code && srcline) { - args->offset = -1; - args->line = strdup(srcline); - args->line_nr = 0; - args->fileloc = NULL; - args->ms.sym = sym; - dl = disasm_line__new(args); - if (dl) { - annotation_line__add(&dl->al, - ¬es->src->source); - } - } - - args->offset = pc; - args->line = buf + prev_buf_size; - args->line_nr = 0; - args->fileloc = NULL; - args->ms.sym = sym; - dl = disasm_line__new(args); - if (dl) - annotation_line__add(&dl->al, ¬es->src->source); - - pc += count; - } while (count > 0 && pc < len); - - ret = 0; -out: - free(prog_linfo); - btf__free(btf); - fclose(s); - bfd_close(bfdf); - return ret; -} -#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) -static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused, - struct annotate_args *args __maybe_unused) -{ - return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF; -} -#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) - -static int -symbol__disassemble_bpf_image(struct symbol *sym, - struct annotate_args *args) -{ - struct annotation *notes = symbol__annotation(sym); - struct disasm_line *dl; - - args->offset = -1; - args->line = strdup("to be implemented"); - args->line_nr = 0; - args->fileloc = NULL; - dl = disasm_line__new(args); - if (dl) - annotation_line__add(&dl->al, ¬es->src->source); - - zfree(&args->line); - return 0; -} - -/* - * Possibly create a new version of line with tabs expanded. Returns the - * existing or new line, storage is updated if a new line is allocated. If - * allocation fails then NULL is returned. - */ -static char *expand_tabs(char *line, char **storage, size_t *storage_len) -{ - size_t i, src, dst, len, new_storage_len, num_tabs; - char *new_line; - size_t line_len = strlen(line); - - for (num_tabs = 0, i = 0; i < line_len; i++) - if (line[i] == '\t') - num_tabs++; - - if (num_tabs == 0) - return line; - - /* - * Space for the line and '\0', less the leading and trailing - * spaces. Each tab may introduce 7 additional spaces. 
- */ - new_storage_len = line_len + 1 + (num_tabs * 7); - - new_line = malloc(new_storage_len); - if (new_line == NULL) { - pr_err("Failure allocating memory for tab expansion\n"); - return NULL; - } - - /* - * Copy regions starting at src and expand tabs. If there are two - * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces - * are inserted. - */ - for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) { - if (line[i] == '\t') { - len = i - src; - memcpy(&new_line[dst], &line[src], len); - dst += len; - new_line[dst++] = ' '; - while (dst % 8 != 0) - new_line[dst++] = ' '; - src = i + 1; - num_tabs--; - } - } - - /* Expand the last region. */ - len = line_len - src; - memcpy(&new_line[dst], &line[src], len); - dst += len; - new_line[dst] = '\0'; - - free(*storage); - *storage = new_line; - *storage_len = new_storage_len; - return new_line; - -} - -static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) -{ - struct annotation_options *opts = &annotate_opts; - struct map *map = args->ms.map; - struct dso *dso = map__dso(map); - char *command; - FILE *file; - char symfs_filename[PATH_MAX]; - struct kcore_extract kce; - bool delete_extract = false; - bool decomp = false; - int lineno = 0; - char *fileloc = NULL; - int nline; - char *line; - size_t line_len; - const char *objdump_argv[] = { - "/bin/sh", - "-c", - NULL, /* Will be the objdump command to run. */ - "--", - NULL, /* Will be the symfs path. */ - NULL, - }; - struct child_process objdump_process; - int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename)); - - if (err) - return err; - - pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, - symfs_filename, sym->name, map__unmap_ip(map, sym->start), - map__unmap_ip(map, sym->end)); - - pr_debug("annotating [%p] %30s : [%p] %30s\n", - dso, dso->long_name, sym, sym->name); - - if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) { - return symbol__disassemble_bpf(sym, args); - } else if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) { - return symbol__disassemble_bpf_image(sym, args); - } else if (dso__is_kcore(dso)) { - kce.kcore_filename = symfs_filename; - kce.addr = map__rip_2objdump(map, sym->start); - kce.offs = sym->start; - kce.len = sym->end - sym->start; - if (!kcore_extract__create(&kce)) { - delete_extract = true; - strlcpy(symfs_filename, kce.extract_filename, - sizeof(symfs_filename)); - } - } else if (dso__needs_decompress(dso)) { - char tmp[KMOD_DECOMP_LEN]; - - if (dso__decompress_kmodule_path(dso, symfs_filename, - tmp, sizeof(tmp)) < 0) - return -1; - - decomp = true; - strcpy(symfs_filename, tmp); - } - - err = asprintf(&command, - "%s %s%s --start-address=0x%016" PRIx64 - " --stop-address=0x%016" PRIx64 - " %s -d %s %s %s %c%s%c %s%s -C \"$1\"", - opts->objdump_path ?: "objdump", - opts->disassembler_style ? "-M " : "", - opts->disassembler_style ?: "", - map__rip_2objdump(map, sym->start), - map__rip_2objdump(map, sym->end), - opts->show_linenr ? "-l" : "", - opts->show_asm_raw ? "" : "--no-show-raw-insn", - opts->annotate_src ? "-S" : "", - opts->prefix ? "--prefix " : "", - opts->prefix ? '"' : ' ', - opts->prefix ?: "", - opts->prefix ? '"' : ' ', - opts->prefix_strip ? 
"--prefix-strip=" : "", - opts->prefix_strip ?: ""); - - if (err < 0) { - pr_err("Failure allocating memory for the command to run\n"); - goto out_remove_tmp; - } - - pr_debug("Executing: %s\n", command); - - objdump_argv[2] = command; - objdump_argv[4] = symfs_filename; - - /* Create a pipe to read from for stdout */ - memset(&objdump_process, 0, sizeof(objdump_process)); - objdump_process.argv = objdump_argv; - objdump_process.out = -1; - objdump_process.err = -1; - objdump_process.no_stderr = 1; - if (start_command(&objdump_process)) { - pr_err("Failure starting to run %s\n", command); - err = -1; - goto out_free_command; - } - - file = fdopen(objdump_process.out, "r"); - if (!file) { - pr_err("Failure creating FILE stream for %s\n", command); - /* - * If we were using debug info should retry with - * original binary. - */ - err = -1; - goto out_close_stdout; - } - - /* Storage for getline. */ - line = NULL; - line_len = 0; - - nline = 0; - while (!feof(file)) { - const char *match; - char *expanded_line; - - if (getline(&line, &line_len, file) < 0 || !line) - break; - - /* Skip lines containing "filename:" */ - match = strstr(line, symfs_filename); - if (match && match[strlen(symfs_filename)] == ':') - continue; - - expanded_line = strim(line); - expanded_line = expand_tabs(expanded_line, &line, &line_len); - if (!expanded_line) - break; - - /* - * The source code line number (lineno) needs to be kept in - * across calls to symbol__parse_objdump_line(), so that it - * can associate it with the instructions till the next one. - * See disasm_line__new() and struct disasm_line::line_nr. - */ - if (symbol__parse_objdump_line(sym, args, expanded_line, - &lineno, &fileloc) < 0) - break; - nline++; - } - free(line); - free(fileloc); - - err = finish_command(&objdump_process); - if (err) - pr_err("Error running %s\n", command); - - if (nline == 0) { - err = -1; - pr_err("No output from %s\n", command); - } - - /* - * kallsyms does not have symbol sizes so there may a nop at the end. - * Remove it. - */ - if (dso__is_kcore(dso)) - delete_last_nop(sym); - - fclose(file); - -out_close_stdout: - close(objdump_process.out); - -out_free_command: - free(command); - -out_remove_tmp: - if (decomp) - unlink(symfs_filename); - - if (delete_extract) - kcore_extract__delete(&kce); - - return err; -} - -static void calc_percent(struct sym_hist *sym_hist, - struct hists *hists, +static void calc_percent(struct annotation *notes, + struct evsel *evsel, struct annotation_data *data, s64 offset, s64 end) { + struct hists *hists = evsel__hists(evsel); + int evidx = evsel->core.idx; + struct sym_hist *sym_hist = annotation__histogram(notes, evidx); unsigned int hits = 0; u64 period = 0; while (offset < end) { - hits += sym_hist->addr[offset].nr_samples; - period += sym_hist->addr[offset].period; + struct sym_hist_entry *entry; + + entry = annotated_source__hist_entry(notes->src, evidx, offset); + if (entry) { + hits += entry->nr_samples; + period += entry->period; + } ++offset; } @@ -2368,16 +847,13 @@ static void annotation__calc_percent(struct annotation *notes, end = next ? 
next->offset : len; for_each_group_evsel(evsel, leader) { - struct hists *hists = evsel__hists(evsel); struct annotation_data *data; - struct sym_hist *sym_hist; BUG_ON(i >= al->data_nr); - sym_hist = annotation__histogram(notes, evsel->core.idx); data = &al->data[i++]; - calc_percent(sym_hist, hists, data, al->offset, end); + calc_percent(notes, evsel, data, al->offset, end); } } } @@ -2396,8 +872,10 @@ static int evsel__get_arch(struct evsel *evsel, struct arch **parch) struct arch *arch; int err; - if (!arch_name) + if (!arch_name) { + *parch = NULL; return errno; + } *parch = arch = arch__find(arch_name); if (arch == NULL) { @@ -2435,12 +913,22 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, if (parch) *parch = arch; + if (notes->src && !list_empty(&notes->src->source)) + return 0; + args.arch = arch; args.ms = *ms; + + if (notes->src == NULL) { + notes->src = annotated_source__new(); + if (notes->src == NULL) + return -1; + } + if (annotate_opts.full_addr) - notes->start = map__objdump_2mem(ms->map, ms->sym->start); + notes->src->start = map__objdump_2mem(ms->map, ms->sym->start); else - notes->start = map__rip_2objdump(ms->map, ms->sym->start); + notes->src->start = map__rip_2objdump(ms->map, ms->sym->start); return symbol__disassemble(sym, &args); } @@ -2572,14 +1060,19 @@ static void print_summary(struct rb_root *root, const char *filename) static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel) { + int evidx = evsel->core.idx; struct annotation *notes = symbol__annotation(sym); - struct sym_hist *h = annotation__histogram(notes, evsel->core.idx); + struct sym_hist *h = annotation__histogram(notes, evidx); u64 len = symbol__size(sym), offset; - for (offset = 0; offset < len; ++offset) - if (h->addr[offset].nr_samples != 0) + for (offset = 0; offset < len; ++offset) { + struct sym_hist_entry *entry; + + entry = annotated_source__hist_entry(notes->src, evidx, offset); + if (entry && entry->nr_samples != 0) printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, - sym->start + offset, h->addr[offset].nr_samples); + sym->start + offset, entry->nr_samples); + } printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples); } @@ -2617,7 +1110,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel) int graph_dotted_len; char buf[512]; - filename = strdup(dso->long_name); + filename = strdup(dso__long_name(dso)); if (!filename) return -ENOMEM; @@ -2782,7 +1275,7 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel) } fprintf(fp, "%s() %s\nEvent: %s\n\n", - ms->sym->name, map__dso(ms->map)->long_name, ev_name); + ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name); symbol__annotate_fprintf2(ms->sym, fp); fclose(fp); @@ -2797,19 +1290,28 @@ void symbol__annotate_zero_histogram(struct symbol *sym, int evidx) struct annotation *notes = symbol__annotation(sym); struct sym_hist *h = annotation__histogram(notes, evidx); - memset(h, 0, notes->src->sizeof_sym_hist); + memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms); } void symbol__annotate_decay_histogram(struct symbol *sym, int evidx) { struct annotation *notes = symbol__annotation(sym); struct sym_hist *h = annotation__histogram(notes, evidx); - int len = symbol__size(sym), offset; + struct annotation_line *al; h->nr_samples = 0; - for (offset = 0; offset < len; ++offset) { - h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8; - h->nr_samples += h->addr[offset].nr_samples; + list_for_each_entry(al, 
&notes->src->source, node) { + struct sym_hist_entry *entry; + + if (al->offset == -1) + continue; + + entry = annotated_source__hist_entry(notes->src, evidx, al->offset); + if (entry == NULL) + continue; + + entry->nr_samples = entry->nr_samples * 7 / 8; + h->nr_samples += entry->nr_samples; } } @@ -2861,64 +1363,56 @@ bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym return true; } -void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) +static void +annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) { - u64 offset, size = symbol__size(sym); + struct annotation_line *al; /* PLT symbols contain external offsets */ if (strstr(sym->name, "@plt")) return; - for (offset = 0; offset < size; ++offset) { - struct annotation_line *al = notes->src->offsets[offset]; + list_for_each_entry(al, &notes->src->source, node) { struct disasm_line *dl; + struct annotation_line *target; dl = disasm_line(al); if (!disasm_line__is_valid_local_jump(dl, sym)) continue; - al = notes->src->offsets[dl->ops.target.offset]; - + target = annotated_source__get_line(notes->src, + dl->ops.target.offset); /* * FIXME: Oops, no jump target? Buggy disassembler? Or do we * have to adjust to the previous offset? */ - if (al == NULL) + if (target == NULL) continue; - if (++al->jump_sources > notes->max_jump_sources) - notes->max_jump_sources = al->jump_sources; + if (++target->jump_sources > notes->src->max_jump_sources) + notes->src->max_jump_sources = target->jump_sources; } } -void annotation__set_offsets(struct annotation *notes, s64 size) +static void annotation__set_index(struct annotation *notes) { struct annotation_line *al; struct annotated_source *src = notes->src; - src->max_line_len = 0; + src->widths.max_line_len = 0; src->nr_entries = 0; src->nr_asm_entries = 0; list_for_each_entry(al, &src->source, node) { size_t line_len = strlen(al->line); - if (src->max_line_len < line_len) - src->max_line_len = line_len; + if (src->widths.max_line_len < line_len) + src->widths.max_line_len = line_len; al->idx = src->nr_entries++; - if (al->offset != -1) { + if (al->offset != -1) al->idx_asm = src->nr_asm_entries++; - /* - * FIXME: short term bandaid to cope with assembly - * routines that comes with labels in the same column - * as the address in objdump, sigh. - * - * E.g. 
copy_user_generic_unrolled - */ - if (al->offset < size) - notes->src->offsets[al->offset] = al; - } else + else al->idx_asm = -1; } } @@ -2949,28 +1443,29 @@ static int annotation__max_ins_name(struct annotation *notes) return max_name; } -void annotation__init_column_widths(struct annotation *notes, struct symbol *sym) +static void +annotation__init_column_widths(struct annotation *notes, struct symbol *sym) { - notes->widths.addr = notes->widths.target = - notes->widths.min_addr = hex_width(symbol__size(sym)); - notes->widths.max_addr = hex_width(sym->end); - notes->widths.jumps = width_jumps(notes->max_jump_sources); - notes->widths.max_ins_name = annotation__max_ins_name(notes); + notes->src->widths.addr = notes->src->widths.target = + notes->src->widths.min_addr = hex_width(symbol__size(sym)); + notes->src->widths.max_addr = hex_width(sym->end); + notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources); + notes->src->widths.max_ins_name = annotation__max_ins_name(notes); } void annotation__update_column_widths(struct annotation *notes) { if (annotate_opts.use_offset) - notes->widths.target = notes->widths.min_addr; + notes->src->widths.target = notes->src->widths.min_addr; else if (annotate_opts.full_addr) - notes->widths.target = BITS_PER_LONG / 4; + notes->src->widths.target = BITS_PER_LONG / 4; else - notes->widths.target = notes->widths.max_addr; + notes->src->widths.target = notes->src->widths.max_addr; - notes->widths.addr = notes->widths.target; + notes->src->widths.addr = notes->src->widths.target; if (annotate_opts.show_nr_jumps) - notes->widths.addr += notes->widths.jumps + 1; + notes->src->widths.addr += notes->src->widths.jumps + 1; } void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms) @@ -2978,14 +1473,14 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *m annotate_opts.full_addr = !annotate_opts.full_addr; if (annotate_opts.full_addr) - notes->start = map__objdump_2mem(ms->map, ms->sym->start); + notes->src->start = map__objdump_2mem(ms->map, ms->sym->start); else - notes->start = map__rip_2objdump(ms->map, ms->sym->start); + notes->src->start = map__rip_2objdump(ms->map, ms->sym->start); annotation__update_column_widths(notes); } -static void annotation__calc_lines(struct annotation *notes, struct map *map, +static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms, struct rb_root *root) { struct annotation_line *al; @@ -2993,6 +1488,7 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map, list_for_each_entry(al, &notes->src->source, node) { double percent_max = 0.0; + u64 addr; int i; for (i = 0; i < al->data_nr; i++) { @@ -3008,8 +1504,9 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map, if (percent_max <= 0.5) continue; - al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL, - false, true, notes->start + al->offset); + addr = map__rip_2objdump(ms->map, ms->sym->start); + al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL, + false, true, ms->sym->start + al->offset); insert_source_line(&tmp_root, al); } @@ -3020,7 +1517,7 @@ static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root) { struct annotation *notes = symbol__annotation(ms->sym); - annotation__calc_lines(notes, ms->map, root); + annotation__calc_lines(notes, ms, root); } int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel) @@ -3036,7 +1533,7 @@ int symbol__tty_annotate2(struct map_symbol 
*ms, struct evsel *evsel) if (err) { char msg[BUFSIZ]; - dso->annotate_warned = true; + dso__set_annotate_warned(dso); symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); ui__error("Couldn't annotate %s:\n%s", sym->name, msg); return -1; @@ -3045,13 +1542,12 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel) if (annotate_opts.print_lines) { srcline_full_filename = annotate_opts.full_path; symbol__calc_lines(ms, &source_line); - print_summary(&source_line, dso->long_name); + print_summary(&source_line, dso__long_name(dso)); } hists__scnprintf_title(hists, buf, sizeof(buf)); fprintf(stdout, "%s, [percent: %s]\n%s() %s\n", - buf, percent_type_str(annotate_opts.percent_type), sym->name, - dso->long_name); + buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso)); symbol__annotate_fprintf2(sym, stdout); annotated_source__purge(symbol__annotation(sym)->src); @@ -3070,7 +1566,7 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel) if (err) { char msg[BUFSIZ]; - dso->annotate_warned = true; + dso__set_annotate_warned(dso); symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); ui__error("Couldn't annotate %s:\n%s", sym->name, msg); return -1; @@ -3081,7 +1577,7 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel) if (annotate_opts.print_lines) { srcline_full_filename = annotate_opts.full_path; symbol__calc_lines(ms, &source_line); - print_summary(&source_line, dso->long_name); + print_summary(&source_line, dso__long_name(dso)); } symbol__annotate_printf(ms, evsel); @@ -3104,7 +1600,7 @@ static double annotation_line__max_percent(struct annotation_line *al, double percent_max = 0.0; int i; - for (i = 0; i < notes->nr_events; i++) { + for (i = 0; i < notes->src->nr_events; i++) { double percent; percent = annotation_data__percent(&al->data[i], @@ -3145,7 +1641,8 @@ call_like: obj__printf(obj, " "); } - disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name); + disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, + notes->src->widths.max_ins_name); } static void ipc_coverage_string(char *bf, int size, struct annotation *notes) @@ -3193,7 +1690,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati if (al->offset != -1 && percent_max != 0.0) { int i; - for (i = 0; i < notes->nr_events; i++) { + for (i = 0; i < notes->src->nr_events; i++) { double percent; percent = annotation_data__percent(&al->data[i], percent_type); @@ -3273,9 +1770,11 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " "); else if (al->offset == -1) { if (al->line_nr && annotate_opts.show_linenr) - printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr); + printed = scnprintf(bf, sizeof(bf), "%-*d ", + notes->src->widths.addr + 1, al->line_nr); else - printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " "); + printed = scnprintf(bf, sizeof(bf), "%-*s ", + notes->src->widths.addr, " "); obj__printf(obj, bf); obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line); } else { @@ -3283,7 +1782,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati int color = -1; if (!annotate_opts.use_offset) - addr += notes->start; + addr += notes->src->start; if (!annotate_opts.use_offset) { printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); @@ -3293,7 +1792,7 @@ static void 
__annotation_line__write(struct annotation_line *al, struct annotati if (annotate_opts.show_nr_jumps) { int prev; printed = scnprintf(bf, sizeof(bf), "%*d ", - notes->widths.jumps, + notes->src->widths.jumps, al->jump_sources); prev = obj__set_jumps_percent_color(obj, al->jump_sources, current_entry); @@ -3302,7 +1801,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati } print_addr: printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", - notes->widths.target, addr); + notes->src->widths.target, addr); } else if (ins__is_call(&disasm_line(al)->ins) && annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) { goto print_addr; @@ -3310,7 +1809,7 @@ print_addr: goto print_addr; } else { printed = scnprintf(bf, sizeof(bf), "%-*s ", - notes->widths.addr, " "); + notes->src->widths.addr, " "); } } @@ -3346,37 +1845,29 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, size_t size = symbol__size(sym); int nr_pcnt = 1, err; - notes->src->offsets = zalloc(size * sizeof(struct annotation_line *)); - if (notes->src->offsets == NULL) - return ENOMEM; - if (evsel__is_group_event(evsel)) nr_pcnt = evsel->core.nr_members; err = symbol__annotate(ms, evsel, parch); if (err) - goto out_free_offsets; + return err; symbol__calc_percent(sym, evsel); - annotation__set_offsets(notes, size); + annotation__set_index(notes); annotation__mark_jump_targets(notes, sym); err = annotation__compute_ipc(notes, size); if (err) - goto out_free_offsets; + return err; annotation__init_column_widths(notes, sym); - notes->nr_events = nr_pcnt; + notes->src->nr_events = nr_pcnt; annotation__update_column_widths(notes); sym->annotate2 = 1; return 0; - -out_free_offsets: - zfree(¬es->src->offsets); - return err; } static int annotation__config(const char *var, const char *value, void *data) @@ -3548,6 +2039,12 @@ static int extract_reg_offset(struct arch *arch, const char *str, * %gs:0x18(%rbx). In that case it should skip the part. */ if (*str == arch->objdump.register_char) { + if (arch__is(arch, "x86")) { + /* FIXME: Handle other segment registers */ + if (!strncmp(str, "%gs:", 4)) + op_loc->segment = INSN_SEG_X86_GS; + } + while (*str && !isdigit(*str) && *str != arch->objdump.memory_ref_char) str++; @@ -3563,8 +2060,22 @@ static int extract_reg_offset(struct arch *arch, const char *str, if (regname == NULL) return -1; - op_loc->reg = get_dwarf_regnum(regname, 0); + op_loc->reg1 = get_dwarf_regnum(regname, 0); free(regname); + + /* Get the second register */ + if (op_loc->multi_regs) { + p = strchr(p + 1, arch->objdump.register_char); + if (p == NULL) + return -1; + + regname = strdup(p); + if (regname == NULL) + return -1; + + op_loc->reg2 = get_dwarf_regnum(regname, 0); + free(regname); + } return 0; } @@ -3577,14 +2088,20 @@ static int extract_reg_offset(struct arch *arch, const char *str, * Get detailed location info (register and offset) in the instruction. * It needs both source and target operand and whether it accesses a * memory location. The offset field is meaningful only when the - * corresponding mem flag is set. + * corresponding mem flag is set. The reg2 field is meaningful only + * when multi_regs flag is set. 
* * Some examples on x86: * - * mov (%rax), %rcx # src_reg = rax, src_mem = 1, src_offset = 0 - * # dst_reg = rcx, dst_mem = 0 + * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0 + * # dst_reg1 = rcx, dst_mem = 0 + * + * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0 + * # dst_reg1 = r8, dst_mem = 0 * - * mov 0x18, %r8 # src_reg = -1, dst_reg = r8 + * + * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0 + * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1 + * # dst_multi_regs = 1, dst_offset = 8 */ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, struct annotated_insn_loc *loc) @@ -3593,7 +2110,7 @@ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, struct annotated_op_loc *op_loc; int i; - if (!strcmp(dl->ins.name, "lock")) + if (ins__is_lock(&dl->ins)) ops = dl->ops.locked.ops; else ops = &dl->ops; @@ -3605,54 +2122,59 @@ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, for_each_insn_op_loc(loc, i, op_loc) { const char *insn_str = ops->source.raw; + bool multi_regs = ops->source.multi_regs; - if (i == INSN_OP_TARGET) + if (i == INSN_OP_TARGET) { insn_str = ops->target.raw; + multi_regs = ops->target.multi_regs; + } /* Invalidate the register by default */ - op_loc->reg = -1; + op_loc->reg1 = -1; + op_loc->reg2 = -1; if (insn_str == NULL) continue; if (strchr(insn_str, arch->objdump.memory_ref_char)) { op_loc->mem_ref = true; + op_loc->multi_regs = multi_regs; extract_reg_offset(arch, insn_str, op_loc); } else { - char *s = strdup(insn_str); + char *s, *p = NULL; + + if (arch__is(arch, "x86")) { + /* FIXME: Handle other segment registers */ + if (!strncmp(insn_str, "%gs:", 4)) { + op_loc->segment = INSN_SEG_X86_GS; + op_loc->offset = strtol(insn_str + 4, + &p, 0); + if (p && p != insn_str + 4) + op_loc->imm = true; + continue; + } + } - if (s) { - op_loc->reg = get_dwarf_regnum(s, 0); - free(s); + s = strdup(insn_str); + if (s == NULL) + return -1; + + if (*s == arch->objdump.register_char) + op_loc->reg1 = get_dwarf_regnum(s, 0); + else if (*s == arch->objdump.imm_char) { + op_loc->offset = strtol(s + 1, &p, 0); + if (p && p != s + 1) + op_loc->imm = true; } + free(s); } } return 0; } -static void symbol__ensure_annotate(struct map_symbol *ms, struct evsel *evsel) -{ - struct disasm_line *dl, *tmp_dl; - struct annotation *notes; - - notes = symbol__annotation(ms->sym); - if (!list_empty(&notes->src->source)) - return; - - if (symbol__annotate(ms, evsel, NULL) < 0) - return; - - /* remove non-insn disasm lines for simplicity */ - list_for_each_entry_safe(dl, tmp_dl, &notes->src->source, al.node) { - if (dl->al.offset == -1) { - list_del(&dl->al.node); - free(dl); - } - } -} - -static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip) +static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip, + bool allow_update) { struct disasm_line *dl; struct annotation *notes; @@ -3660,8 +2182,21 @@ static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip) notes = symbol__annotation(sym); list_for_each_entry(dl, &notes->src->source, al.node) { - if (sym->start + dl->al.offset == ip) + if (dl->al.offset == -1) + continue; + + if (sym->start + dl->al.offset == ip) { + /* + * llvm-objdump places "lock" on a separate line and + * in that case, we want to get the next line. 
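+				 * A hypothetical llvm-objdump listing of that case
+				 * (addresses and encodings made up for illustration):
+				 *
+				 *   13a:	f0		lock
+				 *   13b:	48 0f b1 11	cmpxchg %rdx,(%rcx)
+				 *
+				 * A sample IP of 0x13a should then resolve to the
+				 * cmpxchg on the following line.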
+ */ + if (ins__is_lock(&dl->ins) && + *dl->ops.raw == '\0' && allow_update) { + ip++; + continue; + } return dl; + } } return NULL; } @@ -3690,6 +2225,94 @@ static struct annotated_item_stat *annotate_data_stat(struct list_head *head, return istat; } +static bool is_stack_operation(struct arch *arch, struct disasm_line *dl) +{ + if (arch__is(arch, "x86")) { + if (!strncmp(dl->ins.name, "push", 4) || + !strncmp(dl->ins.name, "pop", 3) || + !strncmp(dl->ins.name, "ret", 3)) + return true; + } + + return false; +} + +static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc) +{ + /* On x86_64, %gs:40 is used for stack canary */ + if (arch__is(arch, "x86")) { + if (loc->segment == INSN_SEG_X86_GS && loc->imm && + loc->offset == 40) + return true; + } + + return false; +} + +static struct disasm_line * +annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr) +{ + struct list_head *sources = &notes->src->source; + struct disasm_line *prev; + + if (curr == list_first_entry(sources, struct disasm_line, al.node)) + return NULL; + + prev = list_prev_entry(curr, al.node); + while (prev->al.offset == -1 && + prev != list_first_entry(sources, struct disasm_line, al.node)) + prev = list_prev_entry(prev, al.node); + + if (prev->al.offset == -1) + return NULL; + + return prev; +} + +static struct disasm_line * +annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr) +{ + struct list_head *sources = &notes->src->source; + struct disasm_line *next; + + if (curr == list_last_entry(sources, struct disasm_line, al.node)) + return NULL; + + next = list_next_entry(curr, al.node); + while (next->al.offset == -1 && + next != list_last_entry(sources, struct disasm_line, al.node)) + next = list_next_entry(next, al.node); + + if (next->al.offset == -1) + return NULL; + + return next; +} + +u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset, + struct disasm_line *dl) +{ + struct annotation *notes; + struct disasm_line *next; + u64 addr; + + notes = symbol__annotation(ms->sym); + /* + * PC-relative addressing starts from the next instruction address, + * but the IP is for the current instruction. Since disasm_line + * doesn't have the instruction size, calculate it using the next + * disasm_line. If it's the last one, we can use symbol's end + * address directly. + */ + next = annotation__next_asm_line(notes, dl); + if (next == NULL) + addr = ms->sym->end + offset; + else + addr = ip + (next->al.offset - dl->al.offset) + offset; + + return map__rip_2objdump(ms->map, addr); +} + /** * hist_entry__get_data_type - find data type for given hist entry * @he: hist entry @@ -3724,24 +2347,23 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) return NULL; } - if (evsel__get_arch(evsel, &arch) < 0) { + /* Make sure it has the disasm of the function */ + if (symbol__annotate(ms, evsel, &arch) < 0) { ann_data_stat.no_insn++; return NULL; } - /* Make sure it runs objdump to get disasm of the function */ - symbol__ensure_annotate(ms, evsel); - /* * Get a disasm to extract the location from the insn. * This is too slow... 
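	 * (find_disasm_line() below does a linear walk over all of the
	 * symbol's disasm_lines for every sample.)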
*/ - dl = find_disasm_line(ms->sym, ip); + dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true); if (dl == NULL) { ann_data_stat.no_insn++; return NULL; } +retry: istat = annotate_data_stat(&ann_insn_stat, dl->ins.name); if (istat == NULL) { ann_data_stat.no_insn++; @@ -3754,11 +2376,50 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) return NULL; } + if (is_stack_operation(arch, dl)) { + istat->good++; + he->mem_type_off = 0; + return &stackop_type; + } + for_each_insn_op_loc(&loc, i, op_loc) { + struct data_loc_info dloc = { + .arch = arch, + .thread = he->thread, + .ms = ms, + /* Recalculate IP for LOCK prefix or insn fusion */ + .ip = ms->sym->start + dl->al.offset, + .cpumode = he->cpumode, + .op = op_loc, + }; + + if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE) continue; - mem_type = find_data_type(ms, ip, op_loc->reg, op_loc->offset); + /* Recalculate IP because of LOCK prefix or insn fusion */ + ip = ms->sym->start + dl->al.offset; + + /* PC-relative addressing */ + if (op_loc->reg1 == DWARF_REG_PC) { + dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip, + op_loc->offset, dl); + } + + /* This CPU access in kernel - pretend PC-relative addressing */ + if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") && + op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) { + dloc.var_addr = op_loc->offset; + op_loc->reg1 = DWARF_REG_PC; + } + + mem_type = find_data_type(&dloc); + + if (mem_type == NULL && is_stack_canary(arch, op_loc)) { + istat->good++; + he->mem_type_off = 0; + return &canary_type; + } + if (mem_type) istat->good++; else @@ -3766,15 +2427,256 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) if (symbol_conf.annotate_data_sample) { annotated_data_type__update_samples(mem_type, evsel, - op_loc->offset, + dloc.type_offset, he->stat.nr_events, he->stat.period); } - he->mem_type_off = op_loc->offset; + he->mem_type_off = dloc.type_offset; return mem_type; } + /* + * Some instructions can be fused and the actual memory access came + * from the previous instruction. + */ + if (dl->al.offset > 0) { + struct annotation *notes; + struct disasm_line *prev_dl; + + notes = symbol__annotation(ms->sym); + prev_dl = annotation__prev_asm_line(notes, dl); + + if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) { + dl = prev_dl; + goto retry; + } + } + ann_data_stat.no_mem_ops++; istat->bad++; return NULL; } + +/* Basic block traversal (BFS) data structure */ +struct basic_block_data { + struct list_head queue; + struct list_head visited; +}; + +/* + * During the traversal, it needs to know the parent block where the current + * block started from. Note that a single basic block can be the parent of + * two child basic blocks (in the case of a conditional jump). 
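+ *
+ * For example (offsets made up), a conditional jump at the end of block A
+ * enqueues two links that share A as their parent:
+ *
+ *   A: 0x00-0x10  <--parent--  B: 0x12-...  (fall through)
+ *                 <--parent--  C: 0x40-...  (jump target)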
+ */ +struct basic_block_link { + struct list_head node; + struct basic_block_link *parent; + struct annotated_basic_block *bb; +}; + +/* Check if any basic block in the list already has the offset */ +static bool basic_block_has_offset(struct list_head *head, s64 offset) +{ + struct basic_block_link *link; + + list_for_each_entry(link, head, node) { + s64 begin_offset = link->bb->begin->al.offset; + s64 end_offset = link->bb->end->al.offset; + + if (begin_offset <= offset && offset <= end_offset) + return true; + } + return false; +} + +static bool is_new_basic_block(struct basic_block_data *bb_data, + struct disasm_line *dl) +{ + s64 offset = dl->al.offset; + + if (basic_block_has_offset(&bb_data->visited, offset)) + return false; + if (basic_block_has_offset(&bb_data->queue, offset)) + return false; + return true; +} + +/* Add a basic block starting from dl and link it to the parent */ +static int add_basic_block(struct basic_block_data *bb_data, + struct basic_block_link *parent, + struct disasm_line *dl) +{ + struct annotated_basic_block *bb; + struct basic_block_link *link; + + if (dl == NULL) + return -1; + + if (!is_new_basic_block(bb_data, dl)) + return 0; + + bb = zalloc(sizeof(*bb)); + if (bb == NULL) + return -1; + + bb->begin = dl; + bb->end = dl; + INIT_LIST_HEAD(&bb->list); + + link = malloc(sizeof(*link)); + if (link == NULL) { + free(bb); + return -1; + } + + link->bb = bb; + link->parent = parent; + list_add_tail(&link->node, &bb_data->queue); + return 0; +} + +/* Returns true when it finds the target in the current basic block */ +static bool process_basic_block(struct basic_block_data *bb_data, + struct basic_block_link *link, + struct symbol *sym, u64 target) +{ + struct disasm_line *dl, *next_dl, *last_dl; + struct annotation *notes = symbol__annotation(sym); + bool found = false; + + dl = link->bb->begin; + /* Check if it's already visited */ + if (basic_block_has_offset(&bb_data->visited, dl->al.offset)) + return false; + + last_dl = list_last_entry(&notes->src->source, + struct disasm_line, al.node); + if (last_dl->al.offset == -1) + last_dl = annotation__prev_asm_line(notes, last_dl); + + if (last_dl == NULL) + return false; + + list_for_each_entry_from(dl, &notes->src->source, al.node) { + /* Skip comment or debug info line */ + if (dl->al.offset == -1) + continue; + /* Found the target instruction */ + if (sym->start + dl->al.offset == target) { + found = true; + break; + } + /* End of the function, finish the block */ + if (dl == last_dl) + break; + /* 'return' instruction finishes the block */ + if (ins__is_ret(&dl->ins)) + break; + /* normal instructions are part of the basic block */ + if (!ins__is_jump(&dl->ins)) + continue; + /* jump to a different function, tail call or return */ + if (dl->ops.target.outside) + break; + /* jump instruction creates new basic block(s) */ + next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset, + /*allow_update=*/false); + if (next_dl) + add_basic_block(bb_data, link, next_dl); + + /* + * FIXME: determine conditional jumps properly. + * Conditional jumps create another basic block with the + * next disasm line. + */ + if (!strstr(dl->ins.name, "jmp")) { + next_dl = annotation__next_asm_line(notes, dl); + if (next_dl) + add_basic_block(bb_data, link, next_dl); + } + break; + + } + link->bb->end = dl; + return found; +} + +/* + * Once it has found the target basic block, build a proper linked list of + * basic blocks by following the parent links back to the start. 
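+ *
+ * Note that each block is moved to the head of @head while walking from the
+ * target back towards the source, so the resulting list ends up ordered
+ * from source to destination.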
+ */ +static void link_found_basic_blocks(struct basic_block_link *link, + struct list_head *head) +{ + while (link) { + struct basic_block_link *parent = link->parent; + + list_move(&link->bb->list, head); + list_del(&link->node); + free(link); + + link = parent; + } +} + +static void delete_basic_blocks(struct basic_block_data *bb_data) +{ + struct basic_block_link *link, *tmp; + + list_for_each_entry_safe(link, tmp, &bb_data->queue, node) { + list_del(&link->node); + zfree(&link->bb); + free(link); + } + + list_for_each_entry_safe(link, tmp, &bb_data->visited, node) { + list_del(&link->node); + zfree(&link->bb); + free(link); + } +} + +/** + * annotate_get_basic_blocks - Get basic blocks for given address range + * @sym: symbol to annotate + * @src: source address + * @dst: destination address + * @head: list head to save basic blocks + * + * This function traverses disasm_lines from @src to @dst and saves them in a + * list of annotated_basic_block at @head. It uses BFS to find the shortest + * path between the two. The basic_block_link maintains parent links so + * that it can build a list of blocks from the start. + */ +int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst, + struct list_head *head) +{ + struct basic_block_data bb_data = { + .queue = LIST_HEAD_INIT(bb_data.queue), + .visited = LIST_HEAD_INIT(bb_data.visited), + }; + struct basic_block_link *link; + struct disasm_line *dl; + int ret = -1; + + dl = find_disasm_line(sym, src, /*allow_update=*/false); + if (dl == NULL) + return -1; + + if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0) + return -1; + + /* Find shortest path from src to dst using BFS */ + while (!list_empty(&bb_data.queue)) { + link = list_first_entry(&bb_data.queue, struct basic_block_link, node); + + if (process_basic_block(&bb_data, link, sym, dst)) { + link_found_basic_blocks(link, head); + ret = 0; + break; + } + list_move(&link->node, &bb_data.visited); + } + delete_basic_blocks(&bb_data); + return ret; +} diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index dba50762c6e8..d5c821c22f79 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -12,10 +12,11 @@ #include "symbol_conf.h" #include "mutex.h" #include "spark.h" +#include "hashmap.h" +#include "disasm.h" struct hist_browser_timer; struct hist_entry; -struct ins_ops; struct map; struct map_symbol; struct addr_map_symbol; @@ -25,59 +26,6 @@ struct evsel; struct symbol; struct annotated_data_type; -struct ins { - const char *name; - struct ins_ops *ops; -}; - -struct ins_operands { - char *raw; - struct { - char *raw; - char *name; - struct symbol *sym; - u64 addr; - s64 offset; - bool offset_avail; - bool outside; - bool multi_regs; - } target; - union { - struct { - char *raw; - char *name; - u64 addr; - bool multi_regs; - } source; - struct { - struct ins ins; - struct ins_operands *ops; - } locked; - struct { - char *raw_comment; - char *raw_func_start; - } jump; - }; -}; - -struct arch; - -bool arch__is(struct arch *arch, const char *name); - -struct ins_ops { - void (*free)(struct ins_operands *ops); - int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms); - int (*scnprintf)(struct ins *ins, char *bf, size_t size, - struct ins_operands *ops, int max_ins_name); -}; - -bool ins__is_jump(const struct ins *ins); -bool ins__is_call(const struct ins *ins); -bool ins__is_ret(const struct ins *ins); -bool ins__is_lock(const struct ins *ins); -int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct 
ins_operands *ops, int max_ins_name); -bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2); - #define ANNOTATION__IPC_WIDTH 6 #define ANNOTATION__CYCLES_WIDTH 6 #define ANNOTATION__MINMAX_CYCLES_WIDTH 19 @@ -170,6 +118,8 @@ struct disasm_line { struct annotation_line al; }; +void annotation_line__add(struct annotation_line *al, struct list_head *head); + static inline double annotation_data__percent(struct annotation_data *data, unsigned int which) { @@ -211,7 +161,6 @@ static inline bool disasm_line__has_local_offset(const struct disasm_line *dl) */ bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym); -void disasm_line__free(struct disasm_line *dl); struct annotation_line * annotation_line__next(struct annotation_line *pos, struct list_head *head); @@ -234,16 +183,45 @@ int __annotation__scnprintf_samples_period(struct annotation *notes, struct evsel *evsel, bool show_freq); -int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name); size_t disasm__fprintf(struct list_head *head, FILE *fp); void symbol__calc_percent(struct symbol *sym, struct evsel *evsel); +/** + * struct sym_hist - symbol histogram information for an event + * + * @nr_samples: Total number of samples. + * @period: Sum of sample periods. + */ struct sym_hist { u64 nr_samples; u64 period; - struct sym_hist_entry addr[]; }; +/** + * struct cyc_hist - (CPU) cycle histogram for a basic block + * + * @start: Start address of current block (if known). + * @cycles: Sum of cycles for the longest basic block. + * @cycles_aggr: Total cycles for this address. + * @cycles_max: Max cycles for this address. + * @cycles_min: Min cycles for this address. + * @cycles_spark: History of cycles for the longest basic block. + * @num: Number of samples for the longest basic block. + * @num_aggr: Total number of samples for this address. + * @have_start: Whether the current branch info has a start address. + * @reset: Number of resets due to a different start address. + * + * If a sample has branch_stack and cycles info, it can construct basic blocks + * between two adjacent branches. It'd have start and end addresses but + * sometimes the start address may not be available. So the cycles are + * accounted at the end address. If multiple basic blocks end at the same + * address, it will take the longest one. + * + * The @start, @cycles, @cycles_spark and @num fields are used for the longest + * block only. Other fields are used for all cases. + * + * See __symbol__account_cycles(). + */ struct cyc_hist { u64 start; u64 cycles; @@ -258,18 +236,26 @@ struct cyc_hist { u16 reset; }; -/** struct annotated_source - symbols with hits have this attached as in sannotation +/** + * struct annotated_source - symbols with hits have this attached as in annotation * - * @histograms: Array of addr hit histograms per event being monitored - nr_histograms: This may not be the same as evsel->evlist->core.nr_entries if + * @source: List head for annotated_line (embedded in disasm_line). + * @histograms: Array of symbol histograms per event to maintain the total number + * of samples and period. + * @nr_histograms: This may not be the same as evsel->evlist->core.nr_entries if * we have more than a group in a evlist, where we will want * to see each group separately, that is why symbol__annotate2() * sets src->nr_histograms to evsel->nr_members. 
- * @lines: If 'print_lines' is specified, per source code line percentages - * @source: source parsed from a disassembler like objdump -dS - * @cyc_hist: Average cycles per basic block + * @samples: Hash map of sym_hist_entry. Keyed by event index and offset in symbol. + * @nr_events: Number of events in the current output. + * @nr_entries: Number of annotated_line in the source list. + * @nr_asm_entries: Number of annotated_line with actual asm instruction in the + * source list. + * @max_jump_sources: Maximum number of jump instructions targeting the same + * instruction. + * @widths: Precalculated width of each column in the TUI output. * - * lines is allocated, percentages calculated and all sorted by percentage + * disasm_lines are allocated, percentages calculated and all sorted by percentage * when the annotation is about to be presented, so the percentages are for * one of the entries in the histogram array, i.e. for the event/counter being * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate @@ -277,15 +263,46 @@ struct cyc_hist { */ struct annotated_source { struct list_head source; - size_t sizeof_sym_hist; struct sym_hist *histograms; - struct annotation_line **offsets; + struct hashmap *samples; int nr_histograms; + int nr_events; int nr_entries; int nr_asm_entries; - u16 max_line_len; + int max_jump_sources; + u64 start; + struct { + u8 addr; + u8 jumps; + u8 target; + u8 min_addr; + u8 max_addr; + u8 max_ins_name; + u16 max_line_len; + } widths; }; +struct annotation_line *annotated_source__get_line(struct annotated_source *src, + s64 offset); + +/** + * struct annotated_branch - basic block and IPC information for a symbol. + * + * @hit_cycles: Total executed cycles. + * @hit_insn: Total number of instructions executed. + * @total_insn: Number of instructions in the function. + * @cover_insn: Number of distinct, actually executed instructions. + * @cycles_hist: Array of cyc_hist for each instruction. + * @max_coverage: Maximum number of covered basic blocks (used for block-range). + * + * This struct is used by two different code paths when the sample has branch stack + * and cycles information. annotation__compute_ipc() calculates average IPC + * using @hit_insn / @hit_cycles. The actual coverage can be calculated using + * @cover_insn / @total_insn. The @cycles_hist can give IPC for each (longest) + * basic block ending at the given address. + * process_basic_block() calculates coverage of instructions (or basic blocks) + * in the function. + */ struct annotated_branch { u64 hit_cycles; u64 hit_insn; @@ -296,17 +313,6 @@ struct annotated_branch { }; struct LOCKABLE annotation { - u64 start; - int nr_events; - int max_jump_sources; - struct { - u8 addr; - u8 jumps; - u8 target; - u8 min_addr; - u8 max_addr; - u8 max_ins_name; - } widths; struct annotated_source *src; struct annotated_branch *branch; }; @@ -330,7 +336,7 @@ static inline int annotation__cycles_width(struct annotation *notes) static inline int annotation__pcnt_width(struct annotation *notes) { - return (symbol_conf.show_total_period ? 12 : 7) * notes->nr_events; + return (symbol_conf.show_total_period ? 
12 : 7) * notes->src->nr_events; } static inline bool annotation_line__filter(struct annotation_line *al) @@ -338,15 +344,12 @@ static inline bool annotation_line__filter(struct annotation_line *al) return annotate_opts.hide_src_code && al->offset == -1; } -void annotation__set_offsets(struct annotation *notes, s64 size); -void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym); void annotation__update_column_widths(struct annotation *notes); -void annotation__init_column_widths(struct annotation *notes, struct symbol *sym); void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms); static inline struct sym_hist *annotated_source__histogram(struct annotated_source *src, int idx) { - return ((void *)src->histograms) + (src->sizeof_sym_hist * idx); + return &src->histograms[idx]; } static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx) @@ -354,6 +357,17 @@ static inline struct sym_hist *annotation__histogram(struct annotation *notes, i return annotated_source__histogram(notes->src, idx); } +static inline struct sym_hist_entry * +annotated_source__hist_entry(struct annotated_source *src, int idx, u64 offset) +{ + struct sym_hist_entry *entry; + long key = offset << 16 | idx; + + if (!hashmap__find(src->samples, key, &entry)) + return NULL; + return entry; +} + static inline struct annotation *symbol__annotation(struct symbol *sym) { return (void *)sym - symbol_conf.priv_size; @@ -442,14 +456,22 @@ int annotate_check_args(void); /** * struct annotated_op_loc - Location info of instruction operand - * @reg: Register in the operand + * @reg1: First register in the operand + * @reg2: Second register in the operand * @offset: Memory access offset in the operand + * @segment: Segment selector register * @mem_ref: Whether the operand accesses memory + * @multi_regs: Whether the second register is used + * @imm: Whether the operand is an immediate value (in offset) */ struct annotated_op_loc { - int reg; + int reg1; + int reg2; int offset; + u8 segment; bool mem_ref; + bool multi_regs; + bool imm; }; enum annotated_insn_ops { @@ -459,6 +481,17 @@ enum annotated_insn_ops { INSN_OP_MAX, }; +enum annotated_x86_segment { + INSN_SEG_NONE = 0, + + INSN_SEG_X86_CS, + INSN_SEG_X86_DS, + INSN_SEG_X86_ES, + INSN_SEG_X86_FS, + INSN_SEG_X86_GS, + INSN_SEG_X86_SS, +}; + /** * struct annotated_insn_loc - Location info of instruction * @ops: Array of location info for source and target operands @@ -487,4 +520,24 @@ struct annotated_item_stat { }; extern struct list_head ann_insn_stat; +/* Calculate PC-relative address */ +u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset, + struct disasm_line *dl); + +/** + * struct annotated_basic_block - Basic block of instructions + * @list: List node + * @begin: start instruction in the block + * @end: end instruction in the block + */ +struct annotated_basic_block { + struct list_head list; + struct disasm_line *begin; + struct disasm_line *end; +}; + +/* Get a list of basic blocks from src to dst addresses */ +int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst, + struct list_head *head); + #endif /* __PERF_ANNOTATE_H */ diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 3684e6009b63..e2f317063eec 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp, struct evlist *evlist, struct evsel *evsel, int idx) { - bool per_cpu = 
!perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus); + bool per_cpu = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus); mp->mmap_needed = evsel->needs_auxtrace_mmap; @@ -218,15 +218,20 @@ static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues) return queue_array; } -int auxtrace_queues__init(struct auxtrace_queues *queues) +int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues) { - queues->nr_queues = AUXTRACE_INIT_NR_QUEUES; + queues->nr_queues = nr_queues; queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues); if (!queues->queue_array) return -ENOMEM; return 0; } +int auxtrace_queues__init(struct auxtrace_queues *queues) +{ + return auxtrace_queues__init_nr(queues, AUXTRACE_INIT_NR_QUEUES); +} + static int auxtrace_queues__grow(struct auxtrace_queues *queues, unsigned int new_nr_queues) { @@ -648,7 +653,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr, static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx) { - bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus); + bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus); if (per_cpu_mmaps) { struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx); @@ -1466,6 +1471,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts, char *endptr; bool period_type_set = false; bool period_set = false; + bool iy = false; synth_opts->set = true; @@ -1484,6 +1490,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts, switch (*p++) { case 'i': case 'y': + iy = true; if (p[-1] == 'y') synth_opts->cycles = true; else @@ -1649,7 +1656,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts, } } out: - if (synth_opts->instructions || synth_opts->cycles) { + if (iy) { if (!period_type_set) synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE; @@ -2652,7 +2659,7 @@ static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso) } filt->addr = 0; - filt->size = dso->data.file_size; + filt->size = dso__data(dso)->file_size; return 0; } diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index 55702215a82d..8a6ec9565835 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h @@ -521,6 +521,7 @@ int auxtrace_mmap__read_snapshot(struct mmap *map, struct perf_tool *tool, process_auxtrace_t fn, size_t snapshot_size); +int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues); int auxtrace_queues__init(struct auxtrace_queues *queues); int auxtrace_queues__add_event(struct auxtrace_queues *queues, struct perf_session *session, diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c index dec910989701..04068d48683f 100644 --- a/tools/perf/util/block-info.c +++ b/tools/perf/util/block-info.c @@ -43,26 +43,14 @@ static struct block_header_column { } }; -struct block_info *block_info__get(struct block_info *bi) -{ - if (bi) - refcount_inc(&bi->refcnt); - return bi; -} - -void block_info__put(struct block_info *bi) +struct block_info *block_info__new(void) { - if (bi && refcount_dec_and_test(&bi->refcnt)) - free(bi); + return zalloc(sizeof(struct block_info)); } -struct block_info *block_info__new(void) +void block_info__delete(struct block_info *bi) { - struct block_info *bi = zalloc(sizeof(*bi)); - - if (bi) - refcount_set(&bi->refcnt, 1); - return bi; + free(bi); } int64_t __block_info__cmp(struct 
hist_entry *left, struct hist_entry *right) @@ -148,7 +136,7 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh, he_block = hists__add_entry_block(&bh->block_hists, &al, bi); if (!he_block) { - block_info__put(bi); + block_info__delete(bi); return -1; } } @@ -319,7 +307,7 @@ static int block_dso_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, if (map && map__dso(map)) { return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, - map__dso(map)->short_name); + dso__short_name(map__dso(map))); } return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h index 96f53e89795e..0b9e1aad4c55 100644 --- a/tools/perf/util/block-info.h +++ b/tools/perf/util/block-info.h @@ -3,7 +3,6 @@ #define __PERF_BLOCK_H #include <linux/types.h> -#include <linux/refcount.h> #include "hist.h" #include "symbol.h" #include "sort.h" @@ -19,7 +18,6 @@ struct block_info { u64 total_cycles; int num; int num_aggr; - refcount_t refcnt; }; struct block_fmt { @@ -48,19 +46,8 @@ struct block_report { int nr_fmts; }; -struct block_hist; - struct block_info *block_info__new(void); -struct block_info *block_info__get(struct block_info *bi); -void block_info__put(struct block_info *bi); - -static inline void __block_info__zput(struct block_info **bi) -{ - block_info__put(*bi); - *bi = NULL; -} - -#define block_info__zput(bi) __block_info__zput(&bi) +void block_info__delete(struct block_info *bi); int64_t __block_info__cmp(struct hist_entry *left, struct hist_entry *right); diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c index 3573e0b7ef3e..827695cd0408 100644 --- a/tools/perf/util/bpf-event.c +++ b/tools/perf/util/bpf-event.c @@ -59,10 +59,11 @@ static int machine__process_bpf_event_load(struct machine *machine, if (map) { struct dso *dso = map__dso(map); - dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO; - dso->bpf_prog.id = id; - dso->bpf_prog.sub_id = i; - dso->bpf_prog.env = env; + dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_PROG_INFO); + dso__bpf_prog(dso)->id = id; + dso__bpf_prog(dso)->sub_id = i; + dso__bpf_prog(dso)->env = env; + map__put(map); } } return 0; diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c index 1c82377ed78b..ea29c372f339 100644 --- a/tools/perf/util/bpf_counter_cgroup.c +++ b/tools/perf/util/bpf_counter_cgroup.c @@ -136,9 +136,8 @@ static int bperf_load_program(struct evlist *evlist) cgrp = evsel->cgrp; if (read_cgroup_id(cgrp) < 0) { - pr_err("Failed to get cgroup id\n"); - err = -1; - goto out; + pr_debug("Failed to get cgroup id for %s\n", cgrp->name); + cgrp->id = 0; } map_fd = bpf_map__fd(skel->maps.cgrp_idx); diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c index 6eb2c78fd7f4..44f0f708a15d 100644 --- a/tools/perf/util/bpf_kwork.c +++ b/tools/perf/util/bpf_kwork.c @@ -147,12 +147,12 @@ static bool valid_kwork_class_type(enum kwork_class_type type) static int setup_filters(struct perf_kwork *kwork) { - u8 val = 1; - int i, nr_cpus, key, fd; - struct perf_cpu_map *map; - if (kwork->cpu_list != NULL) { - fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter); + int idx, nr_cpus; + struct perf_cpu_map *map; + struct perf_cpu cpu; + int fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter); + if (fd < 0) { pr_debug("Invalid cpu filter fd\n"); return -1; @@ -165,8 +165,8 @@ static int setup_filters(struct perf_kwork *kwork) } nr_cpus = libbpf_num_possible_cpus(); - for (i = 0; i < perf_cpu_map__nr(map); i++) { - 
struct perf_cpu cpu = perf_cpu_map__cpu(map, i); + perf_cpu_map__for_each_cpu(cpu, idx, map) { + u8 val = 1; if (cpu.cpu >= nr_cpus) { perf_cpu_map__put(map); @@ -181,6 +181,8 @@ static int setup_filters(struct perf_kwork *kwork) } if (kwork->profile_name != NULL) { + int key, fd; + if (strlen(kwork->profile_name) >= MAX_KWORKNAME) { pr_err("Requested name filter %s too large, limit to %d\n", kwork->profile_name, MAX_KWORKNAME - 1); diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c index 035e02272790..22a3b00a1e23 100644 --- a/tools/perf/util/bpf_kwork_top.c +++ b/tools/perf/util/bpf_kwork_top.c @@ -122,11 +122,11 @@ static bool valid_kwork_class_type(enum kwork_class_type type) static int setup_filters(struct perf_kwork *kwork) { - u8 val = 1; - int i, nr_cpus, fd; - struct perf_cpu_map *map; - if (kwork->cpu_list) { + int idx, nr_cpus, fd; + struct perf_cpu_map *map; + struct perf_cpu cpu; + fd = bpf_map__fd(skel->maps.kwork_top_cpu_filter); if (fd < 0) { pr_debug("Invalid cpu filter fd\n"); @@ -140,8 +140,8 @@ static int setup_filters(struct perf_kwork *kwork) } nr_cpus = libbpf_num_possible_cpus(); - for (i = 0; i < perf_cpu_map__nr(map); i++) { - struct perf_cpu cpu = perf_cpu_map__cpu(map, i); + perf_cpu_map__for_each_cpu(cpu, idx, map) { + u8 val = 1; if (cpu.cpu >= nr_cpus) { perf_cpu_map__put(map); diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c index 31ff19afc20c..b4cb3fe5cc25 100644 --- a/tools/perf/util/bpf_lock_contention.c +++ b/tools/perf/util/bpf_lock_contention.c @@ -179,6 +179,123 @@ int lock_contention_prepare(struct lock_contention *con) return 0; } +/* + * Run the BPF program directly using BPF_PROG_TEST_RUN to update the end + * timestamp in ktime so that it can calculate delta easily. + */ +static void mark_end_timestamp(void) +{ + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, + .flags = BPF_F_TEST_RUN_ON_CPU, + ); + int prog_fd = bpf_program__fd(skel->progs.end_timestamp); + + bpf_prog_test_run_opts(prog_fd, &opts); +} + +static void update_lock_stat(int map_fd, int pid, u64 end_ts, + enum lock_aggr_mode aggr_mode, + struct tstamp_data *ts_data) +{ + u64 delta; + struct contention_key stat_key = {}; + struct contention_data stat_data; + + if (ts_data->timestamp >= end_ts) + return; + + delta = end_ts - ts_data->timestamp; + + switch (aggr_mode) { + case LOCK_AGGR_CALLER: + stat_key.stack_id = ts_data->stack_id; + break; + case LOCK_AGGR_TASK: + stat_key.pid = pid; + break; + case LOCK_AGGR_ADDR: + stat_key.lock_addr_or_cgroup = ts_data->lock; + break; + case LOCK_AGGR_CGROUP: + /* TODO */ + return; + default: + return; + } + + if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0) + return; + + stat_data.total_time += delta; + stat_data.count++; + + if (delta > stat_data.max_time) + stat_data.max_time = delta; + if (delta < stat_data.min_time) + stat_data.min_time = delta; + + bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST); +} + +/* + * Account entries in the tstamp map (which didn't see the corresponding + * lock:contention_end tracepoint) using end_ts. 
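+ *
+ * For example, a lock that is still being waited for when the session stops
+ * has a tstamp entry but never saw a contention_end event; accounting the
+ * time up to end_ts at least yields a lower bound of its wait time rather
+ * than dropping the entry entirely.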
+ */ +static void account_end_timestamp(struct lock_contention *con) +{ + int ts_fd, stat_fd; + int *prev_key, key; + u64 end_ts = skel->bss->end_ts; + int total_cpus; + enum lock_aggr_mode aggr_mode = con->aggr_mode; + struct tstamp_data ts_data, *cpu_data; + + /* Iterate per-task tstamp map (key = TID) */ + ts_fd = bpf_map__fd(skel->maps.tstamp); + stat_fd = bpf_map__fd(skel->maps.lock_stat); + + prev_key = NULL; + while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) { + if (bpf_map_lookup_elem(ts_fd, &key, &ts_data) == 0) { + int pid = key; + + if (aggr_mode == LOCK_AGGR_TASK && con->owner) + pid = ts_data.flags; + + update_lock_stat(stat_fd, pid, end_ts, aggr_mode, + &ts_data); + } + + prev_key = &key; + } + + /* Now it'll check per-cpu tstamp map which doesn't have TID. */ + if (aggr_mode == LOCK_AGGR_TASK || aggr_mode == LOCK_AGGR_CGROUP) + return; + + total_cpus = cpu__max_cpu().cpu; + ts_fd = bpf_map__fd(skel->maps.tstamp_cpu); + + cpu_data = calloc(total_cpus, sizeof(*cpu_data)); + if (cpu_data == NULL) + return; + + prev_key = NULL; + while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) { + if (bpf_map_lookup_elem(ts_fd, &key, cpu_data) < 0) + goto next; + + for (int i = 0; i < total_cpus; i++) { + update_lock_stat(stat_fd, -1, end_ts, aggr_mode, + &cpu_data[i]); + } + +next: + prev_key = &key; + } + free(cpu_data); +} + int lock_contention_start(void) { skel->bss->enabled = 1; @@ -188,6 +305,7 @@ int lock_contention_start(void) int lock_contention_stop(void) { skel->bss->enabled = 0; + mark_end_timestamp(); return 0; } @@ -210,7 +328,7 @@ static const char *lock_contention_get_name(struct lock_contention *con, /* do not update idle comm which contains CPU number */ if (pid) { - struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid); + struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid); if (t == NULL) return name; @@ -301,8 +419,10 @@ int lock_contention_read(struct lock_contention *con) if (stack_trace == NULL) return -1; + account_end_timestamp(con); + if (con->aggr_mode == LOCK_AGGR_TASK) { - struct thread *idle = __machine__findnew_thread(machine, + struct thread *idle = machine__findnew_thread(machine, /*pid=*/0, /*tid=*/0); thread__set_comm(idle, "swapper", /*timestamp=*/0); diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c index 52c270330ae0..0acbd74e8c76 100644 --- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c +++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c @@ -6,7 +6,7 @@ * payload expected by the 'perf trace' beautifiers. 
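account_end_timestamp() above uses the standard libbpf idiom for walking a BPF map from user space: bpf_map_get_next_key() advances a cursor through the keys and bpf_map_lookup_elem() fetches each value. A generic sketch of the loop, with int keys and a caller-supplied value buffer:

#include <bpf/bpf.h>

/* Visit every entry of a BPF map whose keys are ints. */
static void for_each_map_entry(int map_fd, void (*cb)(int key, void *val),
			       void *val_buf)
{
	int *prev_key = NULL, key;

	while (!bpf_map_get_next_key(map_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(map_fd, &key, val_buf) == 0)
			cb(key, val_buf);
		prev_key = &key;	/* continue from the key just visited */
	}
}

Note that deleting entries during the walk needs care, since the cursor key may vanish; the code above only reads.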
*/ -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <linux/limits.h> @@ -22,19 +22,6 @@ #define MAX_CPUS 4096 -// FIXME: These should come from system headers -#ifndef bool -typedef char bool; -#endif -typedef int pid_t; -typedef long long int __s64; -typedef __s64 time64_t; - -struct timespec64 { - time64_t tv_sec; - long int tv_nsec; -}; - /* bpf-output associated map */ struct __augmented_syscalls__ { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); @@ -354,6 +341,27 @@ failure: return 1; /* Failure: don't filter */ } +SEC("tp/syscalls/sys_enter_nanosleep") +int sys_enter_nanosleep(struct syscall_enter_args *args) +{ + struct augmented_args_payload *augmented_args = augmented_args_payload(); + const void *req_arg = (const void *)args->args[0]; + unsigned int len = sizeof(augmented_args->args); + __u32 size = sizeof(struct timespec64); + + if (augmented_args == NULL) + goto failure; + + if (size > sizeof(augmented_args->__data)) + goto failure; + + bpf_probe_read_user(&augmented_args->__data, size, req_arg); + + return augmented__output(args, augmented_args, len + size); +failure: + return 1; /* Failure: don't filter */ +} + static pid_t getpid(void) { return bpf_get_current_pid_tgid(); diff --git a/tools/perf/util/bpf_skel/bench_uprobe.bpf.c b/tools/perf/util/bpf_skel/bench_uprobe.bpf.c index 2c55896bb33c..a01c7f791fcd 100644 --- a/tools/perf/util/bpf_skel/bench_uprobe.bpf.c +++ b/tools/perf/util/bpf_skel/bench_uprobe.bpf.c @@ -4,6 +4,7 @@ #include <bpf/bpf_tracing.h> unsigned int nr_uprobes; +unsigned int nr_uretprobes; SEC("uprobe") int BPF_UPROBE(empty) @@ -20,4 +21,19 @@ int BPF_UPROBE(trace_printk) return 0; } +SEC("uretprobe") +int BPF_URETPROBE(empty_ret) +{ + return 0; +} + +SEC("uretprobe") +int BPF_URETPROBE(trace_printk_ret) +{ + char fmt[] = "perf bench uretprobe %u"; + + bpf_trace_printk(fmt, sizeof(fmt), ++nr_uretprobes); + return 0; +} + char LICENSE[] SEC("license") = "Dual BSD/GPL"; diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c index 95cd8414f6ef..d931a898c434 100644 --- a/tools/perf/util/bpf_skel/lock_contention.bpf.c +++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c @@ -19,13 +19,6 @@ #define LCB_F_PERCPU (1U << 4) #define LCB_F_MUTEX (1U << 5) -struct tstamp_data { - __u64 timestamp; - __u64 lock; - __u32 flags; - __s32 stack_id; -}; - /* callstack storage */ struct { __uint(type, BPF_MAP_TYPE_STACK_TRACE); @@ -140,6 +133,8 @@ int perf_subsys_id = -1; /* determine the key of lock stat */ int aggr_mode; +__u64 end_ts; + /* error stat */ int task_fail; int stack_fail; @@ -289,6 +284,7 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags) struct task_struct *curr; struct mm_struct___old *mm_old; struct mm_struct___new *mm_new; + struct sighand_struct *sighand; switch (flags) { case LCB_F_READ: /* rwsem */ @@ -310,7 +306,9 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags) break; case LCB_F_SPIN: /* spinlock */ curr = bpf_get_current_task_btf(); - if (&curr->sighand->siglock == (void *)lock) + sighand = curr->sighand; + + if (sighand && &sighand->siglock == (void *)lock) return LCD_F_SIGHAND_LOCK; break; default: @@ -559,4 +557,11 @@ int BPF_PROG(collect_lock_syms) return 0; } +SEC("raw_tp/bpf_test_finish") +int BPF_PROG(end_timestamp) +{ + end_ts = bpf_ktime_get_ns(); + return 0; +} + char LICENSE[] SEC("license") = "Dual BSD/GPL"; diff --git a/tools/perf/util/bpf_skel/lock_data.h b/tools/perf/util/bpf_skel/lock_data.h index 08482daf61be..36af11faad03 
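The end_ts plumbing works because libbpf skeletons expose the BPF object's global data sections to user space: the raw_tp program stores bpf_ktime_get_ns() into a .bss global, and after the program is triggered the loader simply reads skel->bss->end_ts. A sketch of the user-space half, with a hypothetical skeleton name:

#include <bpf/libbpf.h>
#include "sample.skel.h"	/* hypothetical generated skeleton */

static __u64 read_end_ts(void)
{
	struct sample_bpf *skel = sample_bpf__open_and_load();
	__u64 ts = 0;

	if (skel) {
		/* ...trigger the program, e.g. via BPF_PROG_TEST_RUN... */
		ts = skel->bss->end_ts;	/* .bss is memory-mapped: plain load */
		sample_bpf__destroy(skel);
	}
	return ts;
}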
100644 --- a/tools/perf/util/bpf_skel/lock_data.h +++ b/tools/perf/util/bpf_skel/lock_data.h @@ -3,6 +3,13 @@ #ifndef UTIL_BPF_SKEL_LOCK_DATA_H #define UTIL_BPF_SKEL_LOCK_DATA_H +struct tstamp_data { + u64 timestamp; + u64 lock; + u32 flags; + u32 stack_id; +}; + struct contention_key { u32 stack_id; u32 pid; diff --git a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h index ab84a6e1da5e..e9028235d771 100644 --- a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h +++ b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h @@ -20,6 +20,13 @@ typedef __s64 s64; typedef int pid_t; +typedef __s64 time64_t; + +struct timespec64 { + time64_t tv_sec; + long int tv_nsec; +}; + enum cgroup_subsys_id { perf_event_cgrp_id = 8, }; diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index 03c64b85383b..83a1581e8cf1 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -60,7 +60,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, addr_location__init(&al); if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) - map__dso(al.map)->hit = 1; + dso__set_hit(map__dso(al.map)); addr_location__exit(&al); thread__put(thread); @@ -272,10 +272,10 @@ char *__dso__build_id_filename(const struct dso *dso, char *bf, size_t size, bool alloc = (bf == NULL); int ret; - if (!dso->has_build_id) + if (!dso__has_build_id(dso)) return NULL; - build_id__sprintf(&dso->bid, sbuild_id); + build_id__sprintf(dso__bid_const(dso), sbuild_id); linkname = build_id_cache__linkname(sbuild_id, NULL, 0); if (!linkname) return NULL; @@ -327,48 +327,56 @@ static int write_buildid(const char *name, size_t name_len, struct build_id *bid return write_padded(fd, name, name_len + 1, len); } -static int machine__write_buildid_table(struct machine *machine, - struct feat_fd *fd) +struct machine__write_buildid_table_cb_args { + struct machine *machine; + struct feat_fd *fd; + u16 kmisc, umisc; +}; + +static int machine__write_buildid_table_cb(struct dso *dso, void *data) { - int err = 0; - struct dso *pos; - u16 kmisc = PERF_RECORD_MISC_KERNEL, - umisc = PERF_RECORD_MISC_USER; + struct machine__write_buildid_table_cb_args *args = data; + const char *name; + size_t name_len; + bool in_kernel = false; - if (!machine__is_host(machine)) { - kmisc = PERF_RECORD_MISC_GUEST_KERNEL; - umisc = PERF_RECORD_MISC_GUEST_USER; - } + if (!dso__has_build_id(dso)) + return 0; - dsos__for_each_with_build_id(pos, &machine->dsos.head) { - const char *name; - size_t name_len; - bool in_kernel = false; + if (!dso__hit(dso) && !dso__is_vdso(dso)) + return 0; - if (!pos->hit && !dso__is_vdso(pos)) - continue; + if (dso__is_vdso(dso)) { + name = dso__short_name(dso); + name_len = dso__short_name_len(dso); + } else if (dso__is_kcore(dso)) { + name = args->machine->mmap_name; + name_len = strlen(name); + } else { + name = dso__long_name(dso); + name_len = dso__long_name_len(dso); + } - if (dso__is_vdso(pos)) { - name = pos->short_name; - name_len = pos->short_name_len; - } else if (dso__is_kcore(pos)) { - name = machine->mmap_name; - name_len = strlen(name); - } else { - name = pos->long_name; - name_len = pos->long_name_len; - } + in_kernel = dso__kernel(dso) || is_kernel_module(name, PERF_RECORD_MISC_CPUMODE_UNKNOWN); + return write_buildid(name, name_len, dso__bid(dso), args->machine->pid, + in_kernel ? 
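machine__write_buildid_table() above is converted from an open-coded list walk to the callback style used for the locked dsos collection: per-walk state moves into an on-stack args struct and a cb(dso, data) runs for each entry, with a non-zero return stopping the walk. The generic shape of that idiom, with illustrative (non-perf) names:

struct walk_args {
	int fd;			/* whatever state the callback needs */
};

static int visit_cb(struct item *item, void *data)
{
	struct walk_args *args = data;

	return write_item(args->fd, item);	/* non-zero stops the walk */
}

/* int err = items__for_each(collection, visit_cb, &args); */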
args->kmisc : args->umisc, args->fd); +} - in_kernel = pos->kernel || - is_kernel_module(name, - PERF_RECORD_MISC_CPUMODE_UNKNOWN); - err = write_buildid(name, name_len, &pos->bid, machine->pid, - in_kernel ? kmisc : umisc, fd); - if (err) - break; +static int machine__write_buildid_table(struct machine *machine, struct feat_fd *fd) +{ + struct machine__write_buildid_table_cb_args args = { + .machine = machine, + .fd = fd, + .kmisc = PERF_RECORD_MISC_KERNEL, + .umisc = PERF_RECORD_MISC_USER, + }; + + if (!machine__is_host(machine)) { + args.kmisc = PERF_RECORD_MISC_GUEST_KERNEL; + args.umisc = PERF_RECORD_MISC_GUEST_USER; } - return err; + return dsos__for_each_dso(&machine->dsos, machine__write_buildid_table_cb, &args); } int perf_session__write_buildid_table(struct perf_session *session, @@ -390,42 +398,6 @@ int perf_session__write_buildid_table(struct perf_session *session, return err; } -static int __dsos__hit_all(struct list_head *head) -{ - struct dso *pos; - - list_for_each_entry(pos, head, node) - pos->hit = true; - - return 0; -} - -static int machine__hit_all_dsos(struct machine *machine) -{ - return __dsos__hit_all(&machine->dsos.head); -} - -int dsos__hit_all(struct perf_session *session) -{ - struct rb_node *nd; - int err; - - err = machine__hit_all_dsos(&session->machines.host); - if (err) - return err; - - for (nd = rb_first_cached(&session->machines.guests); nd; - nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - - err = machine__hit_all_dsos(pos); - if (err) - return err; - } - - return 0; -} - void disable_buildid_cache(void) { no_buildid_cache = true; @@ -904,11 +876,11 @@ static bool dso__build_id_mismatch(struct dso *dso, const char *name) struct build_id bid; bool ret = false; - mutex_lock(&dso->lock); - if (filename__read_build_id_ns(name, &bid, dso->nsinfo) >= 0) + mutex_lock(dso__lock(dso)); + if (filename__read_build_id_ns(name, &bid, dso__nsinfo(dso)) >= 0) ret = !dso__build_id_equal(dso, &bid); - mutex_unlock(&dso->lock); + mutex_unlock(dso__lock(dso)); return ret; } @@ -918,13 +890,13 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine, { bool is_kallsyms = dso__is_kallsyms(dso); bool is_vdso = dso__is_vdso(dso); - const char *name = dso->long_name; + const char *name = dso__long_name(dso); const char *proper_name = NULL; const char *root_dir = NULL; char *allocated_name = NULL; int ret = 0; - if (!dso->has_build_id) + if (!dso__has_build_id(dso)) return 0; if (dso__is_kcore(dso)) { @@ -949,10 +921,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine, if (!is_kallsyms && dso__build_id_mismatch(dso, name)) goto out_free; - mutex_lock(&dso->lock); - ret = build_id_cache__add_b(&dso->bid, name, dso->nsinfo, + mutex_lock(dso__lock(dso)); + ret = build_id_cache__add_b(dso__bid(dso), name, dso__nsinfo(dso), is_kallsyms, is_vdso, proper_name, root_dir); - mutex_unlock(&dso->lock); + mutex_unlock(dso__lock(dso)); out_free: free(allocated_name); return ret; @@ -992,7 +964,7 @@ int perf_session__cache_build_ids(struct perf_session *session) static bool machine__read_build_ids(struct machine *machine, bool with_hits) { - return __dsos__read_build_ids(&machine->dsos.head, with_hits); + return dsos__read_build_ids(&machine->dsos, with_hits); } bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h index 4e3a1169379b..3fa8bffb07ca 100644 --- a/tools/perf/util/build-id.h +++ 
b/tools/perf/util/build-id.h @@ -39,8 +39,6 @@ int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct evsel *evsel, struct machine *machine); -int dsos__hit_all(struct perf_session *session); - int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct evsel *evsel, struct machine *machine); diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 8262f69118db..1730b852a947 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -606,7 +606,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor) call->brtype_stat = zalloc(sizeof(*call->brtype_stat)); if (!call->brtype_stat) { perror("not enough memory for the code path branch statistics"); - free(call->brtype_stat); + zfree(&call->brtype_stat); return -ENOMEM; } } @@ -1157,7 +1157,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node * if (al->map == NULL) goto out; } - if (RC_CHK_EQUAL(al->maps, machine__kernel_maps(machine))) { + if (maps__equal(al->maps, machine__kernel_maps(machine))) { if (machine__is_host(machine)) { al->cpumode = PERF_RECORD_MISC_KERNEL; al->level = 'k'; @@ -1205,7 +1205,7 @@ char *callchain_list__sym_name(struct callchain_list *cl, if (show_dso) scnprintf(bf + printed, bfsize - printed, " %s", cl->ms.map ? - map__dso(cl->ms.map)->short_name : + dso__short_name(map__dso(cl->ms.map)) : "unknown"); return bf; diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index fcb509058499..0f759dd96db7 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c @@ -465,9 +465,11 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str, name = cn->name + prefix_len; if (name[0] == '/' && name[1]) name++; + + /* the cgroup can go away in the meantime */ cgrp = cgroup__new(name, open_cgroup); if (cgrp == NULL) - goto out_err; + continue; leader = NULL; evlist__for_each_entry(orig_list, pos) { diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c index afb8d4fd2644..233f2b6edf52 100644 --- a/tools/perf/util/comm.c +++ b/tools/perf/util/comm.c @@ -1,108 +1,181 @@ // SPDX-License-Identifier: GPL-2.0 #include "comm.h" #include <errno.h> -#include <stdlib.h> -#include <stdio.h> #include <string.h> +#include <internal/rc_check.h> #include <linux/refcount.h> -#include <linux/rbtree.h> #include <linux/zalloc.h> #include "rwsem.h" -struct comm_str { - char *str; - struct rb_node rb_node; +DECLARE_RC_STRUCT(comm_str) { refcount_t refcnt; + char str[]; }; -/* Should perhaps be moved to struct machine */ -static struct rb_root comm_str_root; -static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,}; +static struct comm_strs { + struct rw_semaphore lock; + struct comm_str **strs; + int num_strs; + int capacity; +} _comm_strs; -static struct comm_str *comm_str__get(struct comm_str *cs) +static void comm_strs__remove_if_last(struct comm_str *cs); + +static void comm_strs__init(void) +{ + init_rwsem(&_comm_strs.lock); + _comm_strs.capacity = 16; + _comm_strs.num_strs = 0; + _comm_strs.strs = calloc(16, sizeof(*_comm_strs.strs)); +} + +static struct comm_strs *comm_strs__get(void) { - if (cs && refcount_inc_not_zero(&cs->refcnt)) - return cs; + static pthread_once_t comm_strs_type_once = PTHREAD_ONCE_INIT; - return NULL; + pthread_once(&comm_strs_type_once, comm_strs__init); + + return &_comm_strs; } -static void comm_str__put(struct comm_str *cs) +static refcount_t 
*comm_str__refcnt(struct comm_str *cs) { - if (cs && refcount_dec_and_test(&cs->refcnt)) { - down_write(&comm_str_lock); - rb_erase(&cs->rb_node, &comm_str_root); - up_write(&comm_str_lock); - zfree(&cs->str); - free(cs); - } + return &RC_CHK_ACCESS(cs)->refcnt; +} + +static const char *comm_str__str(const struct comm_str *cs) +{ + return &RC_CHK_ACCESS(cs)->str[0]; } -static struct comm_str *comm_str__alloc(const char *str) +static struct comm_str *comm_str__get(struct comm_str *cs) { - struct comm_str *cs; + struct comm_str *result; - cs = zalloc(sizeof(*cs)); + if (RC_CHK_GET(result, cs)) + refcount_inc_not_zero(comm_str__refcnt(cs)); + + return result; +} + +static void comm_str__put(struct comm_str *cs) +{ if (!cs) - return NULL; + return; - cs->str = strdup(str); - if (!cs->str) { - free(cs); - return NULL; + if (refcount_dec_and_test(comm_str__refcnt(cs))) { + RC_CHK_FREE(cs); + } else { + if (refcount_read(comm_str__refcnt(cs)) == 1) + comm_strs__remove_if_last(cs); + + RC_CHK_PUT(cs); } +} - refcount_set(&cs->refcnt, 1); +static struct comm_str *comm_str__new(const char *str) +{ + struct comm_str *result = NULL; + RC_STRUCT(comm_str) *cs; - return cs; + cs = malloc(sizeof(*cs) + strlen(str) + 1); + if (ADD_RC_CHK(result, cs)) { + refcount_set(comm_str__refcnt(result), 1); + strcpy(&cs->str[0], str); + } + return result; } -static -struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root) +static int comm_str__cmp(const void *_lhs, const void *_rhs) { - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct comm_str *iter, *new; - int cmp; + const struct comm_str *lhs = *(const struct comm_str * const *)_lhs; + const struct comm_str *rhs = *(const struct comm_str * const *)_rhs; - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct comm_str, rb_node); + return strcmp(comm_str__str(lhs), comm_str__str(rhs)); +} + +static int comm_str__search(const void *_key, const void *_member) +{ + const char *key = _key; + const struct comm_str *member = *(const struct comm_str * const *)_member; - /* - * If we race with comm_str__put, iter->refcnt is 0 - * and it will be removed within comm_str__put call - * shortly, ignore it in this search. - */ - cmp = strcmp(str, iter->str); - if (!cmp && comm_str__get(iter)) - return iter; + return strcmp(key, comm_str__str(member)); +} - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; +static void comm_strs__remove_if_last(struct comm_str *cs) +{ + struct comm_strs *comm_strs = comm_strs__get(); + + down_write(&comm_strs->lock); + /* + * Are there only references from the array, if so remove the array + * reference under the write lock so that we don't race with findnew. 
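The comm.c rewrite above replaces the rbtree intern table with a sorted array: lookups use bsearch(), inserts append and re-qsort(), and findnew takes a read lock first with a write-locked re-check, so two threads racing on the same string still end up sharing one comm_str. The double-checked locking shape, in outline (find/insert_new are placeholders for the helpers above):

down_read(&tbl->lock);
result = find(tbl, str);		/* fast path: read lock only */
up_read(&tbl->lock);
if (result)
	return result;

down_write(&tbl->lock);
result = find(tbl, str);		/* re-check: another thread may have
					   inserted it meanwhile */
if (!result)
	result = insert_new(tbl, str);	/* grow array, qsort to keep order */
up_write(&tbl->lock);
return result;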
+ */ + if (refcount_read(comm_str__refcnt(cs)) == 1) { + struct comm_str **entry; + + entry = bsearch(comm_str__str(cs), comm_strs->strs, comm_strs->num_strs, + sizeof(struct comm_str *), comm_str__search); + comm_str__put(*entry); + for (int i = entry - comm_strs->strs; i < comm_strs->num_strs - 1; i++) + comm_strs->strs[i] = comm_strs->strs[i + 1]; + comm_strs->num_strs--; } + up_write(&comm_strs->lock); +} - new = comm_str__alloc(str); - if (!new) - return NULL; +static struct comm_str *__comm_strs__find(struct comm_strs *comm_strs, const char *str) +{ + struct comm_str **result; - rb_link_node(&new->rb_node, parent, p); - rb_insert_color(&new->rb_node, root); + result = bsearch(str, comm_strs->strs, comm_strs->num_strs, sizeof(struct comm_str *), + comm_str__search); - return new; + if (!result) + return NULL; + + return comm_str__get(*result); } -static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root) +static struct comm_str *comm_strs__findnew(const char *str) { - struct comm_str *cs; + struct comm_strs *comm_strs = comm_strs__get(); + struct comm_str *result; - down_write(&comm_str_lock); - cs = __comm_str__findnew(str, root); - up_write(&comm_str_lock); + if (!comm_strs) + return NULL; - return cs; + down_read(&comm_strs->lock); + result = __comm_strs__find(comm_strs, str); + up_read(&comm_strs->lock); + if (result) + return result; + + down_write(&comm_strs->lock); + result = __comm_strs__find(comm_strs, str); + if (!result) { + if (comm_strs->num_strs == comm_strs->capacity) { + struct comm_str **tmp; + + tmp = reallocarray(comm_strs->strs, + comm_strs->capacity + 16, + sizeof(*comm_strs->strs)); + if (!tmp) { + up_write(&comm_strs->lock); + return NULL; + } + comm_strs->strs = tmp; + comm_strs->capacity += 16; + } + result = comm_str__new(str); + if (result) { + comm_strs->strs[comm_strs->num_strs++] = result; + qsort(comm_strs->strs, comm_strs->num_strs, sizeof(struct comm_str *), + comm_str__cmp); + } + } + up_write(&comm_strs->lock); + return comm_str__get(result); } struct comm *comm__new(const char *str, u64 timestamp, bool exec) @@ -115,7 +188,7 @@ struct comm *comm__new(const char *str, u64 timestamp, bool exec) comm->start = timestamp; comm->exec = exec; - comm->comm_str = comm_str__findnew(str, &comm_str_root); + comm->comm_str = comm_strs__findnew(str); if (!comm->comm_str) { free(comm); return NULL; @@ -128,7 +201,7 @@ int comm__override(struct comm *comm, const char *str, u64 timestamp, bool exec) { struct comm_str *new, *old = comm->comm_str; - new = comm_str__findnew(str, &comm_str_root); + new = comm_strs__findnew(str); if (!new) return -ENOMEM; @@ -149,5 +222,5 @@ void comm__free(struct comm *comm) const char *comm__str(const struct comm *comm) { - return comm->comm_str->str; + return comm_str__str(comm->comm_str); } diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 0581ee0fa5f2..27094211edd8 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -180,8 +180,6 @@ struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr) cpus->nr = nr; for (i = 0; i < nr; i++) cpus->map[i] = aggr_cpu_id__empty(); - - refcount_set(&cpus->refcnt, 1); } return cpus; @@ -222,6 +220,8 @@ static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer) return a->socket - b->socket; else if (a->die != b->die) return a->die - b->die; + else if (a->cluster != b->cluster) + return a->cluster - b->cluster; else if (a->cache_lvl != b->cache_lvl) return a->cache_lvl - b->cache_lvl; else if (a->cache != b->cache) @@ -309,6 
+309,30 @@ struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data) return id; } +int cpu__get_cluster_id(struct perf_cpu cpu) +{ + int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value); + + return ret ?: value; +} + +struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data) +{ + int cluster = cpu__get_cluster_id(cpu); + struct aggr_cpu_id id; + + /* There is no cluster_id on legacy system. */ + if (cluster == -1) + cluster = 0; + + id = aggr_cpu_id__die(cpu, data); + if (aggr_cpu_id__is_empty(&id)) + return id; + + id.cluster = cluster; + return id; +} + int cpu__get_core_id(struct perf_cpu cpu) { int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value); @@ -320,8 +344,8 @@ struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data) struct aggr_cpu_id id; int core = cpu__get_core_id(cpu); - /* aggr_cpu_id__die returns a struct with socket and die set. */ - id = aggr_cpu_id__die(cpu, data); + /* aggr_cpu_id__die returns a struct with socket die, and cluster set. */ + id = aggr_cpu_id__cluster(cpu, data); if (aggr_cpu_id__is_empty(&id)) return id; @@ -629,10 +653,10 @@ static char hex_char(unsigned char val) size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size) { - int i, cpu; + int idx; char *ptr = buf; unsigned char *bitmap; - struct perf_cpu last_cpu = perf_cpu_map__cpu(map, perf_cpu_map__nr(map) - 1); + struct perf_cpu c, last_cpu = perf_cpu_map__max(map); if (buf == NULL) return 0; @@ -643,12 +667,10 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size) return 0; } - for (i = 0; i < perf_cpu_map__nr(map); i++) { - cpu = perf_cpu_map__cpu(map, i).cpu; - bitmap[cpu / 8] |= 1 << (cpu % 8); - } + perf_cpu_map__for_each_cpu(c, idx, map) + bitmap[c.cpu / 8] |= 1 << (c.cpu % 8); - for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) { + for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) { unsigned char bits = bitmap[cpu / 8]; if (cpu % 8) @@ -683,6 +705,7 @@ bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b a->node == b->node && a->socket == b->socket && a->die == b->die && + a->cluster == b->cluster && a->cache_lvl == b->cache_lvl && a->cache == b->cache && a->core == b->core && @@ -695,6 +718,7 @@ bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a) a->node == -1 && a->socket == -1 && a->die == -1 && + a->cluster == -1 && a->cache_lvl == -1 && a->cache == -1 && a->core == -1 && @@ -708,6 +732,7 @@ struct aggr_cpu_id aggr_cpu_id__empty(void) .node = -1, .socket = -1, .die = -1, + .cluster = -1, .cache_lvl = -1, .cache = -1, .core = -1, diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 9df2aeb34d3d..ee0f6139b04a 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -5,7 +5,6 @@ #include <stdbool.h> #include <stdio.h> #include <perf/cpumap.h> -#include <linux/refcount.h> /** Identify where counts are aggregated, -1 implies not to aggregate. */ struct aggr_cpu_id { @@ -20,6 +19,8 @@ struct aggr_cpu_id { int socket; /** The die id as read from /sys/devices/system/cpu/cpuX/topology/die_id. */ int die; + /** The cluster id as read from /sys/devices/system/cpu/cpuX/topology/cluster_id */ + int cluster; /** The cache level as read from /sys/devices/system/cpu/cpuX/cache/indexY/level */ int cache_lvl; /** @@ -35,7 +36,6 @@ struct aggr_cpu_id { /** A collection of aggr_cpu_id values, the "built" version is sorted and uniqued. */ struct cpu_aggr_map { - refcount_t refcnt; /** Number of valid entries. 
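cpu__get_cluster_id() above follows the same sysfs convention as the socket/die/core helpers: read a single integer from the CPU's topology directory, with -1 standing in on kernels that predate cluster_id. Roughly, assuming direct file access instead of perf's cpu__get_topology_int() helper:

#include <stdio.h>

/* Read /sys/devices/system/cpu/cpu<N>/topology/cluster_id; -1 if absent. */
static int read_cluster_id(int cpu)
{
	char path[128];
	FILE *f;
	int val = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/cluster_id", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* legacy system: no cluster topology */
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}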
*/ int nr; /** The entries. */ @@ -87,6 +87,11 @@ int cpu__get_socket_id(struct perf_cpu cpu); */ int cpu__get_die_id(struct perf_cpu cpu); /** + * cpu__get_cluster_id - Returns the cluster id as read from + * /sys/devices/system/cpu/cpuX/topology/cluster_id for the given CPU + */ +int cpu__get_cluster_id(struct perf_cpu cpu); +/** * cpu__get_core_id - Returns the core id as read from * /sys/devices/system/cpu/cpuX/topology/core_id for the given CPU. */ @@ -127,9 +132,15 @@ struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data); */ struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data); /** - * aggr_cpu_id__core - Create an aggr_cpu_id with the core, die and socket - * populated with the core, die and socket for cpu. The function signature is - * compatible with aggr_cpu_id_get_t. + * aggr_cpu_id__cluster - Create an aggr_cpu_id with cluster, die and socket + * populated with the cluster, die and socket for cpu. The function signature + * is compatible with aggr_cpu_id_get_t. + */ +struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data); +/** + * aggr_cpu_id__core - Create an aggr_cpu_id with the core, cluster, die and + * socket populated with the core, die and socket for cpu. The function + * signature is compatible with aggr_cpu_id_get_t. */ struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data); /** diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index d65d7485886c..32818bd7cd17 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -335,8 +335,11 @@ static int cs_etm__process_aux_output_hw_id(struct perf_session *session, trace_chan_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id); /* check that we can handle this version */ - if (version > CS_AUX_HW_ID_CURR_VERSION) + if (version > CS_AUX_HW_ID_CURR_VERSION) { + pr_err("CS ETM Trace: PERF_RECORD_AUX_OUTPUT_HW_ID version %d not supported. 
Please update Perf.\n", + version); return -EINVAL; + } /* get access to the etm metadata */ etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace); diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c index 5bb3c2ba95ca..3cf64f5b23ee 100644 --- a/tools/perf/util/data-convert-json.c +++ b/tools/perf/util/data-convert-json.c @@ -134,7 +134,7 @@ static void output_sample_callchain_entry(struct perf_tool *tool, output_json_key_string(out, false, 5, "symbol", al->sym->name); if (dso) { - const char *dso_name = dso->short_name; + const char *dso_name = dso__short_name(dso); if (dso_name && strlen(dso_name) > 0) { fputc(',', out); @@ -284,7 +284,9 @@ static void output_headers(struct perf_session *session, struct convert_json *c) output_json_key_string(out, true, 2, "os-release", header->env.os_release); output_json_key_string(out, true, 2, "arch", header->env.arch); - output_json_key_string(out, true, 2, "cpu-desc", header->env.cpu_desc); + if (header->env.cpu_desc) + output_json_key_string(out, true, 2, "cpu-desc", header->env.cpu_desc); + output_json_key_string(out, true, 2, "cpuid", header->env.cpuid); output_json_key_format(out, true, 2, "nrcpus-online", "%u", header->env.nr_cpus_online); output_json_key_format(out, true, 2, "nrcpus-avail", "%u", header->env.nr_cpus_avail); diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index c29d8a382b19..08c4bfbd817f 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -413,7 +413,7 @@ ssize_t perf_data_file__write(struct perf_data_file *file, } ssize_t perf_data__write(struct perf_data *data, - void *buf, size_t size) + void *buf, size_t size) { if (data->use_stdio) { if (fwrite(buf, size, 1, data->file.fptr) == 1) @@ -424,14 +424,12 @@ ssize_t perf_data__write(struct perf_data *data, } int perf_data__switch(struct perf_data *data, - const char *postfix, - size_t pos, bool at_exit, - char **new_filepath) + const char *postfix, + size_t pos, bool at_exit, + char **new_filepath) { int ret; - if (check_pipe(data)) - return -EINVAL; if (perf_data__is_read(data)) return -EINVAL; diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h index effcc195d7e9..110f3ebde30f 100644 --- a/tools/perf/util/data.h +++ b/tools/perf/util/data.h @@ -80,7 +80,7 @@ int perf_data__open(struct perf_data *data); void perf_data__close(struct perf_data *data); ssize_t perf_data__read(struct perf_data *data, void *buf, size_t size); ssize_t perf_data__write(struct perf_data *data, - void *buf, size_t size); + void *buf, size_t size); ssize_t perf_data_file__write(struct perf_data_file *file, void *buf, size_t size); /* @@ -91,8 +91,8 @@ ssize_t perf_data_file__write(struct perf_data_file *file, * Return value is fd of new output. 
*/ int perf_data__switch(struct perf_data *data, - const char *postfix, - size_t pos, bool at_exit, char **new_filepath); + const char *postfix, + size_t pos, bool at_exit, char **new_filepath); int perf_data__create_dir(struct perf_data *data, int nr); int perf_data__open_dir(struct perf_data *data); diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c index 106429155c2e..50f916374d87 100644 --- a/tools/perf/util/db-export.c +++ b/tools/perf/util/db-export.c @@ -146,10 +146,10 @@ int db_export__comm_thread(struct db_export *dbe, struct comm *comm, int db_export__dso(struct db_export *dbe, struct dso *dso, struct machine *machine) { - if (dso->db_id) + if (dso__db_id(dso)) return 0; - dso->db_id = ++dbe->dso_last_db_id; + dso__set_db_id(dso, ++dbe->dso_last_db_id); if (dbe->export_dso) return dbe->export_dso(dbe, dso, machine); @@ -184,7 +184,7 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al, err = db_export__dso(dbe, dso, maps__machine(al->maps)); if (err) return err; - *dso_db_id = dso->db_id; + *dso_db_id = dso__db_id(dso); if (!al->sym) { al->sym = symbol__new(al->addr, 0, 0, 0, "unknown"); diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index e282b4ceb4d2..d633d15329fa 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -33,6 +33,7 @@ #endif int verbose; +int debug_kmaps; int debug_peo_args; bool dump_trace = false, quiet = false; int debug_ordered_events; @@ -40,6 +41,7 @@ static int redirect_to_stderr; int debug_data_convert; static FILE *_debug_file; bool debug_display_time; +int debug_type_profile; FILE *debug_file(void) { @@ -229,6 +231,8 @@ static struct sublevel_option debug_opts[] = { { .name = "stderr", .value_ptr = &redirect_to_stderr}, { .name = "data-convert", .value_ptr = &debug_data_convert }, { .name = "perf-event-open", .value_ptr = &debug_peo_args }, + { .name = "kmaps", .value_ptr = &debug_kmaps }, + { .name = "type-profile", .value_ptr = &debug_type_profile }, { .name = NULL, } }; @@ -267,6 +271,8 @@ int perf_quiet_option(void) /* For debug variables that are used as bool types, set to 0. 
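The debug_opts[] table above is the whole surface for --debug sub-options: each entry pairs a name with a pointer to the int it controls, so adding "kmaps" and "type-profile" is one table line per flag. A sketch of the table-driven matching idea only, not perf's actual parser:

#include <stdlib.h>
#include <string.h>

struct sublevel_option {
	const char *name;
	int *value_ptr;
};

/* Match "name" or "name=N" against the table and set *value_ptr. */
static int apply_opt(struct sublevel_option *opts, const char *tok)
{
	for (; opts->name; opts++) {
		size_t n = strlen(opts->name);

		if (!strncmp(tok, opts->name, n)) {
			*opts->value_ptr = (tok[n] == '=') ?
					   atoi(tok + n + 1) : 1;
			return 0;
		}
	}
	return -1;		/* unknown sub-option */
}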
*/ redirect_to_stderr = 0; debug_peo_args = 0; + debug_kmaps = 0; + debug_type_profile = 0; return 0; } diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index de8870980d44..a4026d1fd6a3 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -9,10 +9,12 @@ #include <linux/compiler.h> extern int verbose; +extern int debug_kmaps; extern int debug_peo_args; extern bool quiet, dump_trace; extern int debug_ordered_events; extern int debug_data_convert; +extern int debug_type_profile; #ifndef pr_fmt #define pr_fmt(fmt) fmt diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c new file mode 100644 index 000000000000..72aec8f61b94 --- /dev/null +++ b/tools/perf/util/disasm.c @@ -0,0 +1,1837 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <ctype.h> +#include <errno.h> +#include <fcntl.h> +#include <inttypes.h> +#include <libgen.h> +#include <regex.h> +#include <stdlib.h> +#include <unistd.h> + +#include <linux/string.h> +#include <subcmd/run-command.h> + +#include "annotate.h" +#include "build-id.h" +#include "debug.h" +#include "disasm.h" +#include "dso.h" +#include "env.h" +#include "evsel.h" +#include "map.h" +#include "maps.h" +#include "namespaces.h" +#include "srcline.h" +#include "symbol.h" +#include "util.h" + +static regex_t file_lineno; + +/* These can be referred from the arch-dependent code */ +static struct ins_ops call_ops; +static struct ins_ops dec_ops; +static struct ins_ops jump_ops; +static struct ins_ops mov_ops; +static struct ins_ops nop_ops; +static struct ins_ops lock_ops; +static struct ins_ops ret_ops; + +static int jump__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name); +static int call__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name); + +static void ins__sort(struct arch *arch); +static int disasm_line__parse(char *line, const char **namep, char **rawp); + +static __attribute__((constructor)) void symbol__init_regexpr(void) +{ + regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED); +} + +static int arch__grow_instructions(struct arch *arch) +{ + struct ins *new_instructions; + size_t new_nr_allocated; + + if (arch->nr_instructions_allocated == 0 && arch->instructions) + goto grow_from_non_allocated_table; + + new_nr_allocated = arch->nr_instructions_allocated + 128; + new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins)); + if (new_instructions == NULL) + return -1; + +out_update_instructions: + arch->instructions = new_instructions; + arch->nr_instructions_allocated = new_nr_allocated; + return 0; + +grow_from_non_allocated_table: + new_nr_allocated = arch->nr_instructions + 128; + new_instructions = calloc(new_nr_allocated, sizeof(struct ins)); + if (new_instructions == NULL) + return -1; + + memcpy(new_instructions, arch->instructions, arch->nr_instructions); + goto out_update_instructions; +} + +static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops) +{ + struct ins *ins; + + if (arch->nr_instructions == arch->nr_instructions_allocated && + arch__grow_instructions(arch)) + return -1; + + ins = &arch->instructions[arch->nr_instructions]; + ins->name = strdup(name); + if (!ins->name) + return -1; + + ins->ops = ops; + arch->nr_instructions++; + + ins__sort(arch); + return 0; +} + +#include "arch/arc/annotate/instructions.c" +#include "arch/arm/annotate/instructions.c" +#include "arch/arm64/annotate/instructions.c" +#include "arch/csky/annotate/instructions.c" 
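arch__grow_instructions() above handles a subtlety: an arch's initial table may be a static array (e.g. x86__instructions), so the first growth must copy into heap memory rather than realloc() a pointer that was never allocated. The general shape, using the surrounding struct ins; note the copy size is in bytes, count * sizeof(elem):

#include <stdlib.h>
#include <string.h>

static int grow_table(struct ins **tbl, size_t *nr_alloc, size_t nr_used)
{
	size_t new_alloc = (*nr_alloc ? *nr_alloc : nr_used) + 128;
	struct ins *p;

	if (*nr_alloc == 0) {			/* static table: copy out */
		p = calloc(new_alloc, sizeof(*p));
		if (!p)
			return -1;
		memcpy(p, *tbl, nr_used * sizeof(*p));
	} else {
		p = realloc(*tbl, new_alloc * sizeof(*p));
		if (!p)
			return -1;
	}
	*tbl = p;
	*nr_alloc = new_alloc;
	return 0;
}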
+#include "arch/loongarch/annotate/instructions.c" +#include "arch/mips/annotate/instructions.c" +#include "arch/x86/annotate/instructions.c" +#include "arch/powerpc/annotate/instructions.c" +#include "arch/riscv64/annotate/instructions.c" +#include "arch/s390/annotate/instructions.c" +#include "arch/sparc/annotate/instructions.c" + +static struct arch architectures[] = { + { + .name = "arc", + .init = arc__annotate_init, + }, + { + .name = "arm", + .init = arm__annotate_init, + }, + { + .name = "arm64", + .init = arm64__annotate_init, + }, + { + .name = "csky", + .init = csky__annotate_init, + }, + { + .name = "mips", + .init = mips__annotate_init, + .objdump = { + .comment_char = '#', + }, + }, + { + .name = "x86", + .init = x86__annotate_init, + .instructions = x86__instructions, + .nr_instructions = ARRAY_SIZE(x86__instructions), + .insn_suffix = "bwlq", + .objdump = { + .comment_char = '#', + .register_char = '%', + .memory_ref_char = '(', + .imm_char = '$', + }, + }, + { + .name = "powerpc", + .init = powerpc__annotate_init, + }, + { + .name = "riscv64", + .init = riscv64__annotate_init, + }, + { + .name = "s390", + .init = s390__annotate_init, + .objdump = { + .comment_char = '#', + }, + }, + { + .name = "sparc", + .init = sparc__annotate_init, + .objdump = { + .comment_char = '#', + }, + }, + { + .name = "loongarch", + .init = loongarch__annotate_init, + .objdump = { + .comment_char = '#', + }, + }, +}; + +static int arch__key_cmp(const void *name, const void *archp) +{ + const struct arch *arch = archp; + + return strcmp(name, arch->name); +} + +static int arch__cmp(const void *a, const void *b) +{ + const struct arch *aa = a; + const struct arch *ab = b; + + return strcmp(aa->name, ab->name); +} + +static void arch__sort(void) +{ + const int nmemb = ARRAY_SIZE(architectures); + + qsort(architectures, nmemb, sizeof(struct arch), arch__cmp); +} + +struct arch *arch__find(const char *name) +{ + const int nmemb = ARRAY_SIZE(architectures); + static bool sorted; + + if (!sorted) { + arch__sort(); + sorted = true; + } + + return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp); +} + +bool arch__is(struct arch *arch, const char *name) +{ + return !strcmp(arch->name, name); +} + +static void ins_ops__delete(struct ins_operands *ops) +{ + if (ops == NULL) + return; + zfree(&ops->source.raw); + zfree(&ops->source.name); + zfree(&ops->target.raw); + zfree(&ops->target.name); +} + +static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name) +{ + return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw); +} + +int ins__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name) +{ + if (ins->ops->scnprintf) + return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name); + + return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); +} + +bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2) +{ + if (!arch || !arch->ins_is_fused) + return false; + + return arch->ins_is_fused(arch, ins1, ins2); +} + +static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) +{ + char *endptr, *tok, *name; + struct map *map = ms->map; + struct addr_map_symbol target = { + .ms = { .map = map, }, + }; + + ops->target.addr = strtoull(ops->raw, &endptr, 16); + + name = strchr(endptr, '<'); + if (name == NULL) + goto indirect_call; + + name++; + + if (arch->objdump.skip_functions_char && + strchr(name, 
arch->objdump.skip_functions_char)) + return -1; + + tok = strchr(name, '>'); + if (tok == NULL) + return -1; + + *tok = '\0'; + ops->target.name = strdup(name); + *tok = '>'; + + if (ops->target.name == NULL) + return -1; +find_target: + target.addr = map__objdump_2mem(map, ops->target.addr); + + if (maps__find_ams(ms->maps, &target) == 0 && + map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr) + ops->target.sym = target.ms.sym; + + return 0; + +indirect_call: + tok = strchr(endptr, '*'); + if (tok != NULL) { + endptr++; + + /* Indirect call can use a non-rip register and offset: callq *0x8(%rbx). + * Do not parse such instruction. */ + if (strstr(endptr, "(%r") == NULL) + ops->target.addr = strtoull(endptr, NULL, 16); + } + goto find_target; +} + +static int call__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name) +{ + if (ops->target.sym) + return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name); + + if (ops->target.addr == 0) + return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); + + if (ops->target.name) + return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name); + + return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr); +} + +static struct ins_ops call_ops = { + .parse = call__parse, + .scnprintf = call__scnprintf, +}; + +bool ins__is_call(const struct ins *ins) +{ + return ins->ops == &call_ops || ins->ops == &s390_call_ops || ins->ops == &loongarch_call_ops; +} + +/* + * Prevents from matching commas in the comment section, e.g.: + * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast + * + * and skip comma as part of function arguments, e.g.: + * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc> + */ +static inline const char *validate_comma(const char *c, struct ins_operands *ops) +{ + if (ops->jump.raw_comment && c > ops->jump.raw_comment) + return NULL; + + if (ops->jump.raw_func_start && c > ops->jump.raw_func_start) + return NULL; + + return c; +} + +static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) +{ + struct map *map = ms->map; + struct symbol *sym = ms->sym; + struct addr_map_symbol target = { + .ms = { .map = map, }, + }; + const char *c = strchr(ops->raw, ','); + u64 start, end; + + ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char); + ops->jump.raw_func_start = strchr(ops->raw, '<'); + + c = validate_comma(c, ops); + + /* + * Examples of lines to parse for the _cpp_lex_token@@Base + * function: + * + * 1159e6c: jne 115aa32 <_cpp_lex_token@@Base+0xf92> + * 1159e8b: jne c469be <cpp_named_operator2name@@Base+0xa72> + * + * The first is a jump to an offset inside the same function, + * the second is to another function, i.e. that 0xa72 is an + * offset in the cpp_named_operator2name@@base function. 
+ */ + /* + * skip over possible up to 2 operands to get to address, e.g.: + * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0> + */ + if (c++ != NULL) { + ops->target.addr = strtoull(c, NULL, 16); + if (!ops->target.addr) { + c = strchr(c, ','); + c = validate_comma(c, ops); + if (c++ != NULL) + ops->target.addr = strtoull(c, NULL, 16); + } + } else { + ops->target.addr = strtoull(ops->raw, NULL, 16); + } + + target.addr = map__objdump_2mem(map, ops->target.addr); + start = map__unmap_ip(map, sym->start); + end = map__unmap_ip(map, sym->end); + + ops->target.outside = target.addr < start || target.addr > end; + + /* + * FIXME: things like this in _cpp_lex_token (gcc's cc1 program): + + cpp_named_operator2name@@Base+0xa72 + + * Point to a place that is after the cpp_named_operator2name + * boundaries, i.e. in the ELF symbol table for cc1 + * cpp_named_operator2name is marked as being 32-bytes long, but it in + * fact is much larger than that, so we seem to need a symbols__find() + * routine that looks for >= current->start and < next_symbol->start, + * possibly just for C++ objects? + * + * For now lets just make some progress by marking jumps to outside the + * current function as call like. + * + * Actual navigation will come next, with further understanding of how + * the symbol searching and disassembly should be done. + */ + if (maps__find_ams(ms->maps, &target) == 0 && + map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr) + ops->target.sym = target.ms.sym; + + if (!ops->target.outside) { + ops->target.offset = target.addr - start; + ops->target.offset_avail = true; + } else { + ops->target.offset_avail = false; + } + + return 0; +} + +static int jump__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name) +{ + const char *c; + + if (!ops->target.addr || ops->target.offset < 0) + return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); + + if (ops->target.outside && ops->target.sym != NULL) + return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name); + + c = strchr(ops->raw, ','); + c = validate_comma(c, ops); + + if (c != NULL) { + const char *c2 = strchr(c + 1, ','); + + c2 = validate_comma(c2, ops); + /* check for 3-op insn */ + if (c2 != NULL) + c = c2; + c++; + + /* mirror arch objdump's space-after-comma style */ + if (*c == ' ') + c++; + } + + return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name, + ins->name, c ? c - ops->raw : 0, ops->raw, + ops->target.offset); +} + +static void jump__delete(struct ins_operands *ops __maybe_unused) +{ + /* + * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the + * raw string, don't free them. 
+ */ +} + +static struct ins_ops jump_ops = { + .free = jump__delete, + .parse = jump__parse, + .scnprintf = jump__scnprintf, +}; + +bool ins__is_jump(const struct ins *ins) +{ + return ins->ops == &jump_ops || ins->ops == &loongarch_jump_ops; +} + +static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep) +{ + char *endptr, *name, *t; + + if (strstr(raw, "(%rip)") == NULL) + return 0; + + *addrp = strtoull(comment, &endptr, 16); + if (endptr == comment) + return 0; + name = strchr(endptr, '<'); + if (name == NULL) + return -1; + + name++; + + t = strchr(name, '>'); + if (t == NULL) + return 0; + + *t = '\0'; + *namep = strdup(name); + *t = '>'; + + return 0; +} + +static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) +{ + ops->locked.ops = zalloc(sizeof(*ops->locked.ops)); + if (ops->locked.ops == NULL) + return 0; + + if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0) + goto out_free_ops; + + ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name); + + if (ops->locked.ins.ops == NULL) + goto out_free_ops; + + if (ops->locked.ins.ops->parse && + ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0) + goto out_free_ops; + + return 0; + +out_free_ops: + zfree(&ops->locked.ops); + return 0; +} + +static int lock__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name) +{ + int printed; + + if (ops->locked.ins.ops == NULL) + return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); + + printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name); + return printed + ins__scnprintf(&ops->locked.ins, bf + printed, + size - printed, ops->locked.ops, max_ins_name); +} + +static void lock__delete(struct ins_operands *ops) +{ + struct ins *ins = &ops->locked.ins; + + if (ins->ops && ins->ops->free) + ins->ops->free(ops->locked.ops); + else + ins_ops__delete(ops->locked.ops); + + zfree(&ops->locked.ops); + zfree(&ops->target.raw); + zfree(&ops->target.name); +} + +static struct ins_ops lock_ops = { + .free = lock__delete, + .parse = lock__parse, + .scnprintf = lock__scnprintf, +}; + +/* + * Check if the operand has more than one registers like x86 SIB addressing: + * 0x1234(%rax, %rbx, 8) + * + * But it doesn't care segment selectors like %gs:0x5678(%rcx), so just check + * the input string after 'memory_ref_char' if exists. + */ +static bool check_multi_regs(struct arch *arch, const char *op) +{ + int count = 0; + + if (arch->objdump.register_char == 0) + return false; + + if (arch->objdump.memory_ref_char) { + op = strchr(op, arch->objdump.memory_ref_char); + if (op == NULL) + return false; + } + + while ((op = strchr(op, arch->objdump.register_char)) != NULL) { + count++; + op++; + } + + return count > 1; +} + +static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) +{ + char *s = strchr(ops->raw, ','), *target, *comment, prev; + + if (s == NULL) + return -1; + + *s = '\0'; + + /* + * x86 SIB addressing has something like 0x8(%rax, %rcx, 1) + * then it needs to have the closing parenthesis. 
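check_multi_regs() above flags operands referencing more than one register, as in x86 SIB addressing, by counting register sigils only after the memory-reference character, so segment prefixes don't inflate the count. Concretely, with x86's objdump settings (register_char '%', memory_ref_char '('):

/* check_multi_regs(arch, "0x1234(%rax,%rbx,8)") -> true:  two '%' after '(' */
/* check_multi_regs(arch, "%gs:0x5678(%rcx)")    -> false: only %rcx counts,
 *                                                 the %gs before '(' is skipped */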
+ */ + if (strchr(ops->raw, '(')) { + *s = ','; + s = strchr(ops->raw, ')'); + if (s == NULL || s[1] != ',') + return -1; + *++s = '\0'; + } + + ops->source.raw = strdup(ops->raw); + *s = ','; + + if (ops->source.raw == NULL) + return -1; + + ops->source.multi_regs = check_multi_regs(arch, ops->source.raw); + + target = skip_spaces(++s); + comment = strchr(s, arch->objdump.comment_char); + + if (comment != NULL) + s = comment - 1; + else + s = strchr(s, '\0') - 1; + + while (s > target && isspace(s[0])) + --s; + s++; + prev = *s; + *s = '\0'; + + ops->target.raw = strdup(target); + *s = prev; + + if (ops->target.raw == NULL) + goto out_free_source; + + ops->target.multi_regs = check_multi_regs(arch, ops->target.raw); + + if (comment == NULL) + return 0; + + comment = skip_spaces(comment); + comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name); + comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name); + + return 0; + +out_free_source: + zfree(&ops->source.raw); + return -1; +} + +static int mov__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name) +{ + return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name, + ops->source.name ?: ops->source.raw, + ops->target.name ?: ops->target.raw); +} + +static struct ins_ops mov_ops = { + .parse = mov__parse, + .scnprintf = mov__scnprintf, +}; + +static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) +{ + char *target, *comment, *s, prev; + + target = s = ops->raw; + + while (s[0] != '\0' && !isspace(s[0])) + ++s; + prev = *s; + *s = '\0'; + + ops->target.raw = strdup(target); + *s = prev; + + if (ops->target.raw == NULL) + return -1; + + comment = strchr(s, arch->objdump.comment_char); + if (comment == NULL) + return 0; + + comment = skip_spaces(comment); + comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name); + + return 0; +} + +static int dec__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name) +{ + return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, + ops->target.name ?: ops->target.raw); +} + +static struct ins_ops dec_ops = { + .parse = dec__parse, + .scnprintf = dec__scnprintf, +}; + +static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, + struct ins_operands *ops __maybe_unused, int max_ins_name) +{ + return scnprintf(bf, size, "%-*s", max_ins_name, "nop"); +} + +static struct ins_ops nop_ops = { + .scnprintf = nop__scnprintf, +}; + +static struct ins_ops ret_ops = { + .scnprintf = ins__raw_scnprintf, +}; + +bool ins__is_nop(const struct ins *ins) +{ + return ins->ops == &nop_ops; +} + +bool ins__is_ret(const struct ins *ins) +{ + return ins->ops == &ret_ops; +} + +bool ins__is_lock(const struct ins *ins) +{ + return ins->ops == &lock_ops; +} + +static int ins__key_cmp(const void *name, const void *insp) +{ + const struct ins *ins = insp; + + return strcmp(name, ins->name); +} + +static int ins__cmp(const void *a, const void *b) +{ + const struct ins *ia = a; + const struct ins *ib = b; + + return strcmp(ia->name, ib->name); +} + +static void ins__sort(struct arch *arch) +{ + const int nmemb = arch->nr_instructions; + + qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp); +} + +static struct ins_ops *__ins__find(struct arch *arch, const char *name) +{ + struct ins *ins; + const int nmemb = arch->nr_instructions; + + if (!arch->sorted_instructions) { 
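The *_ops structures above form a small per-instruction-class vtable: disasm_line__init_ins() looks the ops up by mnemonic, parse() splits the raw operand string, and scnprintf() later renders it, falling back to ins__raw_scnprintf() for classes without a custom printer. The dispatch, in outline:

/* Per-class vtable dispatch (outline of the flow above). */
struct ins_ops *ops = ins__find(arch, dl->ins.name);	/* bsearch by name */

if (ops && ops->parse && ops->parse(arch, &dl->ops, ms) < 0)
	ops = NULL;		/* unparseable: treat the line as raw text */

if (ops && ops->scnprintf)
	ops->scnprintf(&dl->ins, buf, sizeof(buf), &dl->ops, max_ins_name);
else
	ins__raw_scnprintf(&dl->ins, buf, sizeof(buf), &dl->ops, max_ins_name);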
+ ins__sort(arch); + arch->sorted_instructions = true; + } + + ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp); + if (ins) + return ins->ops; + + if (arch->insn_suffix) { + char tmp[32]; + char suffix; + size_t len = strlen(name); + + if (len == 0 || len >= sizeof(tmp)) + return NULL; + + suffix = name[len - 1]; + if (strchr(arch->insn_suffix, suffix) == NULL) + return NULL; + + strcpy(tmp, name); + tmp[len - 1] = '\0'; /* remove the suffix and check again */ + + ins = bsearch(tmp, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp); + } + return ins ? ins->ops : NULL; +} + +struct ins_ops *ins__find(struct arch *arch, const char *name) +{ + struct ins_ops *ops = __ins__find(arch, name); + + if (!ops && arch->associate_instruction_ops) + ops = arch->associate_instruction_ops(arch, name); + + return ops; +} + +static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms) +{ + dl->ins.ops = ins__find(arch, dl->ins.name); + + if (!dl->ins.ops) + return; + + if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0) + dl->ins.ops = NULL; +} + +static int disasm_line__parse(char *line, const char **namep, char **rawp) +{ + char tmp, *name = skip_spaces(line); + + if (name[0] == '\0') + return -1; + + *rawp = name + 1; + + while ((*rawp)[0] != '\0' && !isspace((*rawp)[0])) + ++*rawp; + + tmp = (*rawp)[0]; + (*rawp)[0] = '\0'; + *namep = strdup(name); + + if (*namep == NULL) + goto out; + + (*rawp)[0] = tmp; + *rawp = strim(*rawp); + + return 0; + +out: + return -1; +} + +static void annotation_line__init(struct annotation_line *al, + struct annotate_args *args, + int nr) +{ + al->offset = args->offset; + al->line = strdup(args->line); + al->line_nr = args->line_nr; + al->fileloc = args->fileloc; + al->data_nr = nr; +} + +static void annotation_line__exit(struct annotation_line *al) +{ + zfree_srcline(&al->path); + zfree(&al->line); + zfree(&al->cycles); +} + +static size_t disasm_line_size(int nr) +{ + struct annotation_line *al; + + return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr)); +} + +/* + * Allocating the disasm annotation line data with + * following structure: + * + * ------------------------------------------- + * struct disasm_line | struct annotation_line + * ------------------------------------------- + * + * We have 'struct annotation_line' member as last member + * of 'struct disasm_line' to have an easy access. 
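The layout described just below puts the variably sized annotation_line at the tail of disasm_line, so one allocation covers the header plus a data[] slot per event in the group, as computed by disasm_line_size(nr). The single-allocation shape in general form, with placeholder types:

#include <stdlib.h>

struct hdr {
	/* ...fixed fields... */
	struct entry data[];		/* flexible array member, nr entries */
};

static struct hdr *hdr__new(int nr)	/* nr = group size, >= 1 */
{
	return calloc(1, sizeof(struct hdr) + nr * sizeof(struct entry));
}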
+ */ +struct disasm_line *disasm_line__new(struct annotate_args *args) +{ + struct disasm_line *dl = NULL; + int nr = 1; + + if (evsel__is_group_event(args->evsel)) + nr = args->evsel->core.nr_members; + + dl = zalloc(disasm_line_size(nr)); + if (!dl) + return NULL; + + annotation_line__init(&dl->al, args, nr); + if (dl->al.line == NULL) + goto out_delete; + + if (args->offset != -1) { + if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0) + goto out_free_line; + + disasm_line__init_ins(dl, args->arch, &args->ms); + } + + return dl; + +out_free_line: + zfree(&dl->al.line); +out_delete: + free(dl); + return NULL; +} + +void disasm_line__free(struct disasm_line *dl) +{ + if (dl->ins.ops && dl->ins.ops->free) + dl->ins.ops->free(&dl->ops); + else + ins_ops__delete(&dl->ops); + zfree(&dl->ins.name); + annotation_line__exit(&dl->al); + free(dl); +} + +int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name) +{ + if (raw || !dl->ins.ops) + return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw); + + return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name); +} + +/* + * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw) + * which looks like following + * + * 0000000000415500 <_init>: + * 415500: sub $0x8,%rsp + * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8> + * 41550b: test %rax,%rax + * 41550e: je 415515 <_init+0x15> + * 415510: callq 416e70 <__gmon_start__@plt> + * 415515: add $0x8,%rsp + * 415519: retq + * + * it will be parsed and saved into struct disasm_line as + * <offset> <name> <ops.raw> + * + * The offset will be a relative offset from the start of the symbol and -1 + * means that it's not a disassembly line so should be treated differently. + * The ops.raw part will be parsed further according to type of the instruction. + */ +static int symbol__parse_objdump_line(struct symbol *sym, + struct annotate_args *args, + char *parsed_line, int *line_nr, char **fileloc) +{ + struct map *map = args->ms.map; + struct annotation *notes = symbol__annotation(sym); + struct disasm_line *dl; + char *tmp; + s64 line_ip, offset = -1; + regmatch_t match[2]; + + /* /filename:linenr ? Save line number and ignore. */ + if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) { + *line_nr = atoi(parsed_line + match[1].rm_so); + free(*fileloc); + *fileloc = strdup(parsed_line); + return 0; + } + + /* Process hex address followed by ':'. 
*/ + line_ip = strtoull(parsed_line, &tmp, 16); + if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') { + u64 start = map__rip_2objdump(map, sym->start), + end = map__rip_2objdump(map, sym->end); + + offset = line_ip - start; + if ((u64)line_ip < start || (u64)line_ip >= end) + offset = -1; + else + parsed_line = tmp + 1; + } + + args->offset = offset; + args->line = parsed_line; + args->line_nr = *line_nr; + args->fileloc = *fileloc; + args->ms.sym = sym; + + dl = disasm_line__new(args); + (*line_nr)++; + + if (dl == NULL) + return -1; + + if (!disasm_line__has_local_offset(dl)) { + dl->ops.target.offset = dl->ops.target.addr - + map__rip_2objdump(map, sym->start); + dl->ops.target.offset_avail = true; + } + + /* kcore has no symbols, so add the call target symbol */ + if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) { + struct addr_map_symbol target = { + .addr = dl->ops.target.addr, + .ms = { .map = map, }, + }; + + if (!maps__find_ams(args->ms.maps, &target) && + target.ms.sym->start == target.al_addr) + dl->ops.target.sym = target.ms.sym; + } + + annotation_line__add(&dl->al, ¬es->src->source); + return 0; +} + +static void delete_last_nop(struct symbol *sym) +{ + struct annotation *notes = symbol__annotation(sym); + struct list_head *list = ¬es->src->source; + struct disasm_line *dl; + + while (!list_empty(list)) { + dl = list_entry(list->prev, struct disasm_line, al.node); + + if (dl->ins.ops) { + if (!ins__is_nop(&dl->ins)) + return; + } else { + if (!strstr(dl->al.line, " nop ") && + !strstr(dl->al.line, " nopl ") && + !strstr(dl->al.line, " nopw ")) + return; + } + + list_del_init(&dl->al.node); + disasm_line__free(dl); + } +} + +int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen) +{ + struct dso *dso = map__dso(ms->map); + + BUG_ON(buflen == 0); + + if (errnum >= 0) { + str_error_r(errnum, buf, buflen); + return 0; + } + + switch (errnum) { + case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: { + char bf[SBUILD_ID_SIZE + 15] = " with build id "; + char *build_id_msg = NULL; + + if (dso__has_build_id(dso)) { + build_id__sprintf(dso__bid(dso), bf + 15); + build_id_msg = bf; + } + scnprintf(buf, buflen, + "No vmlinux file%s\nwas found in the path.\n\n" + "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n" + "Please use:\n\n" + " perf buildid-cache -vu vmlinux\n\n" + "or:\n\n" + " --vmlinux vmlinux\n", build_id_msg ?: ""); + } + break; + case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF: + scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation"); + break; + case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP: + scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions."); + break; + case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING: + scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization."); + break; + case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE: + scnprintf(buf, buflen, "Invalid BPF file: %s.", dso__long_name(dso)); + break; + case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF: + scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.", + dso__long_name(dso)); + break; + default: + scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum); + break; + } + + return 0; +} + +static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size) +{ + char linkname[PATH_MAX]; + char *build_id_filename; + char *build_id_path = NULL; + char 
*pos; + int len; + + if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && + !dso__is_kcore(dso)) + return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX; + + build_id_filename = dso__build_id_filename(dso, NULL, 0, false); + if (build_id_filename) { + __symbol__join_symfs(filename, filename_size, build_id_filename); + free(build_id_filename); + } else { + if (dso__has_build_id(dso)) + return ENOMEM; + goto fallback; + } + + build_id_path = strdup(filename); + if (!build_id_path) + return ENOMEM; + + /* + * old style build-id cache has name of XX/XXXXXXX.. while + * new style has XX/XXXXXXX../{elf,kallsyms,vdso}. + * extract the build-id part of dirname in the new style only. + */ + pos = strrchr(build_id_path, '/'); + if (pos && strlen(pos) < SBUILD_ID_SIZE - 2) + dirname(build_id_path); + + if (dso__is_kcore(dso)) + goto fallback; + + len = readlink(build_id_path, linkname, sizeof(linkname) - 1); + if (len < 0) + goto fallback; + + linkname[len] = '\0'; + if (strstr(linkname, DSO__NAME_KALLSYMS) || + access(filename, R_OK)) { +fallback: + /* + * If we don't have build-ids or the build-id file isn't in the + * cache, or is just a kallsyms file, well, lets hope that this + * DSO is the same as when 'perf record' ran. + */ + if (dso__kernel(dso) && dso__long_name(dso)[0] == '/') + snprintf(filename, filename_size, "%s", dso__long_name(dso)); + else + __symbol__join_symfs(filename, filename_size, dso__long_name(dso)); + + mutex_lock(dso__lock(dso)); + if (access(filename, R_OK) && errno == ENOENT && dso__nsinfo(dso)) { + char *new_name = dso__filename_with_chroot(dso, filename); + if (new_name) { + strlcpy(filename, new_name, filename_size); + free(new_name); + } + } + mutex_unlock(dso__lock(dso)); + } else if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) { + dso__set_binary_type(dso, DSO_BINARY_TYPE__BUILD_ID_CACHE); + } + + free(build_id_path); + return 0; +} + +#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) +#define PACKAGE "perf" +#include <bfd.h> +#include <dis-asm.h> +#include <bpf/bpf.h> +#include <bpf/btf.h> +#include <bpf/libbpf.h> +#include <linux/btf.h> +#include <tools/dis-asm-compat.h> + +#include "bpf-event.h" +#include "bpf-utils.h" + +static int symbol__disassemble_bpf(struct symbol *sym, + struct annotate_args *args) +{ + struct annotation *notes = symbol__annotation(sym); + struct bpf_prog_linfo *prog_linfo = NULL; + struct bpf_prog_info_node *info_node; + int len = sym->end - sym->start; + disassembler_ftype disassemble; + struct map *map = args->ms.map; + struct perf_bpil *info_linear; + struct disassemble_info info; + struct dso *dso = map__dso(map); + int pc = 0, count, sub_id; + struct btf *btf = NULL; + char tpath[PATH_MAX]; + size_t buf_size; + int nr_skip = 0; + char *buf; + bfd *bfdf; + int ret; + FILE *s; + + if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO) + return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE; + + pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, + sym->name, sym->start, sym->end - sym->start); + + memset(tpath, 0, sizeof(tpath)); + perf_exe(tpath, sizeof(tpath)); + + bfdf = bfd_openr(tpath, NULL); + if (bfdf == NULL) + abort(); + + if (!bfd_check_format(bfdf, bfd_object)) + abort(); + + s = open_memstream(&buf, &buf_size); + if (!s) { + ret = errno; + goto out; + } + init_disassemble_info_compat(&info, s, + (fprintf_ftype) fprintf, + fprintf_styled); + info.arch = bfd_get_arch(bfdf); + info.mach = bfd_get_mach(bfdf); + + info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, + dso->bpf_prog.id); 
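+	/*
+	 * The program's bpf_prog_info (including the JITed instructions
+	 * disassembled below) was saved into the perf_env when the BPF
+	 * program was first seen; without that cached node there is
+	 * nothing to disassemble from.
+	 */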
+ if (!info_node) { + ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF; + goto out; + } + info_linear = info_node->info_linear; + sub_id = dso->bpf_prog.sub_id; + + info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns); + info.buffer_length = info_linear->info.jited_prog_len; + + if (info_linear->info.nr_line_info) + prog_linfo = bpf_prog_linfo__new(&info_linear->info); + + if (info_linear->info.btf_id) { + struct btf_node *node; + + node = perf_env__find_btf(dso->bpf_prog.env, + info_linear->info.btf_id); + if (node) + btf = btf__new((__u8 *)(node->data), + node->data_size); + } + + disassemble_init_for_target(&info); + +#ifdef DISASM_FOUR_ARGS_SIGNATURE + disassemble = disassembler(info.arch, + bfd_big_endian(bfdf), + info.mach, + bfdf); +#else + disassemble = disassembler(bfdf); +#endif + if (disassemble == NULL) + abort(); + + fflush(s); + do { + const struct bpf_line_info *linfo = NULL; + struct disasm_line *dl; + size_t prev_buf_size; + const char *srcline; + u64 addr; + + addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id]; + count = disassemble(pc, &info); + + if (prog_linfo) + linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo, + addr, sub_id, + nr_skip); + + if (linfo && btf) { + srcline = btf__name_by_offset(btf, linfo->line_off); + nr_skip++; + } else + srcline = NULL; + + fprintf(s, "\n"); + prev_buf_size = buf_size; + fflush(s); + + if (!annotate_opts.hide_src_code && srcline) { + args->offset = -1; + args->line = strdup(srcline); + args->line_nr = 0; + args->fileloc = NULL; + args->ms.sym = sym; + dl = disasm_line__new(args); + if (dl) { + annotation_line__add(&dl->al, + ¬es->src->source); + } + } + + args->offset = pc; + args->line = buf + prev_buf_size; + args->line_nr = 0; + args->fileloc = NULL; + args->ms.sym = sym; + dl = disasm_line__new(args); + if (dl) + annotation_line__add(&dl->al, ¬es->src->source); + + pc += count; + } while (count > 0 && pc < len); + + ret = 0; +out: + free(prog_linfo); + btf__free(btf); + fclose(s); + bfd_close(bfdf); + return ret; +} +#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) +static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused, + struct annotate_args *args __maybe_unused) +{ + return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF; +} +#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) + +static int +symbol__disassemble_bpf_image(struct symbol *sym, + struct annotate_args *args) +{ + struct annotation *notes = symbol__annotation(sym); + struct disasm_line *dl; + + args->offset = -1; + args->line = strdup("to be implemented"); + args->line_nr = 0; + args->fileloc = NULL; + dl = disasm_line__new(args); + if (dl) + annotation_line__add(&dl->al, ¬es->src->source); + + zfree(&args->line); + return 0; +} + +#ifdef HAVE_LIBCAPSTONE_SUPPORT +#include <capstone/capstone.h> + +static int open_capstone_handle(struct annotate_args *args, bool is_64bit, + csh *handle) +{ + struct annotation_options *opt = args->options; + cs_mode mode = is_64bit ? CS_MODE_64 : CS_MODE_32; + + /* TODO: support more architectures */ + if (!arch__is(args->arch, "x86")) + return -1; + + if (cs_open(CS_ARCH_X86, mode, handle) != CS_ERR_OK) + return -1; + + if (!opt->disassembler_style || + !strcmp(opt->disassembler_style, "att")) + cs_option(*handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT); + + /* + * Resolving address operands to symbols is implemented + * on x86 by investigating instruction details. 
+ */ + cs_option(*handle, CS_OPT_DETAIL, CS_OPT_ON); + + return 0; +} + +struct find_file_offset_data { + u64 ip; + u64 offset; +}; + +/* This will be called for each PHDR in an ELF binary */ +static int find_file_offset(u64 start, u64 len, u64 pgoff, void *arg) +{ + struct find_file_offset_data *data = arg; + + if (start <= data->ip && data->ip < start + len) { + data->offset = pgoff + data->ip - start; + return 1; + } + return 0; +} + +static void print_capstone_detail(cs_insn *insn, char *buf, size_t len, + struct annotate_args *args, u64 addr) +{ + int i; + struct map *map = args->ms.map; + struct symbol *sym; + + /* TODO: support more architectures */ + if (!arch__is(args->arch, "x86")) + return; + + if (insn->detail == NULL) + return; + + for (i = 0; i < insn->detail->x86.op_count; i++) { + cs_x86_op *op = &insn->detail->x86.operands[i]; + u64 orig_addr; + + if (op->type != X86_OP_MEM) + continue; + + /* only print RIP-based global symbols for now */ + if (op->mem.base != X86_REG_RIP) + continue; + + /* get the target address */ + orig_addr = addr + insn->size + op->mem.disp; + addr = map__objdump_2mem(map, orig_addr); + + if (dso__kernel(map__dso(map))) { + /* + * The kernel maps can be splitted into sections, + * let's find the map first and the search the symbol. + */ + map = maps__find(map__kmaps(map), addr); + if (map == NULL) + continue; + } + + /* convert it to map-relative address for search */ + addr = map__map_ip(map, addr); + + sym = map__find_symbol(map, addr); + if (sym == NULL) + continue; + + if (addr == sym->start) { + scnprintf(buf, len, "\t# %"PRIx64" <%s>", + orig_addr, sym->name); + } else { + scnprintf(buf, len, "\t# %"PRIx64" <%s+%#"PRIx64">", + orig_addr, sym->name, addr - sym->start); + } + break; + } +} + +static int symbol__disassemble_capstone(char *filename, struct symbol *sym, + struct annotate_args *args) +{ + struct annotation *notes = symbol__annotation(sym); + struct map *map = args->ms.map; + struct dso *dso = map__dso(map); + struct nscookie nsc; + u64 start = map__rip_2objdump(map, sym->start); + u64 end = map__rip_2objdump(map, sym->end); + u64 len = end - start; + u64 offset; + int i, fd, count; + bool is_64bit = false; + bool needs_cs_close = false; + u8 *buf = NULL; + struct find_file_offset_data data = { + .ip = start, + }; + csh handle; + cs_insn *insn; + char disasm_buf[512]; + struct disasm_line *dl; + + if (args->options->objdump_path) + return -1; + + nsinfo__mountns_enter(dso__nsinfo(dso), &nsc); + fd = open(filename, O_RDONLY); + nsinfo__mountns_exit(&nsc); + if (fd < 0) + return -1; + + if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data, + &is_64bit) == 0) + goto err; + + if (open_capstone_handle(args, is_64bit, &handle) < 0) + goto err; + + needs_cs_close = true; + + buf = malloc(len); + if (buf == NULL) + goto err; + + count = pread(fd, buf, len, data.offset); + close(fd); + fd = -1; + + if ((u64)count != len) + goto err; + + /* add the function address and name */ + scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:", + start, sym->name); + + args->offset = -1; + args->line = disasm_buf; + args->line_nr = 0; + args->fileloc = NULL; + args->ms.sym = sym; + + dl = disasm_line__new(args); + if (dl == NULL) + goto err; + + annotation_line__add(&dl->al, ¬es->src->source); + + count = cs_disasm(handle, buf, len, start, len, &insn); + for (i = 0, offset = 0; i < count; i++) { + int printed; + + printed = scnprintf(disasm_buf, sizeof(disasm_buf), + " %-7s %s", + insn[i].mnemonic, insn[i].op_str); + 
print_capstone_detail(&insn[i], disasm_buf + printed, + sizeof(disasm_buf) - printed, args, + start + offset); + + args->offset = offset; + args->line = disasm_buf; + + dl = disasm_line__new(args); + if (dl == NULL) + goto err; + + annotation_line__add(&dl->al, ¬es->src->source); + + offset += insn[i].size; + } + + /* It failed in the middle: probably due to unknown instructions */ + if (offset != len) { + struct list_head *list = ¬es->src->source; + + /* Discard all lines and fallback to objdump */ + while (!list_empty(list)) { + dl = list_first_entry(list, struct disasm_line, al.node); + + list_del_init(&dl->al.node); + disasm_line__free(dl); + } + count = -1; + } + +out: + if (needs_cs_close) + cs_close(&handle); + free(buf); + return count < 0 ? count : 0; + +err: + if (fd >= 0) + close(fd); + if (needs_cs_close) { + struct disasm_line *tmp; + + /* + * It probably failed in the middle of the above loop. + * Release any resources it might add. + */ + list_for_each_entry_safe(dl, tmp, ¬es->src->source, al.node) { + list_del(&dl->al.node); + free(dl); + } + } + count = -1; + goto out; +} +#endif + +/* + * Possibly create a new version of line with tabs expanded. Returns the + * existing or new line, storage is updated if a new line is allocated. If + * allocation fails then NULL is returned. + */ +static char *expand_tabs(char *line, char **storage, size_t *storage_len) +{ + size_t i, src, dst, len, new_storage_len, num_tabs; + char *new_line; + size_t line_len = strlen(line); + + for (num_tabs = 0, i = 0; i < line_len; i++) + if (line[i] == '\t') + num_tabs++; + + if (num_tabs == 0) + return line; + + /* + * Space for the line and '\0', less the leading and trailing + * spaces. Each tab may introduce 7 additional spaces. + */ + new_storage_len = line_len + 1 + (num_tabs * 7); + + new_line = malloc(new_storage_len); + if (new_line == NULL) { + pr_err("Failure allocating memory for tab expansion\n"); + return NULL; + } + + /* + * Copy regions starting at src and expand tabs. If there are two + * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces + * are inserted. + */ + for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) { + if (line[i] == '\t') { + len = i - src; + memcpy(&new_line[dst], &line[src], len); + dst += len; + new_line[dst++] = ' '; + while (dst % 8 != 0) + new_line[dst++] = ' '; + src = i + 1; + num_tabs--; + } + } + + /* Expand the last region. */ + len = line_len - src; + memcpy(&new_line[dst], &line[src], len); + dst += len; + new_line[dst] = '\0'; + + free(*storage); + *storage = new_line; + *storage_len = new_storage_len; + return new_line; +} + +int symbol__disassemble(struct symbol *sym, struct annotate_args *args) +{ + struct annotation_options *opts = &annotate_opts; + struct map *map = args->ms.map; + struct dso *dso = map__dso(map); + char *command; + FILE *file; + char symfs_filename[PATH_MAX]; + struct kcore_extract kce; + bool delete_extract = false; + bool decomp = false; + int lineno = 0; + char *fileloc = NULL; + int nline; + char *line; + size_t line_len; + const char *objdump_argv[] = { + "/bin/sh", + "-c", + NULL, /* Will be the objdump command to run. */ + "--", + NULL, /* Will be the symfs path. 
*/ + NULL, + }; + struct child_process objdump_process; + int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename)); + + if (err) + return err; + + pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, + symfs_filename, sym->name, map__unmap_ip(map, sym->start), + map__unmap_ip(map, sym->end)); + + pr_debug("annotating [%p] %30s : [%p] %30s\n", + dso, dso__long_name(dso), sym, sym->name); + + if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) { + return symbol__disassemble_bpf(sym, args); + } else if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) { + return symbol__disassemble_bpf_image(sym, args); + } else if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) { + return -1; + } else if (dso__is_kcore(dso)) { + kce.kcore_filename = symfs_filename; + kce.addr = map__rip_2objdump(map, sym->start); + kce.offs = sym->start; + kce.len = sym->end - sym->start; + if (!kcore_extract__create(&kce)) { + delete_extract = true; + strlcpy(symfs_filename, kce.extract_filename, + sizeof(symfs_filename)); + } + } else if (dso__needs_decompress(dso)) { + char tmp[KMOD_DECOMP_LEN]; + + if (dso__decompress_kmodule_path(dso, symfs_filename, + tmp, sizeof(tmp)) < 0) + return -1; + + decomp = true; + strcpy(symfs_filename, tmp); + } + +#ifdef HAVE_LIBCAPSTONE_SUPPORT + err = symbol__disassemble_capstone(symfs_filename, sym, args); + if (err == 0) + goto out_remove_tmp; +#endif + + err = asprintf(&command, + "%s %s%s --start-address=0x%016" PRIx64 + " --stop-address=0x%016" PRIx64 + " %s -d %s %s %s %c%s%c %s%s -C \"$1\"", + opts->objdump_path ?: "objdump", + opts->disassembler_style ? "-M " : "", + opts->disassembler_style ?: "", + map__rip_2objdump(map, sym->start), + map__rip_2objdump(map, sym->end), + opts->show_linenr ? "-l" : "", + opts->show_asm_raw ? "" : "--no-show-raw-insn", + opts->annotate_src ? "-S" : "", + opts->prefix ? "--prefix " : "", + opts->prefix ? '"' : ' ', + opts->prefix ?: "", + opts->prefix ? '"' : ' ', + opts->prefix_strip ? "--prefix-strip=" : "", + opts->prefix_strip ?: ""); + + if (err < 0) { + pr_err("Failure allocating memory for the command to run\n"); + goto out_remove_tmp; + } + + pr_debug("Executing: %s\n", command); + + objdump_argv[2] = command; + objdump_argv[4] = symfs_filename; + + /* Create a pipe to read from for stdout */ + memset(&objdump_process, 0, sizeof(objdump_process)); + objdump_process.argv = objdump_argv; + objdump_process.out = -1; + objdump_process.err = -1; + objdump_process.no_stderr = 1; + if (start_command(&objdump_process)) { + pr_err("Failure starting to run %s\n", command); + err = -1; + goto out_free_command; + } + + file = fdopen(objdump_process.out, "r"); + if (!file) { + pr_err("Failure creating FILE stream for %s\n", command); + /* + * If we were using debug info should retry with + * original binary. + */ + err = -1; + goto out_close_stdout; + } + + /* Storage for getline. 
*/ + line = NULL; + line_len = 0; + + nline = 0; + while (!feof(file)) { + const char *match; + char *expanded_line; + + if (getline(&line, &line_len, file) < 0 || !line) + break; + + /* Skip lines containing "filename:" */ + match = strstr(line, symfs_filename); + if (match && match[strlen(symfs_filename)] == ':') + continue; + + expanded_line = strim(line); + expanded_line = expand_tabs(expanded_line, &line, &line_len); + if (!expanded_line) + break; + + /* + * The source code line number (lineno) needs to be kept in + * across calls to symbol__parse_objdump_line(), so that it + * can associate it with the instructions till the next one. + * See disasm_line__new() and struct disasm_line::line_nr. + */ + if (symbol__parse_objdump_line(sym, args, expanded_line, + &lineno, &fileloc) < 0) + break; + nline++; + } + free(line); + free(fileloc); + + err = finish_command(&objdump_process); + if (err) + pr_err("Error running %s\n", command); + + if (nline == 0) { + err = -1; + pr_err("No output from %s\n", command); + } + + /* + * kallsyms does not have symbol sizes so there may a nop at the end. + * Remove it. + */ + if (dso__is_kcore(dso)) + delete_last_nop(sym); + + fclose(file); + +out_close_stdout: + close(objdump_process.out); + +out_free_command: + free(command); + +out_remove_tmp: + if (decomp) + unlink(symfs_filename); + + if (delete_extract) + kcore_extract__delete(&kce); + + return err; +} diff --git a/tools/perf/util/disasm.h b/tools/perf/util/disasm.h new file mode 100644 index 000000000000..3d381a043520 --- /dev/null +++ b/tools/perf/util/disasm.h @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef __PERF_UTIL_DISASM_H +#define __PERF_UTIL_DISASM_H + +#include "map_symbol.h" + +struct annotation_options; +struct disasm_line; +struct ins; +struct evsel; +struct symbol; + +struct arch { + const char *name; + struct ins *instructions; + size_t nr_instructions; + size_t nr_instructions_allocated; + struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name); + bool sorted_instructions; + bool initialized; + const char *insn_suffix; + void *priv; + unsigned int model; + unsigned int family; + int (*init)(struct arch *arch, char *cpuid); + bool (*ins_is_fused)(struct arch *arch, const char *ins1, + const char *ins2); + struct { + char comment_char; + char skip_functions_char; + char register_char; + char memory_ref_char; + char imm_char; + } objdump; +}; + +struct ins { + const char *name; + struct ins_ops *ops; +}; + +struct ins_operands { + char *raw; + struct { + char *raw; + char *name; + struct symbol *sym; + u64 addr; + s64 offset; + bool offset_avail; + bool outside; + bool multi_regs; + } target; + union { + struct { + char *raw; + char *name; + u64 addr; + bool multi_regs; + } source; + struct { + struct ins ins; + struct ins_operands *ops; + } locked; + struct { + char *raw_comment; + char *raw_func_start; + } jump; + }; +}; + +struct ins_ops { + void (*free)(struct ins_operands *ops); + int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms); + int (*scnprintf)(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name); +}; + +struct annotate_args { + struct arch *arch; + struct map_symbol ms; + struct evsel *evsel; + struct annotation_options *options; + s64 offset; + char *line; + int line_nr; + char *fileloc; +}; + +struct arch *arch__find(const char *name); +bool arch__is(struct arch *arch, const char *name); + +struct ins_ops *ins__find(struct arch *arch, const char *name); +int 
ins__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops, int max_ins_name); + +bool ins__is_call(const struct ins *ins); +bool ins__is_jump(const struct ins *ins); +bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2); +bool ins__is_nop(const struct ins *ins); +bool ins__is_ret(const struct ins *ins); +bool ins__is_lock(const struct ins *ins); + +struct disasm_line *disasm_line__new(struct annotate_args *args); +void disasm_line__free(struct disasm_line *dl); + +int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, + bool raw, int max_ins_name); + +int symbol__disassemble(struct symbol *sym, struct annotate_args *args); + +#endif /* __PERF_UTIL_DISASM_H */ diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c index 908e16813722..7d180bdaedbc 100644 --- a/tools/perf/util/dlfilter.c +++ b/tools/perf/util/dlfilter.c @@ -33,13 +33,13 @@ static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al) if (al->map) { struct dso *dso = map__dso(al->map); - if (symbol_conf.show_kernel_path && dso->long_name) - d_al->dso = dso->long_name; + if (symbol_conf.show_kernel_path && dso__long_name(dso)) + d_al->dso = dso__long_name(dso); else - d_al->dso = dso->name; - d_al->is_64_bit = dso->is_64_bit; - d_al->buildid_size = dso->bid.size; - d_al->buildid = dso->bid.data; + d_al->dso = dso__name(dso); + d_al->is_64_bit = dso__is_64_bit(dso); + d_al->buildid_size = dso__bid(dso)->size; + d_al->buildid = dso__bid(dso)->data; } else { d_al->dso = NULL; d_al->is_64_bit = 0; diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 22fd5fa806ed..dde706b71da7 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -40,6 +40,12 @@ static const char * const debuglink_paths[] = { "/usr/lib/debug%s/%s" }; +void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi) +{ + nsinfo__put(RC_CHK_ACCESS(dso)->nsinfo); + RC_CHK_ACCESS(dso)->nsinfo = nsi; +} + char dso__symtab_origin(const struct dso *dso) { static const char origin[] = { @@ -63,14 +69,14 @@ char dso__symtab_origin(const struct dso *dso) [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', }; - if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) + if (dso == NULL || dso__symtab_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) return '!'; - return origin[dso->symtab_type]; + return origin[dso__symtab_type(dso)]; } bool dso__is_object_file(const struct dso *dso) { - switch (dso->binary_type) { + switch (dso__binary_type(dso)) { case DSO_BINARY_TYPE__KALLSYMS: case DSO_BINARY_TYPE__GUEST_KALLSYMS: case DSO_BINARY_TYPE__JAVA_JIT: @@ -117,7 +123,7 @@ int dso__read_binary_type_filename(const struct dso *dso, char symfile[PATH_MAX]; unsigned int i; - len = __symbol__join_symfs(filename, size, dso->long_name); + len = __symbol__join_symfs(filename, size, dso__long_name(dso)); last_slash = filename + len; while (last_slash != filename && *last_slash != '/') last_slash--; @@ -159,12 +165,12 @@ int dso__read_binary_type_filename(const struct dso *dso, case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); - snprintf(filename + len, size - len, "%s.debug", dso->long_name); + snprintf(filename + len, size - len, "%s.debug", dso__long_name(dso)); break; case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); - snprintf(filename + len, size - len, "%s", dso->long_name); + snprintf(filename + len, size - len, "%s", dso__long_name(dso)); break; case 
DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: @@ -173,13 +179,13 @@ int dso__read_binary_type_filename(const struct dso *dso, * /usr/lib/debug/lib when it is expected to be in * /usr/lib/debug/usr/lib */ - if (strlen(dso->long_name) < 9 || - strncmp(dso->long_name, "/usr/lib/", 9)) { + if (strlen(dso__long_name(dso)) < 9 || + strncmp(dso__long_name(dso), "/usr/lib/", 9)) { ret = -1; break; } len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); - snprintf(filename + len, size - len, "%s", dso->long_name + 4); + snprintf(filename + len, size - len, "%s", dso__long_name(dso) + 4); break; case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: @@ -187,29 +193,29 @@ int dso__read_binary_type_filename(const struct dso *dso, const char *last_slash; size_t dir_size; - last_slash = dso->long_name + dso->long_name_len; - while (last_slash != dso->long_name && *last_slash != '/') + last_slash = dso__long_name(dso) + dso__long_name_len(dso); + while (last_slash != dso__long_name(dso) && *last_slash != '/') last_slash--; len = __symbol__join_symfs(filename, size, ""); - dir_size = last_slash - dso->long_name + 2; + dir_size = last_slash - dso__long_name(dso) + 2; if (dir_size > (size - len)) { ret = -1; break; } - len += scnprintf(filename + len, dir_size, "%s", dso->long_name); + len += scnprintf(filename + len, dir_size, "%s", dso__long_name(dso)); len += scnprintf(filename + len , size - len, ".debug%s", last_slash); break; } case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: - if (!dso->has_build_id) { + if (!dso__has_build_id(dso)) { ret = -1; break; } - build_id__sprintf(&dso->bid, build_id_hex); + build_id__sprintf(dso__bid_const(dso), build_id_hex); len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/"); snprintf(filename + len, size - len, "%.2s/%s.debug", build_id_hex, build_id_hex + 2); @@ -218,23 +224,23 @@ int dso__read_binary_type_filename(const struct dso *dso, case DSO_BINARY_TYPE__VMLINUX: case DSO_BINARY_TYPE__GUEST_VMLINUX: case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: - __symbol__join_symfs(filename, size, dso->long_name); + __symbol__join_symfs(filename, size, dso__long_name(dso)); break; case DSO_BINARY_TYPE__GUEST_KMODULE: case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: path__join3(filename, size, symbol_conf.symfs, - root_dir, dso->long_name); + root_dir, dso__long_name(dso)); break; case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: - __symbol__join_symfs(filename, size, dso->long_name); + __symbol__join_symfs(filename, size, dso__long_name(dso)); break; case DSO_BINARY_TYPE__KCORE: case DSO_BINARY_TYPE__GUEST_KCORE: - snprintf(filename, size, "%s", dso->long_name); + snprintf(filename, size, "%s", dso__long_name(dso)); break; default: @@ -310,8 +316,8 @@ bool is_kernel_module(const char *pathname, int cpumode) bool dso__needs_decompress(struct dso *dso) { - return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || - dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; + return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || + dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; } int filename__decompress(const char *name, char *pathname, @@ -363,11 +369,10 @@ static int decompress_kmodule(struct dso *dso, const char *name, if (!dso__needs_decompress(dso)) return -1; - if (dso->comp == COMP_ID__NONE) + if (dso__comp(dso) == COMP_ID__NONE) return -1; - return filename__decompress(name, pathname, len, dso->comp, - &dso->load_errno); + return filename__decompress(name, pathname, len, dso__comp(dso), 
dso__load_errno(dso)); } int dso__decompress_kmodule_fd(struct dso *dso, const char *name) @@ -468,17 +473,17 @@ void dso__set_module_info(struct dso *dso, struct kmod_path *m, struct machine *machine) { if (machine__is_host(machine)) - dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; + dso__set_symtab_type(dso, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE); else - dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; + dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KMODULE); /* _KMODULE_COMP should be next to _KMODULE */ if (m->kmod && m->comp) { - dso->symtab_type++; - dso->comp = m->comp; + dso__set_symtab_type(dso, dso__symtab_type(dso) + 1); + dso__set_comp(dso, m->comp); } - dso->is_kmod = 1; + dso__set_is_kmod(dso); dso__set_short_name(dso, strdup(m->name), true); } @@ -491,13 +496,21 @@ static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER; static void dso__list_add(struct dso *dso) { - list_add_tail(&dso->data.open_entry, &dso__data_open); + list_add_tail(&dso__data(dso)->open_entry, &dso__data_open); +#ifdef REFCNT_CHECKING + dso__data(dso)->dso = dso__get(dso); +#endif + /* Assume the dso is part of dsos, hence the optional reference count above. */ + assert(dso__dsos(dso)); dso__data_open_cnt++; } static void dso__list_del(struct dso *dso) { - list_del_init(&dso->data.open_entry); + list_del_init(&dso__data(dso)->open_entry); +#ifdef REFCNT_CHECKING + dso__put(dso__data(dso)->dso); +#endif WARN_ONCE(dso__data_open_cnt <= 0, "DSO data fd counter out of bounds."); dso__data_open_cnt--; @@ -528,7 +541,7 @@ static int do_open(char *name) char *dso__filename_with_chroot(const struct dso *dso, const char *filename) { - return filename_with_chroot(nsinfo__pid(dso->nsinfo), filename); + return filename_with_chroot(nsinfo__pid(dso__nsinfo_const(dso)), filename); } static int __open_dso(struct dso *dso, struct machine *machine) @@ -541,18 +554,18 @@ static int __open_dso(struct dso *dso, struct machine *machine) if (!name) return -ENOMEM; - mutex_lock(&dso->lock); + mutex_lock(dso__lock(dso)); if (machine) root_dir = machine->root_dir; - if (dso__read_binary_type_filename(dso, dso->binary_type, + if (dso__read_binary_type_filename(dso, dso__binary_type(dso), root_dir, name, PATH_MAX)) goto out; if (!is_regular_file(name)) { char *new_name; - if (errno != ENOENT || dso->nsinfo == NULL) + if (errno != ENOENT || dso__nsinfo(dso) == NULL) goto out; new_name = dso__filename_with_chroot(dso, name); @@ -568,7 +581,7 @@ static int __open_dso(struct dso *dso, struct machine *machine) size_t len = sizeof(newpath); if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) { - fd = -dso->load_errno; + fd = -(*dso__load_errno(dso)); goto out; } @@ -582,7 +595,7 @@ static int __open_dso(struct dso *dso, struct machine *machine) unlink(name); out: - mutex_unlock(&dso->lock); + mutex_unlock(dso__lock(dso)); free(name); return fd; } @@ -601,13 +614,13 @@ static int open_dso(struct dso *dso, struct machine *machine) int fd; struct nscookie nsc; - if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) { - mutex_lock(&dso->lock); - nsinfo__mountns_enter(dso->nsinfo, &nsc); - mutex_unlock(&dso->lock); + if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) { + mutex_lock(dso__lock(dso)); + nsinfo__mountns_enter(dso__nsinfo(dso), &nsc); + mutex_unlock(dso__lock(dso)); } fd = __open_dso(dso, machine); - if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) + if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) nsinfo__mountns_exit(&nsc); if (fd >= 0) { @@ -624,10 
+637,10 @@ static int open_dso(struct dso *dso, struct machine *machine) static void close_data_fd(struct dso *dso) { - if (dso->data.fd >= 0) { - close(dso->data.fd); - dso->data.fd = -1; - dso->data.file_size = 0; + if (dso__data(dso)->fd >= 0) { + close(dso__data(dso)->fd); + dso__data(dso)->fd = -1; + dso__data(dso)->file_size = 0; dso__list_del(dso); } } @@ -646,9 +659,15 @@ static void close_dso(struct dso *dso) static void close_first_dso(void) { + struct dso_data *dso_data; struct dso *dso; - dso = list_first_entry(&dso__data_open, struct dso, data.open_entry); + dso_data = list_first_entry(&dso__data_open, struct dso_data, open_entry); +#ifdef REFCNT_CHECKING + dso = dso_data->dso; +#else + dso = container_of(dso_data, struct dso, data); +#endif close_dso(dso); } @@ -728,28 +747,29 @@ static void try_to_open_dso(struct dso *dso, struct machine *machine) DSO_BINARY_TYPE__NOT_FOUND, }; int i = 0; + struct dso_data *dso_data = dso__data(dso); - if (dso->data.fd >= 0) + if (dso_data->fd >= 0) return; - if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) { - dso->data.fd = open_dso(dso, machine); + if (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND) { + dso_data->fd = open_dso(dso, machine); goto out; } do { - dso->binary_type = binary_type_data[i++]; + dso__set_binary_type(dso, binary_type_data[i++]); - dso->data.fd = open_dso(dso, machine); - if (dso->data.fd >= 0) + dso_data->fd = open_dso(dso, machine); + if (dso_data->fd >= 0) goto out; - } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND); + } while (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND); out: - if (dso->data.fd >= 0) - dso->data.status = DSO_DATA_STATUS_OK; + if (dso_data->fd >= 0) + dso_data->status = DSO_DATA_STATUS_OK; else - dso->data.status = DSO_DATA_STATUS_ERROR; + dso_data->status = DSO_DATA_STATUS_ERROR; } /** @@ -763,7 +783,7 @@ out: */ int dso__data_get_fd(struct dso *dso, struct machine *machine) { - if (dso->data.status == DSO_DATA_STATUS_ERROR) + if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) return -1; if (pthread_mutex_lock(&dso__data_open_lock) < 0) @@ -771,10 +791,10 @@ int dso__data_get_fd(struct dso *dso, struct machine *machine) try_to_open_dso(dso, machine); - if (dso->data.fd < 0) + if (dso__data(dso)->fd < 0) pthread_mutex_unlock(&dso__data_open_lock); - return dso->data.fd; + return dso__data(dso)->fd; } void dso__data_put_fd(struct dso *dso __maybe_unused) @@ -786,10 +806,10 @@ bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by) { u32 flag = 1 << by; - if (dso->data.status_seen & flag) + if (dso__data(dso)->status_seen & flag) return true; - dso->data.status_seen |= flag; + dso__data(dso)->status_seen |= flag; return false; } @@ -799,12 +819,13 @@ static ssize_t bpf_read(struct dso *dso, u64 offset, char *data) { struct bpf_prog_info_node *node; ssize_t size = DSO__DATA_CACHE_SIZE; + struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso); u64 len; u8 *buf; - node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id); + node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id); if (!node || !node->info_linear) { - dso->data.status = DSO_DATA_STATUS_ERROR; + dso__data(dso)->status = DSO_DATA_STATUS_ERROR; return -1; } @@ -822,14 +843,15 @@ static ssize_t bpf_read(struct dso *dso, u64 offset, char *data) static int bpf_size(struct dso *dso) { struct bpf_prog_info_node *node; + struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso); - node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id); + node = 
perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id); if (!node || !node->info_linear) { - dso->data.status = DSO_DATA_STATUS_ERROR; + dso__data(dso)->status = DSO_DATA_STATUS_ERROR; return -1; } - dso->data.file_size = node->info_linear->info.jited_prog_len; + dso__data(dso)->file_size = node->info_linear->info.jited_prog_len; return 0; } #endif // HAVE_LIBBPF_SUPPORT @@ -837,10 +859,10 @@ static int bpf_size(struct dso *dso) static void dso_cache__free(struct dso *dso) { - struct rb_root *root = &dso->data.cache; + struct rb_root *root = &dso__data(dso)->cache; struct rb_node *next = rb_first(root); - mutex_lock(&dso->lock); + mutex_lock(dso__lock(dso)); while (next) { struct dso_cache *cache; @@ -849,12 +871,12 @@ dso_cache__free(struct dso *dso) rb_erase(&cache->rb_node, root); free(cache); } - mutex_unlock(&dso->lock); + mutex_unlock(dso__lock(dso)); } static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset) { - const struct rb_root *root = &dso->data.cache; + const struct rb_root *root = &dso__data(dso)->cache; struct rb_node * const *p = &root->rb_node; const struct rb_node *parent = NULL; struct dso_cache *cache; @@ -880,13 +902,13 @@ static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset) static struct dso_cache * dso_cache__insert(struct dso *dso, struct dso_cache *new) { - struct rb_root *root = &dso->data.cache; + struct rb_root *root = &dso__data(dso)->cache; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct dso_cache *cache; u64 offset = new->offset; - mutex_lock(&dso->lock); + mutex_lock(dso__lock(dso)); while (*p != NULL) { u64 end; @@ -907,7 +929,7 @@ dso_cache__insert(struct dso *dso, struct dso_cache *new) cache = NULL; out: - mutex_unlock(&dso->lock); + mutex_unlock(dso__lock(dso)); return cache; } @@ -932,18 +954,18 @@ static ssize_t file_read(struct dso *dso, struct machine *machine, pthread_mutex_lock(&dso__data_open_lock); /* - * dso->data.fd might be closed if other thread opened another + * dso__data(dso)->fd might be closed if other thread opened another * file (dso) due to open file limit (RLIMIT_NOFILE). */ try_to_open_dso(dso, machine); - if (dso->data.fd < 0) { - dso->data.status = DSO_DATA_STATUS_ERROR; + if (dso__data(dso)->fd < 0) { + dso__data(dso)->status = DSO_DATA_STATUS_ERROR; ret = -errno; goto out; } - ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset); + ret = pread(dso__data(dso)->fd, data, DSO__DATA_CACHE_SIZE, offset); out: pthread_mutex_unlock(&dso__data_open_lock); return ret; @@ -963,11 +985,11 @@ static struct dso_cache *dso_cache__populate(struct dso *dso, return NULL; } #ifdef HAVE_LIBBPF_SUPPORT - if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) + if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) *ret = bpf_read(dso, cache_offset, cache->data); else #endif - if (dso->binary_type == DSO_BINARY_TYPE__OOL) + if (dso__binary_type(dso) == DSO_BINARY_TYPE__OOL) *ret = DSO__DATA_CACHE_SIZE; else *ret = file_read(dso, machine, cache_offset, cache->data); @@ -1056,25 +1078,25 @@ static int file_size(struct dso *dso, struct machine *machine) pthread_mutex_lock(&dso__data_open_lock); /* - * dso->data.fd might be closed if other thread opened another + * dso__data(dso)->fd might be closed if other thread opened another * file (dso) due to open file limit (RLIMIT_NOFILE). 
*/ try_to_open_dso(dso, machine); - if (dso->data.fd < 0) { + if (dso__data(dso)->fd < 0) { ret = -errno; - dso->data.status = DSO_DATA_STATUS_ERROR; + dso__data(dso)->status = DSO_DATA_STATUS_ERROR; goto out; } - if (fstat(dso->data.fd, &st) < 0) { + if (fstat(dso__data(dso)->fd, &st) < 0) { ret = -errno; pr_err("dso cache fstat failed: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); - dso->data.status = DSO_DATA_STATUS_ERROR; + dso__data(dso)->status = DSO_DATA_STATUS_ERROR; goto out; } - dso->data.file_size = st.st_size; + dso__data(dso)->file_size = st.st_size; out: pthread_mutex_unlock(&dso__data_open_lock); @@ -1083,13 +1105,13 @@ out: int dso__data_file_size(struct dso *dso, struct machine *machine) { - if (dso->data.file_size) + if (dso__data(dso)->file_size) return 0; - if (dso->data.status == DSO_DATA_STATUS_ERROR) + if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) return -1; #ifdef HAVE_LIBBPF_SUPPORT - if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) + if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) return bpf_size(dso); #endif return file_size(dso, machine); @@ -1108,7 +1130,7 @@ off_t dso__data_size(struct dso *dso, struct machine *machine) return -1; /* For now just estimate dso data size is close to file size */ - return dso->data.file_size; + return dso__data(dso)->file_size; } static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine, @@ -1119,7 +1141,7 @@ static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine, return -1; /* Check the offset sanity. */ - if (offset > dso->data.file_size) + if (offset > dso__data(dso)->file_size) return -1; if (offset + size < offset) @@ -1142,7 +1164,7 @@ static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine, ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, u64 offset, u8 *data, ssize_t size) { - if (dso->data.status == DSO_DATA_STATUS_ERROR) + if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) return -1; return data_read_write_offset(dso, machine, offset, data, size, true); @@ -1182,7 +1204,7 @@ ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine, { u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */ - if (dso->data.status == DSO_DATA_STATUS_ERROR) + if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) return -1; return data_read_write_offset(dso, machine, offset, data, size, false); @@ -1235,56 +1257,139 @@ struct dso *machine__findnew_kernel(struct machine *machine, const char *name, */ if (dso != NULL) { dso__set_short_name(dso, short_name, false); - dso->kernel = dso_type; + dso__set_kernel(dso, dso_type); } return dso; } -static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated) +static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated) { - struct rb_root *root = dso->root; + struct dsos *dsos = dso__dsos(dso); if (name == NULL) return; - if (dso->long_name_allocated) - free((char *)dso->long_name); - - if (root) { - rb_erase(&dso->rb_node, root); + if (dsos) { /* - * __dsos__findnew_link_by_longname_id() isn't guaranteed to - * add it back, so a clean removal is required here. + * Need to avoid re-sorting the dsos breaking by non-atomically + * renaming the dso. 
*/ - RB_CLEAR_NODE(&dso->rb_node); - dso->root = NULL; + down_write(&dsos->lock); } - dso->long_name = name; - dso->long_name_len = strlen(name); - dso->long_name_allocated = name_allocated; + if (dso__long_name_allocated(dso)) + free((char *)dso__long_name(dso)); - if (root) - __dsos__findnew_link_by_longname_id(root, dso, NULL, id); + RC_CHK_ACCESS(dso)->long_name = name; + RC_CHK_ACCESS(dso)->long_name_len = strlen(name); + dso__set_long_name_allocated(dso, name_allocated); + + if (dsos) { + dsos->sorted = false; + up_write(&dsos->lock); + } +} + +static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b) +{ + if (a->maj > b->maj) return -1; + if (a->maj < b->maj) return 1; + + if (a->min > b->min) return -1; + if (a->min < b->min) return 1; + + if (a->ino > b->ino) return -1; + if (a->ino < b->ino) return 1; + + /* + * Synthesized MMAP events have zero ino_generation, avoid comparing + * them with MMAP events with actual ino_generation. + * + * I found it harmful because the mismatch resulted in a new + * dso that did not have a build ID whereas the original dso did have a + * build ID. The build ID was essential because the object was not found + * otherwise. - Adrian + */ + if (a->ino_generation && b->ino_generation) { + if (a->ino_generation > b->ino_generation) return -1; + if (a->ino_generation < b->ino_generation) return 1; + } + + return 0; +} + +bool dso_id__empty(const struct dso_id *id) +{ + if (!id) + return true; + + return !id->maj && !id->min && !id->ino && !id->ino_generation; +} + +void __dso__inject_id(struct dso *dso, struct dso_id *id) +{ + struct dsos *dsos = dso__dsos(dso); + struct dso_id *dso_id = dso__id(dso); + + /* dsos write lock held by caller. */ + + dso_id->maj = id->maj; + dso_id->min = id->min; + dso_id->ino = id->ino; + dso_id->ino_generation = id->ino_generation; + + if (dsos) + dsos->sorted = false; +} + +int dso_id__cmp(const struct dso_id *a, const struct dso_id *b) +{ + /* + * The second is always dso->id, so zeroes if not set, assume passing + * NULL for a means a zeroed id + */ + if (dso_id__empty(a) || dso_id__empty(b)) + return 0; + + return __dso_id__cmp(a, b); +} + +int dso__cmp_id(struct dso *a, struct dso *b) +{ + return __dso_id__cmp(dso__id(a), dso__id(b)); } void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) { - dso__set_long_name_id(dso, name, NULL, name_allocated); + dso__set_long_name_id(dso, name, name_allocated); } void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) { + struct dsos *dsos = dso__dsos(dso); + if (name == NULL) return; - if (dso->short_name_allocated) - free((char *)dso->short_name); + if (dsos) { + /* + * Need to avoid re-sorting the dsos breaking by non-atomically + * renaming the dso. 
+ */ + down_write(&dsos->lock); + } + if (dso__short_name_allocated(dso)) + free((char *)dso__short_name(dso)); + + RC_CHK_ACCESS(dso)->short_name = name; + RC_CHK_ACCESS(dso)->short_name_len = strlen(name); + dso__set_short_name_allocated(dso, name_allocated); - dso->short_name = name; - dso->short_name_len = strlen(name); - dso->short_name_allocated = name_allocated; + if (dsos) { + dsos->sorted = false; + up_write(&dsos->lock); + } } int dso__name_len(const struct dso *dso) @@ -1292,43 +1397,48 @@ int dso__name_len(const struct dso *dso) if (!dso) return strlen("[unknown]"); if (verbose > 0) - return dso->long_name_len; + return dso__long_name_len(dso); - return dso->short_name_len; + return dso__short_name_len(dso); } bool dso__loaded(const struct dso *dso) { - return dso->loaded; + return RC_CHK_ACCESS(dso)->loaded; } bool dso__sorted_by_name(const struct dso *dso) { - return dso->sorted_by_name; + return RC_CHK_ACCESS(dso)->sorted_by_name; } void dso__set_sorted_by_name(struct dso *dso) { - dso->sorted_by_name = true; + RC_CHK_ACCESS(dso)->sorted_by_name = true; } struct dso *dso__new_id(const char *name, struct dso_id *id) { - struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); + RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1); + struct dso *res; + struct dso_data *data; - if (dso != NULL) { + if (!dso) + return NULL; + + if (ADD_RC_CHK(res, dso)) { strcpy(dso->name, name); if (id) dso->id = *id; - dso__set_long_name_id(dso, dso->name, id, false); - dso__set_short_name(dso, dso->name, false); + dso__set_long_name_id(res, dso->name, false); + dso__set_short_name(res, dso->name, false); dso->symbols = RB_ROOT_CACHED; dso->symbol_names = NULL; dso->symbol_names_len = 0; - dso->data.cache = RB_ROOT; dso->inlined_nodes = RB_ROOT_CACHED; dso->srclines = RB_ROOT_CACHED; dso->data_types = RB_ROOT; + dso->global_vars = RB_ROOT; dso->data.fd = -1; dso->data.status = DSO_DATA_STATUS_UNKNOWN; dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; @@ -1344,15 +1454,18 @@ struct dso *dso__new_id(const char *name, struct dso_id *id) dso->is_kmod = 0; dso->needs_swap = DSO_SWAP__UNSET; dso->comp = COMP_ID__NONE; - RB_CLEAR_NODE(&dso->rb_node); - dso->root = NULL; - INIT_LIST_HEAD(&dso->node); - INIT_LIST_HEAD(&dso->data.open_entry); mutex_init(&dso->lock); refcount_set(&dso->refcnt, 1); + data = &dso->data; + data->cache = RB_ROOT; + data->fd = -1; + data->status = DSO_DATA_STATUS_UNKNOWN; + INIT_LIST_HEAD(&data->open_entry); +#ifdef REFCNT_CHECKING + data->dso = NULL; /* Set when on the open_entry list. 
*/ +#endif } - - return dso; + return res; } struct dso *dso__new(const char *name) @@ -1362,71 +1475,78 @@ struct dso *dso__new(const char *name) void dso__delete(struct dso *dso) { - if (!RB_EMPTY_NODE(&dso->rb_node)) - pr_err("DSO %s is still in rbtree when being deleted!\n", - dso->long_name); + if (dso__dsos(dso)) + pr_err("DSO %s is still in rbtree when being deleted!\n", dso__long_name(dso)); /* free inlines first, as they reference symbols */ - inlines__tree_delete(&dso->inlined_nodes); - srcline__tree_delete(&dso->srclines); - symbols__delete(&dso->symbols); - dso->symbol_names_len = 0; - zfree(&dso->symbol_names); - annotated_data_type__tree_delete(&dso->data_types); - - if (dso->short_name_allocated) { - zfree((char **)&dso->short_name); - dso->short_name_allocated = false; + inlines__tree_delete(&RC_CHK_ACCESS(dso)->inlined_nodes); + srcline__tree_delete(&RC_CHK_ACCESS(dso)->srclines); + symbols__delete(&RC_CHK_ACCESS(dso)->symbols); + RC_CHK_ACCESS(dso)->symbol_names_len = 0; + zfree(&RC_CHK_ACCESS(dso)->symbol_names); + annotated_data_type__tree_delete(dso__data_types(dso)); + global_var_type__tree_delete(dso__global_vars(dso)); + + if (RC_CHK_ACCESS(dso)->short_name_allocated) { + zfree((char **)&RC_CHK_ACCESS(dso)->short_name); + RC_CHK_ACCESS(dso)->short_name_allocated = false; } - if (dso->long_name_allocated) { - zfree((char **)&dso->long_name); - dso->long_name_allocated = false; + if (RC_CHK_ACCESS(dso)->long_name_allocated) { + zfree((char **)&RC_CHK_ACCESS(dso)->long_name); + RC_CHK_ACCESS(dso)->long_name_allocated = false; } dso__data_close(dso); - auxtrace_cache__free(dso->auxtrace_cache); + auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache); dso_cache__free(dso); dso__free_a2l(dso); - zfree(&dso->symsrc_filename); - nsinfo__zput(dso->nsinfo); - mutex_destroy(&dso->lock); - free(dso); + zfree(&RC_CHK_ACCESS(dso)->symsrc_filename); + nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo); + mutex_destroy(dso__lock(dso)); + RC_CHK_FREE(dso); } struct dso *dso__get(struct dso *dso) { - if (dso) - refcount_inc(&dso->refcnt); - return dso; + struct dso *result; + + if (RC_CHK_GET(result, dso)) + refcount_inc(&RC_CHK_ACCESS(dso)->refcnt); + + return result; } void dso__put(struct dso *dso) { - if (dso && refcount_dec_and_test(&dso->refcnt)) + if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt)) dso__delete(dso); + else + RC_CHK_PUT(dso); } void dso__set_build_id(struct dso *dso, struct build_id *bid) { - dso->bid = *bid; - dso->has_build_id = 1; + RC_CHK_ACCESS(dso)->bid = *bid; + RC_CHK_ACCESS(dso)->has_build_id = 1; } bool dso__build_id_equal(const struct dso *dso, struct build_id *bid) { - if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) { + const struct build_id *dso_bid = dso__bid_const(dso); + + if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) { /* * For the backward compatibility, it allows a build-id has * trailing zeros. 
*/ - return !memcmp(dso->bid.data, bid->data, bid->size) && - !memchr_inv(&dso->bid.data[bid->size], 0, - dso->bid.size - bid->size); + return !memcmp(dso_bid->data, bid->data, bid->size) && + !memchr_inv(&dso_bid->data[bid->size], 0, + dso_bid->size - bid->size); } - return dso->bid.size == bid->size && - memcmp(dso->bid.data, bid->data, dso->bid.size) == 0; + return dso_bid->size == bid->size && + memcmp(dso_bid->data, bid->data, dso_bid->size) == 0; } void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) @@ -1436,8 +1556,8 @@ void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) if (machine__is_default_guest(machine)) return; sprintf(path, "%s/sys/kernel/notes", machine->root_dir); - if (sysfs__read_build_id(path, &dso->bid) == 0) - dso->has_build_id = true; + if (sysfs__read_build_id(path, dso__bid(dso)) == 0) + dso__set_has_build_id(dso); } int dso__kernel_module_get_build_id(struct dso *dso, @@ -1448,14 +1568,14 @@ int dso__kernel_module_get_build_id(struct dso *dso, * kernel module short names are of the form "[module]" and * we need just "module" here. */ - const char *name = dso->short_name + 1; + const char *name = dso__short_name(dso) + 1; snprintf(filename, sizeof(filename), "%s/sys/module/%.*s/notes/.note.gnu.build-id", root_dir, (int)strlen(name) - 1, name); - if (sysfs__read_build_id(filename, &dso->bid) == 0) - dso->has_build_id = true; + if (sysfs__read_build_id(filename, dso__bid(dso)) == 0) + dso__set_has_build_id(dso); return 0; } @@ -1464,21 +1584,21 @@ static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) { char sbuild_id[SBUILD_ID_SIZE]; - build_id__sprintf(&dso->bid, sbuild_id); + build_id__sprintf(dso__bid(dso), sbuild_id); return fprintf(fp, "%s", sbuild_id); } size_t dso__fprintf(struct dso *dso, FILE *fp) { struct rb_node *nd; - size_t ret = fprintf(fp, "dso: %s (", dso->short_name); + size_t ret = fprintf(fp, "dso: %s (", dso__short_name(dso)); - if (dso->short_name != dso->long_name) - ret += fprintf(fp, "%s, ", dso->long_name); + if (dso__short_name(dso) != dso__long_name(dso)) + ret += fprintf(fp, "%s, ", dso__long_name(dso)); ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT "); ret += dso__fprintf_buildid(dso, fp); ret += fprintf(fp, ")\n"); - for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) { + for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); ret += symbol__fprintf(pos, fp); } @@ -1502,7 +1622,7 @@ enum dso_type dso__type(struct dso *dso, struct machine *machine) int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) { - int idx, errnum = dso->load_errno; + int idx, errnum = *dso__load_errno(dso); /* * This must have a same ordering as the enum dso_load_errno. 
*/ diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index ce9f3849a773..df2c98402af3 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -11,6 +11,7 @@ #include <linux/bitops.h> #include "build-id.h" #include "mutex.h" +#include <internal/rc_check.h> struct machine; struct map; @@ -100,26 +101,27 @@ enum dso_load_errno { __DSO_LOAD_ERRNO__END, }; -#define DSO__SWAP(dso, type, val) \ -({ \ - type ____r = val; \ - BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ - if (dso->needs_swap == DSO_SWAP__YES) { \ - switch (sizeof(____r)) { \ - case 2: \ - ____r = bswap_16(val); \ - break; \ - case 4: \ - ____r = bswap_32(val); \ - break; \ - case 8: \ - ____r = bswap_64(val); \ - break; \ - default: \ - BUG_ON(1); \ - } \ - } \ - ____r; \ +#define DSO__SWAP(dso, type, val) \ +({ \ + type ____r = val; \ + enum dso_swap_type ___dst = dso__needs_swap(dso); \ + BUG_ON(___dst == DSO_SWAP__UNSET); \ + if (___dst == DSO_SWAP__YES) { \ + switch (sizeof(____r)) { \ + case 2: \ + ____r = bswap_16(val); \ + break; \ + case 4: \ + ____r = bswap_32(val); \ + break; \ + case 8: \ + ____r = bswap_64(val); \ + break; \ + default: \ + BUG_ON(1); \ + } \ + } \ + ____r; \ }) #define DSO__DATA_CACHE_SIZE 4096 @@ -142,33 +144,77 @@ struct dso_cache { char data[]; }; +struct dso_data { + struct rb_root cache; + struct list_head open_entry; +#ifdef REFCNT_CHECKING + struct dso *dso; +#endif + int fd; + int status; + u32 status_seen; + u64 file_size; + u64 elf_base_addr; + u64 debug_frame_offset; + u64 eh_frame_hdr_addr; + u64 eh_frame_hdr_offset; +}; + +struct dso_bpf_prog { + u32 id; + u32 sub_id; + struct perf_env *env; +}; + struct auxtrace_cache; -struct dso { +DECLARE_RC_STRUCT(dso) { struct mutex lock; - struct list_head node; - struct rb_node rb_node; /* rbtree node sorted by long name */ - struct rb_root *root; /* root of rbtree that rb_node is in */ + struct dsos *dsos; struct rb_root_cached symbols; struct symbol **symbol_names; size_t symbol_names_len; struct rb_root_cached inlined_nodes; struct rb_root_cached srclines; - struct rb_root data_types; + struct rb_root data_types; + struct rb_root global_vars; struct { u64 addr; struct symbol *symbol; } last_find_result; + struct build_id bid; + u64 text_offset; + u64 text_end; + const char *short_name; + const char *long_name; void *a2l; char *symsrc_filename; +#if defined(__powerpc__) + void *dwfl; /* DWARF debug info */ +#endif + struct nsinfo *nsinfo; + struct auxtrace_cache *auxtrace_cache; + union { /* Tool specific area */ + void *priv; + u64 db_id; + }; + /* bpf prog information */ + struct dso_bpf_prog bpf_prog; + /* dso data file */ + struct dso_data data; + struct dso_id id; unsigned int a2l_fails; - enum dso_space_type kernel; - bool is_kmod; - enum dso_swap_type needs_swap; - enum dso_binary_type symtab_type; - enum dso_binary_type binary_type; + int comp; + refcount_t refcnt; enum dso_load_errno load_errno; + u16 long_name_len; + u16 short_name_len; + enum dso_binary_type symtab_type:8; + enum dso_binary_type binary_type:8; + enum dso_space_type kernel:2; + enum dso_swap_type needs_swap:2; + bool is_kmod:1; u8 adjust_symbols:1; u8 has_build_id:1; u8 header_build_id:1; @@ -182,44 +228,6 @@ struct dso { bool sorted_by_name; bool loaded; u8 rel; - struct build_id bid; - u64 text_offset; - u64 text_end; - const char *short_name; - const char *long_name; - u16 long_name_len; - u16 short_name_len; - void *dwfl; /* DWARF debug info */ - struct auxtrace_cache *auxtrace_cache; - int comp; - - /* dso data file */ - struct { - struct rb_root 
cache; - int fd; - int status; - u32 status_seen; - u64 file_size; - struct list_head open_entry; - u64 elf_base_addr; - u64 debug_frame_offset; - u64 eh_frame_hdr_addr; - u64 eh_frame_hdr_offset; - } data; - /* bpf prog information */ - struct { - u32 id; - u32 sub_id; - struct perf_env *env; - } bpf_prog; - - union { /* Tool specific area */ - void *priv; - u64 db_id; - }; - struct nsinfo *nsinfo; - struct dso_id id; - refcount_t refcnt; char name[]; }; @@ -230,19 +238,393 @@ struct dso { * @n: the 'struct rb_node *' to use as a temporary storage */ #define dso__for_each_symbol(dso, pos, n) \ - symbols__for_each_entry(&(dso)->symbols, pos, n) + symbols__for_each_entry(dso__symbols(dso), pos, n) -#define dsos__for_each_with_build_id(pos, head) \ - list_for_each_entry(pos, head, node) \ - if (!pos->has_build_id) \ - continue; \ - else +static inline void *dso__a2l(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->a2l; +} + +static inline void dso__set_a2l(struct dso *dso, void *val) +{ + RC_CHK_ACCESS(dso)->a2l = val; +} + +static inline unsigned int dso__a2l_fails(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->a2l_fails; +} + +static inline void dso__set_a2l_fails(struct dso *dso, unsigned int val) +{ + RC_CHK_ACCESS(dso)->a2l_fails = val; +} + +static inline bool dso__adjust_symbols(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->adjust_symbols; +} + +static inline void dso__set_adjust_symbols(struct dso *dso, bool val) +{ + RC_CHK_ACCESS(dso)->adjust_symbols = val; +} + +static inline bool dso__annotate_warned(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->annotate_warned; +} + +static inline void dso__set_annotate_warned(struct dso *dso) +{ + RC_CHK_ACCESS(dso)->annotate_warned = 1; +} + +static inline struct auxtrace_cache *dso__auxtrace_cache(struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->auxtrace_cache; +} + +static inline void dso__set_auxtrace_cache(struct dso *dso, struct auxtrace_cache *cache) +{ + RC_CHK_ACCESS(dso)->auxtrace_cache = cache; +} + +static inline struct build_id *dso__bid(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->bid; +} + +static inline const struct build_id *dso__bid_const(const struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->bid; +} + +static inline struct dso_bpf_prog *dso__bpf_prog(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->bpf_prog; +} + +static inline bool dso__has_build_id(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->has_build_id; +} + +static inline void dso__set_has_build_id(struct dso *dso) +{ + RC_CHK_ACCESS(dso)->has_build_id = true; +} + +static inline bool dso__has_srcline(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->has_srcline; +} + +static inline void dso__set_has_srcline(struct dso *dso, bool val) +{ + RC_CHK_ACCESS(dso)->has_srcline = val; +} + +static inline int dso__comp(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->comp; +} + +static inline void dso__set_comp(struct dso *dso, int comp) +{ + RC_CHK_ACCESS(dso)->comp = comp; +} + +static inline struct dso_data *dso__data(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->data; +} + +static inline u64 dso__db_id(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->db_id; +} + +static inline void dso__set_db_id(struct dso *dso, u64 db_id) +{ + RC_CHK_ACCESS(dso)->db_id = db_id; +} + +static inline struct dsos *dso__dsos(struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->dsos; +} + +static inline void dso__set_dsos(struct dso *dso, struct dsos *dsos) +{ + RC_CHK_ACCESS(dso)->dsos = dsos; +} + +static inline bool 
dso__header_build_id(struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->header_build_id; +} + +static inline void dso__set_header_build_id(struct dso *dso, bool val) +{ + RC_CHK_ACCESS(dso)->header_build_id = val; +} + +static inline bool dso__hit(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->hit; +} + +static inline void dso__set_hit(struct dso *dso) +{ + RC_CHK_ACCESS(dso)->hit = 1; +} + +static inline struct dso_id *dso__id(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->id; +} + +static inline const struct dso_id *dso__id_const(const struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->id; +} + +static inline struct rb_root_cached *dso__inlined_nodes(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->inlined_nodes; +} + +static inline bool dso__is_64_bit(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->is_64_bit; +} + +static inline void dso__set_is_64_bit(struct dso *dso, bool is) +{ + RC_CHK_ACCESS(dso)->is_64_bit = is; +} + +static inline bool dso__is_kmod(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->is_kmod; +} + +static inline void dso__set_is_kmod(struct dso *dso) +{ + RC_CHK_ACCESS(dso)->is_kmod = 1; +} + +static inline enum dso_space_type dso__kernel(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->kernel; +} + +static inline void dso__set_kernel(struct dso *dso, enum dso_space_type kernel) +{ + RC_CHK_ACCESS(dso)->kernel = kernel; +} + +static inline u64 dso__last_find_result_addr(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->last_find_result.addr; +} + +static inline void dso__set_last_find_result_addr(struct dso *dso, u64 addr) +{ + RC_CHK_ACCESS(dso)->last_find_result.addr = addr; +} + +static inline struct symbol *dso__last_find_result_symbol(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->last_find_result.symbol; +} + +static inline void dso__set_last_find_result_symbol(struct dso *dso, struct symbol *symbol) +{ + RC_CHK_ACCESS(dso)->last_find_result.symbol = symbol; +} + +static inline enum dso_load_errno *dso__load_errno(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->load_errno; +} static inline void dso__set_loaded(struct dso *dso) { - dso->loaded = true; + RC_CHK_ACCESS(dso)->loaded = true; +} + +static inline struct mutex *dso__lock(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->lock; +} + +static inline const char *dso__long_name(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->long_name; +} + +static inline bool dso__long_name_allocated(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->long_name_allocated; +} + +static inline void dso__set_long_name_allocated(struct dso *dso, bool allocated) +{ + RC_CHK_ACCESS(dso)->long_name_allocated = allocated; +} + +static inline u16 dso__long_name_len(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->long_name_len; +} + +static inline const char *dso__name(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->name; +} + +static inline enum dso_swap_type dso__needs_swap(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->needs_swap; +} + +static inline void dso__set_needs_swap(struct dso *dso, enum dso_swap_type type) +{ + RC_CHK_ACCESS(dso)->needs_swap = type; +} + +static inline struct nsinfo *dso__nsinfo(struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->nsinfo; +} + +static inline const struct nsinfo *dso__nsinfo_const(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->nsinfo; +} + +static inline struct nsinfo **dso__nsinfo_ptr(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->nsinfo; +} + +void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi); + 
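All of these one-line helpers follow the same pattern: the struct dso layout is hidden behind DECLARE_RC_STRUCT() and every field access goes through RC_CHK_ACCESS(), so reference-count bugs can be caught in special builds. A minimal sketch of the shape of that machinery (the real header is tools/lib/perf/include/internal/rc_check.h, included at the top of this file; the details below are paraphrased, not verbatim):

#ifndef REFCNT_CHECKING
/* Normal build: no indirection, the accessors compile away. */
#define DECLARE_RC_STRUCT(struct_name) struct struct_name
#define RC_CHK_ACCESS(object) (object)
#else
/*
 * Checking build: the public struct is a thin wrapper holding a
 * pointer to the real ("original") struct. Each reference gets its
 * own wrapper allocation, so touching a stale reference after the
 * final put shows up as a use-after-free to the allocator/sanitizer.
 */
#define DECLARE_RC_STRUCT(struct_name)			\
	struct original_##struct_name;			\
	struct struct_name {				\
		struct original_##struct_name *orig;	\
	};						\
	struct original_##struct_name
#define RC_CHK_ACCESS(object) ((object)->orig)
#endif

In normal builds the wrapper collapses to the struct itself, so the inline accessors cost nothing.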
+static inline u8 dso__rel(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->rel; +} + +static inline void dso__set_rel(struct dso *dso, u8 rel) +{ + RC_CHK_ACCESS(dso)->rel = rel; } +static inline const char *dso__short_name(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->short_name; +} + +static inline bool dso__short_name_allocated(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->short_name_allocated; +} + +static inline void dso__set_short_name_allocated(struct dso *dso, bool allocated) +{ + RC_CHK_ACCESS(dso)->short_name_allocated = allocated; +} + +static inline u16 dso__short_name_len(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->short_name_len; +} + +static inline struct rb_root_cached *dso__srclines(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->srclines; +} + +static inline struct rb_root *dso__data_types(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->data_types; +} + +static inline struct rb_root *dso__global_vars(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->global_vars; +} + +static inline struct rb_root_cached *dso__symbols(struct dso *dso) +{ + return &RC_CHK_ACCESS(dso)->symbols; +} + +static inline struct symbol **dso__symbol_names(struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->symbol_names; +} + +static inline void dso__set_symbol_names(struct dso *dso, struct symbol **names) +{ + RC_CHK_ACCESS(dso)->symbol_names = names; +} + +static inline size_t dso__symbol_names_len(struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->symbol_names_len; +} + +static inline void dso__set_symbol_names_len(struct dso *dso, size_t len) +{ + RC_CHK_ACCESS(dso)->symbol_names_len = len; +} + +static inline const char *dso__symsrc_filename(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->symsrc_filename; +} + +static inline void dso__set_symsrc_filename(struct dso *dso, char *val) +{ + RC_CHK_ACCESS(dso)->symsrc_filename = val; +} + +static inline enum dso_binary_type dso__symtab_type(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->symtab_type; +} + +static inline void dso__set_symtab_type(struct dso *dso, enum dso_binary_type bt) +{ + RC_CHK_ACCESS(dso)->symtab_type = bt; +} + +static inline u64 dso__text_end(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->text_end; +} + +static inline void dso__set_text_end(struct dso *dso, u64 val) +{ + RC_CHK_ACCESS(dso)->text_end = val; +} + +static inline u64 dso__text_offset(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->text_offset; +} + +static inline void dso__set_text_offset(struct dso *dso, u64 val) +{ + RC_CHK_ACCESS(dso)->text_offset = val; +} + +int dso_id__cmp(const struct dso_id *a, const struct dso_id *b); +bool dso_id__empty(const struct dso_id *id); + struct dso *dso__new_id(const char *name, struct dso_id *id); struct dso *dso__new(const char *name); void dso__delete(struct dso *dso); @@ -250,6 +632,7 @@ void dso__delete(struct dso *dso); int dso__cmp_id(struct dso *a, struct dso *b); void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated); void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated); +void __dso__inject_id(struct dso *dso, struct dso_id *id); int dso__name_len(const struct dso *dso); @@ -268,7 +651,7 @@ bool dso__loaded(const struct dso *dso); static inline bool dso__has_symbols(const struct dso *dso) { - return !RB_EMPTY_ROOT(&dso->symbols.rb_root); + return !RB_EMPTY_ROOT(&RC_CHK_ACCESS(dso)->symbols.rb_root); } char *dso__filename_with_chroot(const struct dso *dso, const char *filename); @@ -384,21 +767,33 @@ void 
dso__reset_find_symbol_cache(struct dso *dso); size_t dso__fprintf_symbols_by_name(struct dso *dso, FILE *fp); size_t dso__fprintf(struct dso *dso, FILE *fp); +static inline enum dso_binary_type dso__binary_type(const struct dso *dso) +{ + return RC_CHK_ACCESS(dso)->binary_type; +} + +static inline void dso__set_binary_type(struct dso *dso, enum dso_binary_type bt) +{ + RC_CHK_ACCESS(dso)->binary_type = bt; +} + static inline bool dso__is_vmlinux(const struct dso *dso) { - return dso->binary_type == DSO_BINARY_TYPE__VMLINUX || - dso->binary_type == DSO_BINARY_TYPE__GUEST_VMLINUX; + enum dso_binary_type bt = dso__binary_type(dso); + + return bt == DSO_BINARY_TYPE__VMLINUX || bt == DSO_BINARY_TYPE__GUEST_VMLINUX; } static inline bool dso__is_kcore(const struct dso *dso) { - return dso->binary_type == DSO_BINARY_TYPE__KCORE || - dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE; + enum dso_binary_type bt = dso__binary_type(dso); + + return bt == DSO_BINARY_TYPE__KCORE || bt == DSO_BINARY_TYPE__GUEST_KCORE; } static inline bool dso__is_kallsyms(const struct dso *dso) { - return dso->kernel && dso->long_name[0] != '/'; + return RC_CHK_ACCESS(dso)->kernel && RC_CHK_ACCESS(dso)->long_name[0] != '/'; } bool dso__is_object_file(const struct dso *dso); @@ -411,4 +806,7 @@ int dso__strerror_load(struct dso *dso, char *buf, size_t buflen); void reset_fd_limit(void); +u64 dso__find_global_type(struct dso *dso, u64 addr); +u64 dso__findnew_global_type(struct dso *dso, u64 addr, u64 offset); + #endif /* __PERF_DSO */ diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c index cf80aa42dd07..ab3d0c01dd63 100644 --- a/tools/perf/util/dsos.c +++ b/tools/perf/util/dsos.c @@ -12,115 +12,140 @@ #include <symbol.h> // filename__read_build_id #include <unistd.h> -static int __dso_id__cmp(struct dso_id *a, struct dso_id *b) +void dsos__init(struct dsos *dsos) { - if (a->maj > b->maj) return -1; - if (a->maj < b->maj) return 1; + init_rwsem(&dsos->lock); - if (a->min > b->min) return -1; - if (a->min < b->min) return 1; + dsos->cnt = 0; + dsos->allocated = 0; + dsos->dsos = NULL; + dsos->sorted = true; +} - if (a->ino > b->ino) return -1; - if (a->ino < b->ino) return 1; +static void dsos__purge(struct dsos *dsos) +{ + down_write(&dsos->lock); - /* - * Synthesized MMAP events have zero ino_generation, avoid comparing - * them with MMAP events with actual ino_generation. - * - * I found it harmful because the mismatch resulted in a new - * dso that did not have a build ID whereas the original dso did have a - * build ID. The build ID was essential because the object was not found - * otherwise. 
- Adrian - */ - if (a->ino_generation && b->ino_generation) { - if (a->ino_generation > b->ino_generation) return -1; - if (a->ino_generation < b->ino_generation) return 1; - } + for (unsigned int i = 0; i < dsos->cnt; i++) { + struct dso *dso = dsos->dsos[i]; - return 0; -} + dso__set_dsos(dso, NULL); + dso__put(dso); + } -static bool dso_id__empty(struct dso_id *id) -{ - if (!id) - return true; + zfree(&dsos->dsos); + dsos->cnt = 0; + dsos->allocated = 0; + dsos->sorted = true; - return !id->maj && !id->min && !id->ino && !id->ino_generation; + up_write(&dsos->lock); } -static void dso__inject_id(struct dso *dso, struct dso_id *id) +void dsos__exit(struct dsos *dsos) { - dso->id.maj = id->maj; - dso->id.min = id->min; - dso->id.ino = id->ino; - dso->id.ino_generation = id->ino_generation; + dsos__purge(dsos); + exit_rwsem(&dsos->lock); } -static int dso_id__cmp(struct dso_id *a, struct dso_id *b) + +static int __dsos__for_each_dso(struct dsos *dsos, + int (*cb)(struct dso *dso, void *data), + void *data) { - /* - * The second is always dso->id, so zeroes if not set, assume passing - * NULL for a means a zeroed id - */ - if (dso_id__empty(a) || dso_id__empty(b)) - return 0; + for (unsigned int i = 0; i < dsos->cnt; i++) { + struct dso *dso = dsos->dsos[i]; + int err; - return __dso_id__cmp(a, b); + err = cb(dso, data); + if (err) + return err; + } + return 0; } -int dso__cmp_id(struct dso *a, struct dso *b) -{ - return __dso_id__cmp(&a->id, &b->id); -} +struct dsos__read_build_ids_cb_args { + bool with_hits; + bool have_build_id; +}; -bool __dsos__read_build_ids(struct list_head *head, bool with_hits) +static int dsos__read_build_ids_cb(struct dso *dso, void *data) { - bool have_build_id = false; - struct dso *pos; + struct dsos__read_build_ids_cb_args *args = data; struct nscookie nsc; - list_for_each_entry(pos, head, node) { - if (with_hits && !pos->hit && !dso__is_vdso(pos)) - continue; - if (pos->has_build_id) { - have_build_id = true; - continue; - } - nsinfo__mountns_enter(pos->nsinfo, &nsc); - if (filename__read_build_id(pos->long_name, &pos->bid) > 0) { - have_build_id = true; - pos->has_build_id = true; - } else if (errno == ENOENT && pos->nsinfo) { - char *new_name = dso__filename_with_chroot(pos, pos->long_name); - - if (new_name && filename__read_build_id(new_name, - &pos->bid) > 0) { - have_build_id = true; - pos->has_build_id = true; - } - free(new_name); + if (args->with_hits && !dso__hit(dso) && !dso__is_vdso(dso)) + return 0; + if (dso__has_build_id(dso)) { + args->have_build_id = true; + return 0; + } + nsinfo__mountns_enter(dso__nsinfo(dso), &nsc); + if (filename__read_build_id(dso__long_name(dso), dso__bid(dso)) > 0) { + args->have_build_id = true; + dso__set_has_build_id(dso); + } else if (errno == ENOENT && dso__nsinfo(dso)) { + char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso)); + + if (new_name && filename__read_build_id(new_name, dso__bid(dso)) > 0) { + args->have_build_id = true; + dso__set_has_build_id(dso); } - nsinfo__mountns_exit(&nsc); + free(new_name); } + nsinfo__mountns_exit(&nsc); + return 0; +} - return have_build_id; +bool dsos__read_build_ids(struct dsos *dsos, bool with_hits) +{ + struct dsos__read_build_ids_cb_args args = { + .with_hits = with_hits, + .have_build_id = false, + }; + + dsos__for_each_dso(dsos, dsos__read_build_ids_cb, &args); + return args.have_build_id; } -static int __dso__cmp_long_name(const char *long_name, struct dso_id *id, struct dso *b) +static int __dso__cmp_long_name(const char *long_name, const struct 
dso_id *id, + const struct dso *b) { - int rc = strcmp(long_name, b->long_name); - return rc ?: dso_id__cmp(id, &b->id); + int rc = strcmp(long_name, dso__long_name(b)); + return rc ?: dso_id__cmp(id, dso__id_const(b)); } -static int __dso__cmp_short_name(const char *short_name, struct dso_id *id, struct dso *b) +static int __dso__cmp_short_name(const char *short_name, const struct dso_id *id, + const struct dso *b) { - int rc = strcmp(short_name, b->short_name); - return rc ?: dso_id__cmp(id, &b->id); + int rc = strcmp(short_name, dso__short_name(b)); + return rc ?: dso_id__cmp(id, dso__id_const(b)); } -static int dso__cmp_short_name(struct dso *a, struct dso *b) +static int dsos__cmp_long_name_id_short_name(const void *va, const void *vb) { - return __dso__cmp_short_name(a->short_name, &a->id, b); + const struct dso *a = *((const struct dso **)va); + const struct dso *b = *((const struct dso **)vb); + int rc = strcmp(dso__long_name(a), dso__long_name(b)); + + if (!rc) { + rc = dso_id__cmp(dso__id_const(a), dso__id_const(b)); + if (!rc) + rc = strcmp(dso__short_name(a), dso__short_name(b)); + } + return rc; +} + +struct dsos__key { + const char *long_name; + const struct dso_id *id; +}; + +static int dsos__cmp_key_long_name_id(const void *vkey, const void *vdso) +{ + const struct dsos__key *key = vkey; + const struct dso *dso = *((const struct dso **)vdso); + + return __dso__cmp_long_name(key->long_name, key->id, dso); } /* @@ -128,110 +153,121 @@ static int dso__cmp_short_name(struct dso *a, struct dso *b) * Either one of the dso or name parameter must be non-NULL or the * function will not work. */ -struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso, - const char *name, struct dso_id *id) +static struct dso *__dsos__find_by_longname_id(struct dsos *dsos, + const char *name, + struct dso_id *id, + bool write_locked) { - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - - if (!name) - name = dso->long_name; - /* - * Find node with the matching name - */ - while (*p) { - struct dso *this = rb_entry(*p, struct dso, rb_node); - int rc = __dso__cmp_long_name(name, id, this); - - parent = *p; - if (rc == 0) { - /* - * In case the new DSO is a duplicate of an existing - * one, print a one-time warning & put the new entry - * at the end of the list of duplicates. - */ - if (!dso || (dso == this)) - return this; /* Find matching dso */ - /* - * The core kernel DSOs may have duplicated long name. - * In this case, the short name should be different. - * Comparing the short names to differentiate the DSOs. 
- */ - rc = dso__cmp_short_name(dso, this); - if (rc == 0) { - pr_err("Duplicated dso name: %s\n", name); - return NULL; - } + struct dsos__key key = { + .long_name = name, + .id = id, + }; + struct dso **res; + + if (!dsos->sorted) { + if (!write_locked) { + struct dso *dso; + + up_read(&dsos->lock); + down_write(&dsos->lock); + dso = __dsos__find_by_longname_id(dsos, name, id, + /*write_locked=*/true); + up_write(&dsos->lock); + down_read(&dsos->lock); + return dso; } - if (rc < 0) - p = &parent->rb_left; - else - p = &parent->rb_right; + qsort(dsos->dsos, dsos->cnt, sizeof(struct dso *), + dsos__cmp_long_name_id_short_name); + dsos->sorted = true; } - if (dso) { - /* Add new node and rebalance tree */ - rb_link_node(&dso->rb_node, parent, p); - rb_insert_color(&dso->rb_node, root); - dso->root = root; - } - return NULL; + + res = bsearch(&key, dsos->dsos, dsos->cnt, sizeof(struct dso *), + dsos__cmp_key_long_name_id); + if (!res) + return NULL; + + return dso__get(*res); } -void __dsos__add(struct dsos *dsos, struct dso *dso) +int __dsos__add(struct dsos *dsos, struct dso *dso) { - list_add_tail(&dso->node, &dsos->head); - __dsos__findnew_link_by_longname_id(&dsos->root, dso, NULL, &dso->id); - /* - * It is now in the linked list, grab a reference, then garbage collect - * this when needing memory, by looking at LRU dso instances in the - * list with atomic_read(&dso->refcnt) == 1, i.e. no references - * anywhere besides the one for the list, do, under a lock for the - * list: remove it from the list, then a dso__put(), that probably will - * be the last and will then call dso__delete(), end of life. - * - * That, or at the end of the 'struct machine' lifetime, when all - * 'struct dso' instances will be removed from the list, in - * dsos__exit(), if they have no other reference from some other data - * structure. - * - * E.g.: after processing a 'perf.data' file and storing references - * to objects instantiated while processing events, we will have - * references to the 'thread', 'map', 'dso' structs all from 'struct - * hist_entry' instances, but we may not need anything not referenced, - * so we might as well call machines__exit()/machines__delete() and - * garbage collect it. 
- */ - dso__get(dso); + if (dsos->cnt == dsos->allocated) { + unsigned int to_allocate = 2; + struct dso **temp; + + if (dsos->allocated > 0) + to_allocate = dsos->allocated * 2; + temp = realloc(dsos->dsos, sizeof(struct dso *) * to_allocate); + if (!temp) + return -ENOMEM; + dsos->dsos = temp; + dsos->allocated = to_allocate; + } + dsos->dsos[dsos->cnt++] = dso__get(dso); + if (dsos->cnt >= 2 && dsos->sorted) { + dsos->sorted = dsos__cmp_long_name_id_short_name(&dsos->dsos[dsos->cnt - 2], + &dsos->dsos[dsos->cnt - 1]) + <= 0; + } + dso__set_dsos(dso, dsos); + return 0; } -void dsos__add(struct dsos *dsos, struct dso *dso) +int dsos__add(struct dsos *dsos, struct dso *dso) { + int ret; + down_write(&dsos->lock); - __dsos__add(dsos, dso); + ret = __dsos__add(dsos, dso); up_write(&dsos->lock); + return ret; } -static struct dso *__dsos__findnew_by_longname_id(struct rb_root *root, const char *name, struct dso_id *id) +struct dsos__find_id_cb_args { + const char *name; + struct dso_id *id; + struct dso *res; +}; + +static int dsos__find_id_cb(struct dso *dso, void *data) { - return __dsos__findnew_link_by_longname_id(root, NULL, name, id); + struct dsos__find_id_cb_args *args = data; + + if (__dso__cmp_short_name(args->name, args->id, dso) == 0) { + args->res = dso__get(dso); + return 1; + } + return 0; + } -static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id, bool cmp_short) +static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id, + bool cmp_short, bool write_locked) { - struct dso *pos; + struct dso *res; if (cmp_short) { - list_for_each_entry(pos, &dsos->head, node) - if (__dso__cmp_short_name(name, id, pos) == 0) - return pos; - return NULL; + struct dsos__find_id_cb_args args = { + .name = name, + .id = id, + .res = NULL, + }; + + __dsos__for_each_dso(dsos, dsos__find_id_cb, &args); + return args.res; } - return __dsos__findnew_by_longname_id(&dsos->root, name, id); + res = __dsos__find_by_longname_id(dsos, name, id, write_locked); + return res; } -struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short) +struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short) { - return __dsos__find_id(dsos, name, NULL, cmp_short); + struct dso *res; + + down_read(&dsos->lock); + res = __dsos__find_id(dsos, name, NULL, cmp_short, /*write_locked=*/false); + up_read(&dsos->lock); + return res; } static void dso__set_basename(struct dso *dso) @@ -239,7 +275,7 @@ static void dso__set_basename(struct dso *dso) char *base, *lname; int tid; - if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) { + if (sscanf(dso__long_name(dso), "/tmp/perf-%d.map", &tid) == 1) { if (asprintf(&base, "[JIT] tid %d", tid) < 0) return; } else { @@ -247,7 +283,7 @@ static void dso__set_basename(struct dso *dso) * basename() may modify path buffer, so we must pass * a copy. */ - lname = strdup(dso->long_name); + lname = strdup(dso__long_name(dso)); if (!lname) return; @@ -271,25 +307,23 @@ static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, struct struct dso *dso = dso__new_id(name, id); if (dso != NULL) { - __dsos__add(dsos, dso); + /* + * The dsos lock is held on entry, so rename the dso before + * adding it to avoid needing to take the dsos lock again to say + * the array isn't sorted. 
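+ *
+ * (dso__set_basename() changes the short name, which is part of the
+ * sort key used by dsos__cmp_long_name_id_short_name().)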
+ */ dso__set_basename(dso); - /* Put dso here because __dsos_add already got it */ - dso__put(dso); + __dsos__add(dsos, dso); } return dso; } -struct dso *__dsos__addnew(struct dsos *dsos, const char *name) -{ - return __dsos__addnew_id(dsos, name, NULL); -} - static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id) { - struct dso *dso = __dsos__find_id(dsos, name, id, false); + struct dso *dso = __dsos__find_id(dsos, name, id, false, /*write_locked=*/true); - if (dso && dso_id__empty(&dso->id) && !dso_id__empty(id)) - dso__inject_id(dso, id); + if (dso && dso_id__empty(dso__id(dso)) && !dso_id__empty(id)) + __dso__inject_id(dso, id); return dso ? dso : __dsos__addnew_id(dsos, name, id); } @@ -298,36 +332,151 @@ struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id { struct dso *dso; down_write(&dsos->lock); - dso = dso__get(__dsos__findnew_id(dsos, name, id)); + dso = __dsos__findnew_id(dsos, name, id); up_write(&dsos->lock); return dso; } -size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, - bool (skip)(struct dso *dso, int parm), int parm) +struct dsos__fprintf_buildid_cb_args { + FILE *fp; + bool (*skip)(struct dso *dso, int parm); + int parm; + size_t ret; +}; + +static int dsos__fprintf_buildid_cb(struct dso *dso, void *data) { - struct dso *pos; - size_t ret = 0; + struct dsos__fprintf_buildid_cb_args *args = data; + char sbuild_id[SBUILD_ID_SIZE]; - list_for_each_entry(pos, head, node) { - char sbuild_id[SBUILD_ID_SIZE]; + if (args->skip && args->skip(dso, args->parm)) + return 0; + build_id__sprintf(dso__bid(dso), sbuild_id); + args->ret += fprintf(args->fp, "%-40s %s\n", sbuild_id, dso__long_name(dso)); + return 0; +} - if (skip && skip(pos, parm)) - continue; - build_id__sprintf(&pos->bid, sbuild_id); - ret += fprintf(fp, "%-40s %s\n", sbuild_id, pos->long_name); - } - return ret; +size_t dsos__fprintf_buildid(struct dsos *dsos, FILE *fp, + bool (*skip)(struct dso *dso, int parm), int parm) +{ + struct dsos__fprintf_buildid_cb_args args = { + .fp = fp, + .skip = skip, + .parm = parm, + .ret = 0, + }; + + dsos__for_each_dso(dsos, dsos__fprintf_buildid_cb, &args); + return args.ret; +} + +struct dsos__fprintf_cb_args { + FILE *fp; + size_t ret; +}; + +static int dsos__fprintf_cb(struct dso *dso, void *data) +{ + struct dsos__fprintf_cb_args *args = data; + + args->ret += dso__fprintf(dso, args->fp); + return 0; +} + +size_t dsos__fprintf(struct dsos *dsos, FILE *fp) +{ + struct dsos__fprintf_cb_args args = { + .fp = fp, + .ret = 0, + }; + + dsos__for_each_dso(dsos, dsos__fprintf_cb, &args); + return args.ret; +} + +static int dsos__hit_all_cb(struct dso *dso, void *data __maybe_unused) +{ + dso__set_hit(dso); + return 0; +} + +int dsos__hit_all(struct dsos *dsos) +{ + return dsos__for_each_dso(dsos, dsos__hit_all_cb, NULL); } -size_t __dsos__fprintf(struct list_head *head, FILE *fp) +struct dso *dsos__findnew_module_dso(struct dsos *dsos, + struct machine *machine, + struct kmod_path *m, + const char *filename) { - struct dso *pos; - size_t ret = 0; + struct dso *dso; + + down_write(&dsos->lock); - list_for_each_entry(pos, head, node) { - ret += dso__fprintf(pos, fp); + dso = __dsos__find_id(dsos, m->name, NULL, /*cmp_short=*/true, /*write_locked=*/true); + if (dso) { + up_write(&dsos->lock); + return dso; } + /* + * Failed to find the dso so create it. Change the name before adding it + * to the array, to avoid unnecessary sorts and potential locking + * issues. 
+ */ + dso = dso__new_id(m->name, /*id=*/NULL); + if (!dso) { + up_write(&dsos->lock); + return NULL; + } + dso__set_basename(dso); + dso__set_module_info(dso, m, machine); + dso__set_long_name(dso, strdup(filename), true); + dso__set_kernel(dso, DSO_SPACE__KERNEL); + __dsos__add(dsos, dso); - return ret; + up_write(&dsos->lock); + return dso; +} + +static int dsos__find_kernel_dso_cb(struct dso *dso, void *data) +{ + struct dso **res = data; + /* + * The cpumode passed to is_kernel_module is not the cpumode of *this* + * event. If we insist on passing correct cpumode to is_kernel_module, + * we should record the cpumode when we adding this dso to the linked + * list. + * + * However we don't really need passing correct cpumode. We know the + * correct cpumode must be kernel mode (if not, we should not link it + * onto kernel_dsos list). + * + * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN. + * is_kernel_module() treats it as a kernel cpumode. + */ + if (!dso__kernel(dso) || + is_kernel_module(dso__long_name(dso), PERF_RECORD_MISC_CPUMODE_UNKNOWN)) + return 0; + + *res = dso__get(dso); + return 1; +} + +struct dso *dsos__find_kernel_dso(struct dsos *dsos) +{ + struct dso *res = NULL; + + dsos__for_each_dso(dsos, dsos__find_kernel_dso_cb, &res); + return res; +} + +int dsos__for_each_dso(struct dsos *dsos, int (*cb)(struct dso *dso, void *data), void *data) +{ + int err; + + down_read(&dsos->lock); + err = __dsos__for_each_dso(dsos, cb, data); + up_read(&dsos->lock); + return err; } diff --git a/tools/perf/util/dsos.h b/tools/perf/util/dsos.h index 5dbec2bc6966..6c13b65648bc 100644 --- a/tools/perf/util/dsos.h +++ b/tools/perf/util/dsos.h @@ -10,31 +10,43 @@ struct dso; struct dso_id; +struct kmod_path; +struct machine; /* - * DSOs are put into both a list for fast iteration and rbtree for fast - * long name lookup. + * Collection of DSOs as an array for iteration speed, but sorted for O(n) + * lookup. 
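+ * (Long name lookups bsearch() the sorted array, i.e. O(log n) once
+ * sorted; short name lookups still walk all entries.)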
*/ struct dsos { - struct list_head head; - struct rb_root root; /* rbtree root sorted by long name */ struct rw_semaphore lock; + struct dso **dsos; + unsigned int cnt; + unsigned int allocated; + bool sorted; }; -void __dsos__add(struct dsos *dsos, struct dso *dso); -void dsos__add(struct dsos *dsos, struct dso *dso); -struct dso *__dsos__addnew(struct dsos *dsos, const char *name); -struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short); +void dsos__init(struct dsos *dsos); +void dsos__exit(struct dsos *dsos); + +int __dsos__add(struct dsos *dsos, struct dso *dso); +int dsos__add(struct dsos *dsos, struct dso *dso); +struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short); struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id); -struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso, - const char *name, struct dso_id *id); - -bool __dsos__read_build_ids(struct list_head *head, bool with_hits); +bool dsos__read_build_ids(struct dsos *dsos, bool with_hits); -size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, +size_t dsos__fprintf_buildid(struct dsos *dsos, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm); -size_t __dsos__fprintf(struct list_head *head, FILE *fp); +size_t dsos__fprintf(struct dsos *dsos, FILE *fp); + +int dsos__hit_all(struct dsos *dsos); + +struct dso *dsos__findnew_module_dso(struct dsos *dsos, struct machine *machine, + struct kmod_path *m, const char *filename); + +struct dso *dsos__find_kernel_dso(struct dsos *dsos); + +int dsos__for_each_dso(struct dsos *dsos, int (*cb)(struct dso *dso, void *data), void *data); #endif /* __PERF_DSOS */ diff --git a/tools/perf/util/dump-insn.h b/tools/perf/util/dump-insn.h index 650125061530..4a7797dd6d09 100644 --- a/tools/perf/util/dump-insn.h +++ b/tools/perf/util/dump-insn.h @@ -11,6 +11,7 @@ struct thread; struct perf_insn { /* Initialized by callers: */ struct thread *thread; + struct machine *machine; u8 cpumode; bool is64bit; int cpu; diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 7aa5fee0da19..44ef968a7ad3 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -9,6 +9,7 @@ #include <stdlib.h> #include "debug.h" #include "dwarf-aux.h" +#include "dwarf-regs.h" #include "strbuf.h" #include "string2.h" @@ -696,6 +697,49 @@ Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, return die_mem; } +static int __die_find_func_rettype_cb(Dwarf_Die *die_mem, void *data) +{ + const char *func_name; + + if (dwarf_tag(die_mem) != DW_TAG_subprogram) + return DIE_FIND_CB_SIBLING; + + func_name = dwarf_diename(die_mem); + if (func_name && !strcmp(func_name, data)) + return DIE_FIND_CB_END; + + return DIE_FIND_CB_SIBLING; +} + +/** + * die_find_func_rettype - Search a return type of function + * @cu_die: a CU DIE + * @name: target function name + * @die_mem: a buffer for result DIE + * + * Search a non-inlined function which matches to @name and stores the + * return type of the function to @die_mem and returns it if found. + * Returns NULL if failed. Note that it doesn't needs to find a + * definition of the function, so it doesn't match with address. + * Most likely, it can find a declaration at the top level. Thus the + * callback function continues to sibling entries only. 
+ */ +Dwarf_Die *die_find_func_rettype(Dwarf_Die *cu_die, const char *name, + Dwarf_Die *die_mem) +{ + Dwarf_Die tmp_die; + + cu_die = die_find_child(cu_die, __die_find_func_rettype_cb, + (void *)name, &tmp_die); + if (!cu_die) + return NULL; + + if (die_get_real_type(&tmp_die, die_mem) == NULL) + return NULL; + + return die_mem; +} + struct __instance_walk_param { void *addr; int (*callback)(Dwarf_Die *, void *); @@ -1066,8 +1110,10 @@ int die_get_typename_from_type(Dwarf_Die *type_die, struct strbuf *buf) const char *tmp = ""; tag = dwarf_tag(type_die); - if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type) + if (tag == DW_TAG_pointer_type) tmp = "*"; + else if (tag == DW_TAG_array_type) + tmp = "[]"; else if (tag == DW_TAG_subroutine_type) { /* Function pointer */ return strbuf_add(buf, "(function_type)", 15); @@ -1136,6 +1182,71 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf) return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die)); } +#if defined(HAVE_DWARF_GETLOCATIONS_SUPPORT) || defined(HAVE_DWARF_CFI_SUPPORT) +static int reg_from_dwarf_op(Dwarf_Op *op) +{ + switch (op->atom) { + case DW_OP_reg0 ... DW_OP_reg31: + return op->atom - DW_OP_reg0; + case DW_OP_breg0 ... DW_OP_breg31: + return op->atom - DW_OP_breg0; + case DW_OP_regx: + case DW_OP_bregx: + return op->number; + case DW_OP_fbreg: + return DWARF_REG_FB; + default: + break; + } + return -1; +} + +static int offset_from_dwarf_op(Dwarf_Op *op) +{ + switch (op->atom) { + case DW_OP_reg0 ... DW_OP_reg31: + case DW_OP_regx: + return 0; + case DW_OP_breg0 ... DW_OP_breg31: + case DW_OP_fbreg: + return op->number; + case DW_OP_bregx: + return op->number2; + default: + break; + } + return -1; +} + +static bool check_allowed_ops(Dwarf_Op *ops, size_t nops) +{ + /* The first op is checked separately */ + ops++; + nops--; + + /* + * It needs to make sure if the location expression matches to the given + * register and offset exactly. Thus it rejects any complex expressions + * and only allows a few of selected operators that doesn't change the + * location. 
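+ *
+ * For example "DW_OP_breg6 -8; DW_OP_deref" is accepted, while any
+ * expression containing arithmetic such as DW_OP_plus is rejected.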
+ */ + while (nops) { + switch (ops->atom) { + case DW_OP_stack_value: + case DW_OP_deref_size: + case DW_OP_deref: + case DW_OP_piece: + break; + default: + return false; + } + ops++; + nops--; + } + return true; +} +#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT || HAVE_DWARF_CFI_SUPPORT */ + #ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT /** * die_get_var_innermost_scope - Get innermost scope range of given variable DIE @@ -1272,11 +1383,48 @@ struct find_var_data { unsigned reg; /* Access offset, set for global data */ int offset; + /* True if the current register is the frame base */ + bool is_fbreg; }; /* Max number of registers DW_OP_regN supports */ #define DWARF_OP_DIRECT_REGS 32 +static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data, + u64 addr_offset, u64 addr_type, bool is_pointer) +{ + Dwarf_Die type_die; + Dwarf_Word size; + + if (addr_offset == addr_type) { + /* Update offset relative to the start of the variable */ + data->offset = 0; + return true; + } + + if (addr_offset < addr_type) + return false; + + if (die_get_real_type(die_mem, &type_die) == NULL) + return false; + + if (is_pointer && dwarf_tag(&type_die) == DW_TAG_pointer_type) { + /* Get the target type of the pointer */ + if (die_get_real_type(&type_die, &type_die) == NULL) + return false; + } + + if (dwarf_aggregate_size(&type_die, &size) < 0) + return false; + + if (addr_offset >= addr_type + size) + return false; + + /* Update offset relative to the start of the variable */ + data->offset = addr_offset - addr_type; + return true; +} + /* Only checks direct child DIEs in the given scope. */ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg) { @@ -1301,13 +1449,41 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg) if (start > data->pc) break; + /* Local variables accessed using frame base register */ + if (data->is_fbreg && ops->atom == DW_OP_fbreg && + check_allowed_ops(ops, nops) && + match_var_offset(die_mem, data, data->offset, ops->number, + /*is_pointer=*/false)) + return DIE_FIND_CB_END; + /* Only match with a simple case */ if (data->reg < DWARF_OP_DIRECT_REGS) { - if (ops->atom == (DW_OP_reg0 + data->reg) && nops == 1) + /* pointer variables saved in a register 0 to 31 */ + if (ops->atom == (DW_OP_reg0 + data->reg) && + check_allowed_ops(ops, nops) && + match_var_offset(die_mem, data, data->offset, 0, + /*is_pointer=*/true)) + return DIE_FIND_CB_END; + + /* Local variables accessed by a register + offset */ + if (ops->atom == (DW_OP_breg0 + data->reg) && + check_allowed_ops(ops, nops) && + match_var_offset(die_mem, data, data->offset, ops->number, + /*is_pointer=*/false)) return DIE_FIND_CB_END; } else { + /* pointer variables saved in a register 32 or above */ if (ops->atom == DW_OP_regx && ops->number == data->reg && - nops == 1) + check_allowed_ops(ops, nops) && + match_var_offset(die_mem, data, data->offset, 0, + /*is_pointer=*/true)) + return DIE_FIND_CB_END; + + /* Local variables accessed by a register + offset */ + if (ops->atom == DW_OP_bregx && data->reg == ops->number && + check_allowed_ops(ops, nops) && + match_var_offset(die_mem, data, data->offset, ops->number2, + /*is_poitner=*/false)) return DIE_FIND_CB_END; } } @@ -1319,18 +1495,29 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg) * @sc_die: a scope DIE * @pc: the program address to find * @reg: the register number to find + * @poffset: pointer to offset, will be updated for fbreg case + * @is_fbreg: boolean value if the current register is the frame base * @die_mem: a buffer to 
save the resulting DIE * - * Find the variable DIE accessed by the given register. + * Find the variable DIE accessed by the given register. It'll update the @offset + * when the variable is in the stack. */ Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg, + int *poffset, bool is_fbreg, Dwarf_Die *die_mem) { struct find_var_data data = { .pc = pc, .reg = reg, + .offset = *poffset, + .is_fbreg = is_fbreg, }; - return die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem); + Dwarf_Die *result; + + result = die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem); + if (result) + *poffset = data.offset; + return result; } /* Only checks direct child DIEs in the given scope */ @@ -1341,8 +1528,6 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg) ptrdiff_t off = 0; Dwarf_Attribute attr; Dwarf_Addr base, start, end; - Dwarf_Word size; - Dwarf_Die type_die; Dwarf_Op *ops; size_t nops; @@ -1356,27 +1541,10 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg) if (ops->atom != DW_OP_addr) continue; - if (data->addr < ops->number) - continue; - - if (data->addr == ops->number) { - /* Update offset relative to the start of the variable */ - data->offset = 0; + if (check_allowed_ops(ops, nops) && + match_var_offset(die_mem, data, data->addr, ops->number, + /*is_pointer=*/false)) return DIE_FIND_CB_END; - } - - if (die_get_real_type(die_mem, &type_die) == NULL) - continue; - - if (dwarf_aggregate_size(&type_die, &size) < 0) - continue; - - if (data->addr >= ops->number + size) - continue; - - /* Update offset relative to the start of the variable */ - data->offset = data->addr - ops->number; - return DIE_FIND_CB_END; } return DIE_FIND_CB_SIBLING; } @@ -1384,7 +1552,6 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg) /** * die_find_variable_by_addr - Find variable located at given address * @sc_die: a scope DIE - * @pc: the program address to find * @addr: the data address to find * @die_mem: a buffer to save the resulting DIE * @offset: the offset in the resulting type @@ -1392,12 +1559,10 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg) * Find the variable DIE located at the given address (in PC-relative mode). * This is usually for global variables. */ -Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc, - Dwarf_Addr addr, Dwarf_Die *die_mem, - int *offset) +Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr addr, + Dwarf_Die *die_mem, int *offset) { struct find_var_data data = { - .pc = pc, .addr = addr, }; Dwarf_Die *result; @@ -1407,7 +1572,164 @@ Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc, *offset = data.offset; return result; } -#endif + +static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg) +{ + struct die_var_type **var_types = arg; + Dwarf_Die type_die; + int tag = dwarf_tag(die_mem); + Dwarf_Attribute attr; + Dwarf_Addr base, start, end; + Dwarf_Op *ops; + size_t nops; + struct die_var_type *vt; + + if (tag != DW_TAG_variable && tag != DW_TAG_formal_parameter) + return DIE_FIND_CB_SIBLING; + + if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL) + return DIE_FIND_CB_SIBLING; + + /* + * Only collect the first location as it can reconstruct the + * remaining state by following the instructions. + * start = 0 means it covers the whole range. 
+ */ + if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0) + return DIE_FIND_CB_SIBLING; + + if (die_get_real_type(die_mem, &type_die) == NULL) + return DIE_FIND_CB_SIBLING; + + vt = malloc(sizeof(*vt)); + if (vt == NULL) + return DIE_FIND_CB_END; + + vt->die_off = dwarf_dieoffset(&type_die); + vt->addr = start; + vt->reg = reg_from_dwarf_op(ops); + vt->offset = offset_from_dwarf_op(ops); + vt->next = *var_types; + *var_types = vt; + + return DIE_FIND_CB_SIBLING; +} + +/** + * die_collect_vars - Save all variables and parameters + * @sc_die: a scope DIE + * @var_types: a pointer to save the resulting list + * + * Save all variables and parameters in the @sc_die and save them to @var_types. + * The @var_types is a singly-linked list containing type and location info. + * Actual type can be retrieved using dwarf_offdie() with 'die_off' later. + * + * Callers should free @var_types. + */ +void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types) +{ + Dwarf_Die die_mem; + + die_find_child(sc_die, __die_collect_vars_cb, (void *)var_types, &die_mem); +} + +static int __die_collect_global_vars_cb(Dwarf_Die *die_mem, void *arg) +{ + struct die_var_type **var_types = arg; + Dwarf_Die type_die; + int tag = dwarf_tag(die_mem); + Dwarf_Attribute attr; + Dwarf_Addr base, start, end; + Dwarf_Op *ops; + size_t nops; + struct die_var_type *vt; + + if (tag != DW_TAG_variable) + return DIE_FIND_CB_SIBLING; + + if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL) + return DIE_FIND_CB_SIBLING; + + /* Only collect the location with an absolute address. */ + if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0) + return DIE_FIND_CB_SIBLING; + + if (ops->atom != DW_OP_addr) + return DIE_FIND_CB_SIBLING; + + if (!check_allowed_ops(ops, nops)) + return DIE_FIND_CB_SIBLING; + + if (die_get_real_type(die_mem, &type_die) == NULL) + return DIE_FIND_CB_SIBLING; + + vt = malloc(sizeof(*vt)); + if (vt == NULL) + return DIE_FIND_CB_END; + + vt->die_off = dwarf_dieoffset(&type_die); + vt->addr = ops->number; + vt->reg = -1; + vt->offset = 0; + vt->next = *var_types; + *var_types = vt; + + return DIE_FIND_CB_SIBLING; +} + +/** + * die_collect_global_vars - Save all global variables + * @cu_die: a CU DIE + * @var_types: a pointer to save the resulting list + * + * Save all global variables in the @cu_die and save them to @var_types. + * The @var_types is a singly-linked list containing type and location info. + * Actual type can be retrieved using dwarf_offdie() with 'die_off' later. + * + * Callers should free @var_types. + */ +void die_collect_global_vars(Dwarf_Die *cu_die, struct die_var_type **var_types) +{ + Dwarf_Die die_mem; + + die_find_child(cu_die, __die_collect_global_vars_cb, (void *)var_types, &die_mem); +} +#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ + +#ifdef HAVE_DWARF_CFI_SUPPORT +/** + * die_get_cfa - Get frame base information + * @dwarf: a Dwarf info + * @pc: program address + * @preg: pointer for saved register + * @poffset: pointer for saved offset + * + * This function gets register and offset for CFA (Canonical Frame Address) + * by searching the CIE/FDE info. The CFA usually points to the start address + * of the current stack frame and local variables can be located using an offset + * from the CFA. The @preg and @poffset will be updated if it returns 0. 
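+ *
+ * For example, on x86-64 a frame whose CFA is rbp + 16 is returned as
+ * *preg == 6 (the DWARF register number of %rbp) and *poffset == 16.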
+ */ +int die_get_cfa(Dwarf *dwarf, u64 pc, int *preg, int *poffset) +{ + Dwarf_CFI *cfi; + Dwarf_Frame *frame = NULL; + Dwarf_Op *ops = NULL; + size_t nops; + + cfi = dwarf_getcfi(dwarf); + if (cfi == NULL) + return -1; + + if (!dwarf_cfi_addrframe(cfi, pc, &frame) && + !dwarf_frame_cfa(frame, &ops, &nops) && + check_allowed_ops(ops, nops)) { + *preg = reg_from_dwarf_op(ops); + *poffset = offset_from_dwarf_op(ops); + return 0; + } + return -1; +} +#endif /* HAVE_DWARF_CFI_SUPPORT */ /* * die_has_loclist - Check if DW_AT_location of @vr_die is a location list @@ -1640,3 +1962,116 @@ int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes) *scopes = data.scopes; return data.nr; } + +static int __die_find_member_offset_cb(Dwarf_Die *die_mem, void *arg) +{ + Dwarf_Die type_die; + Dwarf_Word size, loc; + Dwarf_Word offset = (long)arg; + int tag = dwarf_tag(die_mem); + + if (tag != DW_TAG_member) + return DIE_FIND_CB_SIBLING; + + /* Unions might not have location */ + if (die_get_data_member_location(die_mem, &loc) < 0) + loc = 0; + + if (offset == loc) + return DIE_FIND_CB_END; + + if (die_get_real_type(die_mem, &type_die) == NULL) { + // TODO: add a pr_debug_dtp() later for this unlikely failure + return DIE_FIND_CB_SIBLING; + } + + if (dwarf_aggregate_size(&type_die, &size) < 0) + size = 0; + + if (loc < offset && offset < (loc + size)) + return DIE_FIND_CB_END; + + return DIE_FIND_CB_SIBLING; +} + +/** + * die_get_member_type - Return type info of struct member + * @type_die: a type DIE + * @offset: offset in the type + * @die_mem: a buffer to save the resulting DIE + * + * This function returns a type of a member in @type_die where it's located at + * @offset if it's a struct. For now, it just returns the first matching + * member in a union. For other types, it'd return the given type directly + * if it's within the size of the type or NULL otherwise. + */ +Dwarf_Die *die_get_member_type(Dwarf_Die *type_die, int offset, + Dwarf_Die *die_mem) +{ + Dwarf_Die *member; + Dwarf_Die mb_type; + int tag; + + tag = dwarf_tag(type_die); + /* If it's not a compound type, return the type directly */ + if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { + Dwarf_Word size; + + if (dwarf_aggregate_size(type_die, &size) < 0) + size = 0; + + if ((unsigned)offset >= size) + return NULL; + + *die_mem = *type_die; + return die_mem; + } + + mb_type = *type_die; + /* TODO: Handle union types better? */ + while (tag == DW_TAG_structure_type || tag == DW_TAG_union_type) { + member = die_find_child(&mb_type, __die_find_member_offset_cb, + (void *)(long)offset, die_mem); + if (member == NULL) + return NULL; + + if (die_get_real_type(member, &mb_type) == NULL) + return NULL; + + tag = dwarf_tag(&mb_type); + + if (tag == DW_TAG_structure_type || tag == DW_TAG_union_type) { + Dwarf_Word loc; + + /* Update offset for the start of the member struct */ + if (die_get_data_member_location(member, &loc) == 0) + offset -= loc; + } + } + *die_mem = mb_type; + return die_mem; +} + +/** + * die_deref_ptr_type - Return type info for pointer access + * @ptr_die: a pointer type DIE + * @offset: access offset for the pointer + * @die_mem: a buffer to save the resulting DIE + * + * This function follows the pointer in @ptr_die with given @offset + * and saves the resulting type in @die_mem. If the pointer points + * a struct type, actual member at the offset would be returned. 
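+ *
+ * For example, with a variable of type 'struct foo *' accessed at
+ * pointer + 8, this resolves to the member of struct foo at offset 8.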
+ */ +Dwarf_Die *die_deref_ptr_type(Dwarf_Die *ptr_die, int offset, + Dwarf_Die *die_mem) +{ + Dwarf_Die type_die; + + if (dwarf_tag(ptr_die) != DW_TAG_pointer_type) + return NULL; + + if (die_get_real_type(ptr_die, &type_die) == NULL) + return NULL; + + return die_get_member_type(&type_die, offset, die_mem); +} diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index 4e64caac6df8..24446412b869 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -94,6 +94,10 @@ Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, Dwarf_Die *die_mem); +/* Search a non-inlined function by name and returns its return type */ +Dwarf_Die *die_find_func_rettype(Dwarf_Die *sp_die, const char *name, + Dwarf_Die *die_mem); + /* Walk on the instances of given DIE */ int die_walk_instances(Dwarf_Die *in_die, int (*callback)(Dwarf_Die *, void *), void *data); @@ -135,6 +139,21 @@ void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die, /* Get the list of including scopes */ int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes); +/* Variable type information */ +struct die_var_type { + struct die_var_type *next; + u64 die_off; + u64 addr; + int reg; + int offset; +}; + +/* Return type info of a member at offset */ +Dwarf_Die *die_get_member_type(Dwarf_Die *type_die, int offset, Dwarf_Die *die_mem); + +/* Return type info where the pointer and offset point to */ +Dwarf_Die *die_deref_ptr_type(Dwarf_Die *ptr_die, int offset, Dwarf_Die *die_mem); + #ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT /* Get byte offset range of given variable DIE */ @@ -142,12 +161,18 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf); /* Find a variable saved in the 'reg' at given address */ Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg, + int *poffset, bool is_fbreg, Dwarf_Die *die_mem); /* Find a (global) variable located in the 'addr' */ -Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc, - Dwarf_Addr addr, Dwarf_Die *die_mem, - int *offset); +Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr addr, + Dwarf_Die *die_mem, int *offset); + +/* Save all variables and parameters in this scope */ +void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types); + +/* Save all global variables in this CU */ +void die_collect_global_vars(Dwarf_Die *cu_die, struct die_var_type **var_types); #else /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ @@ -161,13 +186,14 @@ static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused, static inline Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die __maybe_unused, Dwarf_Addr pc __maybe_unused, int reg __maybe_unused, + int *poffset __maybe_unused, + bool is_fbreg __maybe_unused, Dwarf_Die *die_mem __maybe_unused) { return NULL; } static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unused, - Dwarf_Addr pc __maybe_unused, Dwarf_Addr addr __maybe_unused, Dwarf_Die *die_mem __maybe_unused, int *offset __maybe_unused) @@ -175,6 +201,31 @@ static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unu return NULL; } +static inline void die_collect_vars(Dwarf_Die *sc_die __maybe_unused, + struct die_var_type **var_types __maybe_unused) +{ +} + +static inline void die_collect_global_vars(Dwarf_Die *cu_die __maybe_unused, + struct die_var_type **var_types __maybe_unused) +{ +} + #endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ 
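Since die_collect_vars() and die_collect_global_vars() hand back a raw singly-linked list of struct die_var_type, the caller is responsible for resolving each 'die_off' back to a DIE and for freeing the nodes. A minimal consumer sketch (print_and_free_var_types() is a hypothetical name for illustration; it assumes dwarf-aux.h for struct die_var_type and libdw for dwarf_offdie()/dwarf_diename()):

#include <elfutils/libdw.h>
#include <stdio.h>
#include <stdlib.h>
#include "dwarf-aux.h"	/* struct die_var_type */

static void print_and_free_var_types(Dwarf *dwarf, struct die_var_type *vt)
{
	while (vt) {
		struct die_var_type *next = vt->next;
		Dwarf_Die type_die;

		/* 'die_off' identifies the type DIE within this Dwarf. */
		if (dwarf_offdie(dwarf, vt->die_off, &type_die) != NULL)
			printf("%-20s reg=%d offset=%d from=%#llx\n",
			       dwarf_diename(&type_die) ?: "(anonymous)",
			       vt->reg, vt->offset,
			       (unsigned long long)vt->addr);
		free(vt);
		vt = next;
	}
}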
+#ifdef HAVE_DWARF_CFI_SUPPORT + +/* Get the frame base information from CFA */ +int die_get_cfa(Dwarf *dwarf, u64 pc, int *preg, int *poffset); + +#else /* HAVE_DWARF_CFI_SUPPORT */ + +static inline int die_get_cfa(Dwarf *dwarf __maybe_unused, u64 pc __maybe_unused, + int *preg __maybe_unused, int *poffset __maybe_unused) +{ + return -1; +} + +#endif /* HAVE_DWARF_CFI_SUPPORT */ + #endif /* _DWARF_AUX_H */ diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 7c527e65c186..2a2c37cc40b7 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -12,6 +12,7 @@ struct perf_cpu_map; struct cpu_topology_map { int socket_id; int die_id; + int cluster_id; int core_id; }; diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 68f45e9e63b6..f32f9abf6344 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -511,7 +511,7 @@ size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *ma struct addr_location al; addr_location__init(&al); - al.map = map__get(maps__find(machine__kernel_maps(machine), tp->addr)); + al.map = maps__find(machine__kernel_maps(machine), tp->addr); if (al.map && map__load(al.map) >= 0) { al.addr = map__map_ip(al.map, tp->addr); al.sym = map__find_symbol(al.map, al.addr); @@ -641,7 +641,7 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, return NULL; } al->maps = maps__get(maps); - al->map = map__get(maps__find(maps, al->addr)); + al->map = maps__find(maps, al->addr); if (al->map != NULL) { /* * Kernel maps might be changed when loading symbols so loading @@ -726,7 +726,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al, dso = al->map ? map__dso(al->map) : NULL; dump_printf(" ...... dso: %s\n", dso - ? dso->long_name + ? dso__long_name(dso) : (al->level == 'H' ? "[hypervisor]" : "<not found>")); if (thread__is_filtered(thread)) @@ -750,10 +750,10 @@ int machine__resolve(struct machine *machine, struct addr_location *al, if (al->map) { if (symbol_conf.dso_list && (!dso || !(strlist__has_entry(symbol_conf.dso_list, - dso->short_name) || - (dso->short_name != dso->long_name && + dso__short_name(dso)) || + (dso__short_name(dso) != dso__long_name(dso) && strlist__has_entry(symbol_conf.dso_list, - dso->long_name))))) { + dso__long_name(dso)))))) { al->filtered |= (1 << HIST_FILTER__DSO); } diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 55a300a0977b..3a719edafc7a 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -298,7 +298,8 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide) #ifdef HAVE_LIBTRACEEVENT struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide) { - struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0); + struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0, + /*format=*/true); if (IS_ERR(evsel)) return evsel; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 6d7c9c58a9bc..4f818ab6b662 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -452,7 +452,7 @@ out_err: * Returns pointer with encoded error via <linux/err.h> interface. 
*/ #ifdef HAVE_LIBTRACEEVENT -struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx) +struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format) { struct evsel *evsel = zalloc(perf_evsel__object.size); int err = -ENOMEM; @@ -469,14 +469,20 @@ struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx) if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) goto out_free; - evsel->tp_format = trace_event__tp_format(sys, name); - if (IS_ERR(evsel->tp_format)) { - err = PTR_ERR(evsel->tp_format); - goto out_free; + event_attr_init(&attr); + + if (format) { + evsel->tp_format = trace_event__tp_format(sys, name); + if (IS_ERR(evsel->tp_format)) { + err = PTR_ERR(evsel->tp_format); + goto out_free; + } + attr.config = evsel->tp_format->id; + } else { + attr.config = (__u64) -1; } - event_attr_init(&attr); - attr.config = evsel->tp_format->id; + attr.sample_period = 1; evsel__init(evsel, &attr, idx); } @@ -2363,7 +2369,6 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event, data->period = evsel->core.attr.sample_period; data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; data->misc = event->header.misc; - data->id = -1ULL; data->data_src = PERF_MEM_DATA_SRC_NONE; data->vcpu = -1; @@ -2851,6 +2856,39 @@ u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const return field ? format_field__intval(field, sample, evsel->needs_swap) : 0; } +char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name) +{ + static struct tep_format_field *prev_state_field; + static const char *states; + struct tep_format_field *field; + unsigned long long val; + unsigned int bit; + char state = '?'; /* '?' denotes unknown task state */ + + field = evsel__field(evsel, name); + + if (!field) + return state; + + if (!states || field != prev_state_field) { + states = parse_task_states(field); + if (!states) + return state; + prev_state_field = field; + } + + /* + * Note since the kernel exposes TASK_REPORT_MAX to userspace + * to denote the 'preempted' state, we might as welll report + * 'R' for this case, which make senses to users as well. + * + * We can change this if we have a good reason in the future. + */ + val = evsel__intval(evsel, sample, name); + bit = val ? ffs(val) : 0; + state = (!bit || bit > strlen(states)) ? 'R' : states[bit-1]; + return state; +} #endif bool evsel__fallback(struct evsel *evsel, struct target *target, int err, diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index efbb6e848287..375a38e15cd9 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -234,14 +234,14 @@ void free_config_terms(struct list_head *config_terms); #ifdef HAVE_LIBTRACEEVENT -struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx); +struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format); /* * Returns pointer with encoded error via <linux/err.h> interface. 
*/ static inline struct evsel *evsel__newtp(const char *sys, const char *name) { - return evsel__newtp_idx(sys, name, 0); + return evsel__newtp_idx(sys, name, 0, true); } #endif @@ -339,6 +339,7 @@ struct perf_sample; void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name); u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name); u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name); +char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name); static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name) { diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c index 7be23b3ac082..b8875aac8f87 100644 --- a/tools/perf/util/expr.c +++ b/tools/perf/util/expr.c @@ -500,7 +500,25 @@ double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const tmp = evlist__new(); if (!tmp) return NAN; - ret = parse_event(tmp, id) ? 0 : 1; + + if (strchr(id, '@')) { + char *tmp_id, *p; + + tmp_id = strdup(id); + if (!tmp_id) { + ret = NAN; + goto out; + } + p = strchr(tmp_id, '@'); + *p = '/'; + p = strrchr(tmp_id, '@'); + *p = '/'; + ret = parse_event(tmp, tmp_id) ? 0 : 1; + free(tmp_id); + } else { + ret = parse_event(tmp, id) ? 0 : 1; + } +out: evlist__delete(tmp); return ret; } diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l index 0feef0726c48..a2fc43159ee9 100644 --- a/tools/perf/util/expr.l +++ b/tools/perf/util/expr.l @@ -94,6 +94,14 @@ static int literal(yyscan_t scanner, const struct expr_scanner_ctx *sctx) } return LITERAL; } + +static int nan_value(yyscan_t scanner) +{ + YYSTYPE *yylval = expr_get_lval(scanner); + + yylval->num = NAN; + return NUMBER; +} %} number ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)(e-?[0-9]+)? 
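The expr__has_event() change above can also be read in isolation: metric
expressions spell an event such as "msr/tsc/" as "msr@tsc@", because '/'
already means division inside an expression, so the first and last '@' are
rewritten to '/' before the string is handed to parse_event(). A minimal
stand-alone sketch of that rewrite; the event name "msr@tsc@" is only an
example, not taken from this patch:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Turn the first and last '@' into '/', mirroring what
	 * expr__has_event() now does, so that "msr@tsc@" reaches
	 * parse_event() as "msr/tsc/". */
	int main(void)
	{
		char *id = strdup("msr@tsc@");	/* example event name */
		char *p;

		if (id && strchr(id, '@')) {
			p = strchr(id, '@');	/* first '@' */
			*p = '/';
			p = strrchr(id, '@');	/* last '@' */
			*p = '/';
		}
		printf("%s\n", id);		/* prints: msr/tsc/ */
		free(id);
		return 0;
	}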
@@ -115,6 +123,7 @@ else { return ELSE; } source_count { return SOURCE_COUNT; } has_event { return HAS_EVENT; } strcmp_cpuid_str { return STRCMP_CPUID_STR; } +NaN { return nan_value(yyscanner); } {literal} { return literal(yyscanner, sctx); } {number} { return value(yyscanner); } {symbol} { return str(yyscanner, ID, sctx->runtime); } diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h index 5f18d20ea903..4e2e4f40e134 100644 --- a/tools/perf/util/genelf.h +++ b/tools/perf/util/genelf.h @@ -43,6 +43,9 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent #elif defined(__riscv) && __riscv_xlen == 64 #define GEN_ELF_ARCH EM_RISCV #define GEN_ELF_CLASS ELFCLASS64 +#elif defined(__riscv) && __riscv_xlen == 32 +#define GEN_ELF_ARCH EM_RISCV +#define GEN_ELF_CLASS ELFCLASS32 #elif defined(__loongarch__) #define GEN_ELF_ARCH EM_LOONGARCH #define GEN_ELF_CLASS ELFCLASS64 diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 3fe28edc3d01..55e9553861d0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2308,7 +2308,7 @@ static int __event_process_build_id(struct perf_record_header_build_id *bev, build_id__init(&bid, bev->data, size); dso__set_build_id(dso, &bid); - dso->header_build_id = 1; + dso__set_header_build_id(dso, true); if (dso_space != DSO_SPACE__USER) { struct kmod_path m = { .name = NULL, }; @@ -2316,13 +2316,13 @@ static int __event_process_build_id(struct perf_record_header_build_id *bev, if (!kmod_path__parse_name(&m, filename) && m.kmod) dso__set_module_info(dso, &m, machine); - dso->kernel = dso_space; + dso__set_kernel(dso, dso_space); free(m.name); } - build_id__sprintf(&dso->bid, sbuild_id); + build_id__sprintf(dso__bid(dso), sbuild_id); pr_debug("build id event received for %s: %s [%zu]\n", - dso->long_name, sbuild_id, size); + dso__long_name(dso), sbuild_id, size); dso__put(dso); } diff --git a/tools/perf/util/help-unknown-cmd.c b/tools/perf/util/help-unknown-cmd.c index eab99ea6ac01..a0a46e34f8d1 100644 --- a/tools/perf/util/help-unknown-cmd.c +++ b/tools/perf/util/help-unknown-cmd.c @@ -52,46 +52,48 @@ static int add_cmd_list(struct cmdnames *cmds, struct cmdnames *old) return 0; } -const char *help_unknown_cmd(const char *cmd) +const char *help_unknown_cmd(const char *cmd, struct cmdnames *main_cmds) { unsigned int i, n = 0, best_similarity = 0; - struct cmdnames main_cmds, other_cmds; + struct cmdnames other_cmds; - memset(&main_cmds, 0, sizeof(main_cmds)); - memset(&other_cmds, 0, sizeof(main_cmds)); + memset(&other_cmds, 0, sizeof(other_cmds)); perf_config(perf_unknown_cmd_config, NULL); - load_command_list("perf-", &main_cmds, &other_cmds); + load_command_list("perf-", main_cmds, &other_cmds); - if (add_cmd_list(&main_cmds, &other_cmds) < 0) { + if (add_cmd_list(main_cmds, &other_cmds) < 0) { fprintf(stderr, "ERROR: Failed to allocate command list for unknown command.\n"); goto end; } - qsort(main_cmds.names, main_cmds.cnt, - sizeof(main_cmds.names), cmdname_compare); - uniq(&main_cmds); + qsort(main_cmds->names, main_cmds->cnt, + sizeof(main_cmds->names), cmdname_compare); + uniq(main_cmds); - if (main_cmds.cnt) { + if (main_cmds->cnt) { /* This reuses cmdname->len for similarity index */ - for (i = 0; i < main_cmds.cnt; ++i) - main_cmds.names[i]->len = - levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4); - - qsort(main_cmds.names, main_cmds.cnt, - sizeof(*main_cmds.names), levenshtein_compare); + for (i = 0; i < main_cmds->cnt; ++i) { + main_cmds->names[i]->len = + levenshtein(cmd, 
main_cmds->names[i]->name,
+						   /*swap_penalty=*/0,
+						   /*substitution_penalty=*/2,
+						   /*insertion_penalty=*/1,
+						   /*deletion_penalty=*/1);
+		}
+		qsort(main_cmds->names, main_cmds->cnt,
+		      sizeof(*main_cmds->names), levenshtein_compare);

-		best_similarity = main_cmds.names[0]->len;
+		best_similarity = main_cmds->names[0]->len;
 		n = 1;
-		while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
+		while (n < main_cmds->cnt && best_similarity == main_cmds->names[n]->len)
 			++n;
 	}

 	if (autocorrect && n == 1) {
-		const char *assumed = main_cmds.names[0]->name;
+		const char *assumed = main_cmds->names[0]->name;

-		main_cmds.names[0] = NULL;
-		clean_cmdnames(&main_cmds);
+		main_cmds->names[0] = NULL;
 		clean_cmdnames(&other_cmds);
 		fprintf(stderr, "WARNING: You called a perf program named '%s', "
 			"which does not exist.\n"
@@ -107,15 +109,14 @@ const char *help_unknown_cmd(const char *cmd)
 	fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);

-	if (main_cmds.cnt && best_similarity < 6) {
+	if (main_cmds->cnt && best_similarity < 6) {
 		fprintf(stderr, "\nDid you mean %s?\n",
 			n < 2 ? "this": "one of these");

 		for (i = 0; i < n; i++)
-			fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
+			fprintf(stderr, "\t%s\n", main_cmds->names[i]->name);
 	}
 end:
-	clean_cmdnames(&main_cmds);
 	clean_cmdnames(&other_cmds);
-	exit(1);
+	return NULL;
 }
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index fa359180ebf8..2e9e193179dd 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -9,6 +9,7 @@
 #include "map_symbol.h"
 #include "branch.h"
 #include "mem-events.h"
+#include "mem-info.h"
 #include "session.h"
 #include "namespaces.h"
 #include "cgroup.h"
@@ -153,8 +154,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 	}

 	if (h->mem_info) {
-		if (h->mem_info->daddr.ms.sym) {
-			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
+		if (mem_info__daddr(h->mem_info)->ms.sym) {
+			symlen = (int)mem_info__daddr(h->mem_info)->ms.sym->namelen + 4
 				 + unresolved_col_width + 2;
 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
 					   symlen);
@@ -168,8 +169,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 					   symlen);
 		}

-		if (h->mem_info->iaddr.ms.sym) {
-			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
+		if (mem_info__iaddr(h->mem_info)->ms.sym) {
+			symlen = (int)mem_info__iaddr(h->mem_info)->ms.sym->namelen + 4
 				 + unresolved_col_width + 2;
 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
 					   symlen);
@@ -179,8 +180,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 					   symlen);
 		}

-		if (h->mem_info->daddr.ms.map) {
-			symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
+		if (mem_info__daddr(h->mem_info)->ms.map) {
+			symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
 					   symlen);
 		} else {
@@ -308,6 +309,9 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
 	dest->period_us += src->period_us;
 	dest->period_guest_sys += src->period_guest_sys;
 	dest->period_guest_us += src->period_guest_us;
+	dest->weight1 += src->weight1;
+	dest->weight2 += src->weight2;
+	dest->weight3 += src->weight3;
 	dest->nr_events += src->nr_events;
 }

@@ -315,7 +319,9 @@ static void he_stat__decay(struct he_stat *he_stat)
 {
 	he_stat->period = (he_stat->period * 7) / 8;
 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
-	/* XXX need decay for weight too?
*/ + he_stat->weight1 = (he_stat->weight1 * 7) / 8; + he_stat->weight2 = (he_stat->weight2 * 7) / 8; + he_stat->weight3 = (he_stat->weight3 * 7) / 8; } static void hists__delete_entry(struct hists *hists, struct hist_entry *he); @@ -470,11 +476,6 @@ static int hist_entry__init(struct hist_entry *he, he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map); } - if (he->mem_info) { - he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map); - he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map); - } - if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) callchain_init(he->callchain); @@ -520,8 +521,8 @@ err_infos: zfree(&he->branch_info); } if (he->mem_info) { - map_symbol__exit(&he->mem_info->iaddr.ms); - map_symbol__exit(&he->mem_info->daddr.ms); + map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms); + map_symbol__exit(&mem_info__daddr(he->mem_info)->ms); } err: map_symbol__exit(&he->ms); @@ -566,7 +567,6 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template, he = NULL; } } - return he; } @@ -614,7 +614,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists, cmp = hist_entry__cmp(he, entry); if (!cmp) { if (sample_self) { - he_stat__add_period(&he->stat, period); + he_stat__add_stat(&he->stat, &entry->stat); hist_entry__add_callchain_period(he, period); } if (symbol_conf.cumulate_callchain) @@ -626,7 +626,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists, */ mem_info__zput(entry->mem_info); - block_info__zput(entry->block_info); + block_info__delete(entry->block_info); kvm_info__zput(entry->kvm_info); @@ -731,12 +731,15 @@ __hists__add_entry(struct hists *hists, .stat = { .nr_events = 1, .period = sample->period, + .weight1 = sample->weight, + .weight2 = sample->ins_lat, + .weight3 = sample->p_stage_cyc, }, .parent = sym_parent, .filtered = symbol__parent_filter(sym_parent) | al->filtered, .hists = hists, .branch_info = bi, - .mem_info = mi, + .mem_info = mem_info__get(mi), .kvm_info = ki, .block_info = block_info, .transaction = sample->transaction, @@ -825,7 +828,7 @@ iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) if (mi == NULL) return -ENOMEM; - iter->priv = mi; + iter->mi = mi; return 0; } @@ -833,7 +836,7 @@ static int iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) { u64 cost; - struct mem_info *mi = iter->priv; + struct mem_info *mi = iter->mi; struct hists *hists = evsel__hists(iter->evsel); struct perf_sample *sample = iter->sample; struct hist_entry *he; @@ -880,12 +883,7 @@ iter_finish_mem_entry(struct hist_entry_iter *iter, err = hist_entry__append_callchain(he, iter->sample); out: - /* - * We don't need to free iter->priv (mem_info) here since the mem info - * was either already freed in hists__findnew_entry() or passed to a - * new hist entry by hist_entry__new(). 
- */ - iter->priv = NULL; + mem_info__zput(iter->mi); iter->he = NULL; return err; @@ -904,7 +902,7 @@ iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al iter->curr = 0; iter->total = sample->branch_stack->nr; - iter->priv = bi; + iter->bi = bi; return 0; } @@ -918,7 +916,7 @@ iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused, static int iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) { - struct branch_info *bi = iter->priv; + struct branch_info *bi = iter->bi; int i = iter->curr; if (bi == NULL) @@ -947,7 +945,7 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a int i = iter->curr; int err = 0; - bi = iter->priv; + bi = iter->bi; if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym)) goto out; @@ -976,7 +974,7 @@ static int iter_finish_branch_entry(struct hist_entry_iter *iter, struct addr_location *al __maybe_unused) { - zfree(&iter->priv); + zfree(&iter->bi); iter->he = NULL; return iter->curr >= iter->total ? 0 : -1; @@ -1044,7 +1042,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter, if (he_cache == NULL) return -ENOMEM; - iter->priv = he_cache; + iter->he_cache = he_cache; iter->curr = 0; return 0; @@ -1057,7 +1055,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter, struct evsel *evsel = iter->evsel; struct hists *hists = evsel__hists(evsel); struct perf_sample *sample = iter->sample; - struct hist_entry **he_cache = iter->priv; + struct hist_entry **he_cache = iter->he_cache; struct hist_entry *he; int err = 0; @@ -1115,7 +1113,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter, { struct evsel *evsel = iter->evsel; struct perf_sample *sample = iter->sample; - struct hist_entry **he_cache = iter->priv; + struct hist_entry **he_cache = iter->he_cache; struct hist_entry *he; struct hist_entry he_tmp = { .hists = evsel__hists(evsel), @@ -1181,7 +1179,9 @@ static int iter_finish_cumulative_entry(struct hist_entry_iter *iter, struct addr_location *al __maybe_unused) { - zfree(&iter->priv); + mem_info__zput(iter->mi); + zfree(&iter->bi); + zfree(&iter->he_cache); iter->he = NULL; return 0; @@ -1327,13 +1327,13 @@ void hist_entry__delete(struct hist_entry *he) } if (he->mem_info) { - map_symbol__exit(&he->mem_info->iaddr.ms); - map_symbol__exit(&he->mem_info->daddr.ms); + map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms); + map_symbol__exit(&mem_info__daddr(he->mem_info)->ms); mem_info__zput(he->mem_info); } if (he->block_info) - block_info__zput(he->block_info); + block_info__delete(he->block_info); if (he->kvm_info) kvm_info__zput(he->kvm_info); @@ -2128,7 +2128,7 @@ static bool hists__filter_entry_by_dso(struct hists *hists, struct hist_entry *he) { if (hists->dso_filter != NULL && - (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) { + (he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) { he->filtered |= (1 << HIST_FILTER__DSO); return true; } @@ -2808,7 +2808,7 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh } if (dso) printed += scnprintf(bf + printed, size - printed, - ", DSO: %s", dso->short_name); + ", DSO: %s", dso__short_name(dso)); if (socket_id > -1) printed += scnprintf(bf + printed, size - printed, ", Processor Socket: %d", socket_id); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 4a0aea0c9e00..8fb3bdd29188 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -4,21 +4,22 @@ #include 
<linux/rbtree.h> #include <linux/types.h> -#include "evsel.h" +#include "callchain.h" #include "color.h" #include "events_stats.h" +#include "evsel.h" +#include "map_symbol.h" #include "mutex.h" +#include "sample.h" +#include "spark.h" +#include "stat.h" -struct hist_entry; -struct hist_entry_ops; struct addr_location; -struct map_symbol; struct mem_info; struct kvm_info; struct branch_info; struct branch_stack; struct block_info; -struct symbol; struct ui_progress; enum hist_filter { @@ -131,18 +132,20 @@ struct hist_entry_iter { int total; int curr; - bool hide_unresolved; - struct evsel *evsel; struct perf_sample *sample; struct hist_entry *he; struct symbol *parent; - void *priv; + + struct mem_info *mi; + struct branch_info *bi; + struct hist_entry **he_cache; const struct hist_iter_ops *ops; /* user-defined callback function (optional) */ int (*add_entry_cb)(struct hist_entry_iter *iter, struct addr_location *al, bool single, void *arg); + bool hide_unresolved; }; extern const struct hist_iter_ops hist_iter_normal; @@ -150,6 +153,162 @@ extern const struct hist_iter_ops hist_iter_branch; extern const struct hist_iter_ops hist_iter_mem; extern const struct hist_iter_ops hist_iter_cumulative; +struct res_sample { + u64 time; + int cpu; + int tid; +}; + +struct he_stat { + u64 period; + u64 period_sys; + u64 period_us; + u64 period_guest_sys; + u64 period_guest_us; + u64 weight1; + u64 weight2; + u64 weight3; + u32 nr_events; +}; + +struct namespace_id { + u64 dev; + u64 ino; +}; + +struct hist_entry_diff { + bool computed; + union { + /* PERF_HPP__DELTA */ + double period_ratio_delta; + + /* PERF_HPP__RATIO */ + double period_ratio; + + /* HISTC_WEIGHTED_DIFF */ + s64 wdiff; + + /* PERF_HPP_DIFF__CYCLES */ + s64 cycles; + }; + struct stats stats; + unsigned long svals[NUM_SPARKS]; +}; + +struct hist_entry_ops { + void *(*new)(size_t size); + void (*free)(void *ptr); +}; + +/** + * struct hist_entry - histogram entry + * + * @row_offset - offset from the first callchain expanded to appear on screen + * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding + */ +struct hist_entry { + struct rb_node rb_node_in; + struct rb_node rb_node; + union { + struct list_head node; + struct list_head head; + } pairs; + struct he_stat stat; + struct he_stat *stat_acc; + struct map_symbol ms; + struct thread *thread; + struct comm *comm; + struct namespace_id cgroup_id; + u64 cgroup; + u64 ip; + u64 transaction; + s32 socket; + s32 cpu; + u64 code_page_size; + u64 weight; + u64 ins_lat; + u64 p_stage_cyc; + u8 cpumode; + u8 depth; + int mem_type_off; + struct simd_flags simd_flags; + + /* We are added by hists__add_dummy_entry. */ + bool dummy; + bool leaf; + + char level; + u8 filtered; + + u16 callchain_size; + union { + /* + * Since perf diff only supports the stdio output, TUI + * fields are only accessed from perf report (or perf + * top). So make it a union to reduce memory usage. 
+ */ + struct hist_entry_diff diff; + struct /* for TUI */ { + u16 row_offset; + u16 nr_rows; + bool init_have_children; + bool unfolded; + bool has_children; + bool has_no_entry; + }; + }; + char *srcline; + char *srcfile; + struct symbol *parent; + struct branch_info *branch_info; + long time; + struct hists *hists; + struct mem_info *mem_info; + struct block_info *block_info; + struct kvm_info *kvm_info; + void *raw_data; + u32 raw_size; + int num_res; + struct res_sample *res_samples; + void *trace_output; + struct perf_hpp_list *hpp_list; + struct hist_entry *parent_he; + struct hist_entry_ops *ops; + struct annotated_data_type *mem_type; + union { + /* this is for hierarchical entry structure */ + struct { + struct rb_root_cached hroot_in; + struct rb_root_cached hroot_out; + }; /* non-leaf entries */ + struct rb_root sorted_chain; /* leaf entry has callchains */ + }; + struct callchain_root callchain[0]; /* must be last member */ +}; + +static __pure inline bool hist_entry__has_callchains(struct hist_entry *he) +{ + return he->callchain_size != 0; +} + +static inline bool hist_entry__has_pairs(struct hist_entry *he) +{ + return !list_empty(&he->pairs.node); +} + +static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he) +{ + if (hist_entry__has_pairs(he)) + return list_entry(he->pairs.node.next, struct hist_entry, pairs.node); + return NULL; +} + +static inline void hist_entry__add_pair(struct hist_entry *pair, + struct hist_entry *he) +{ + list_add_tail(&pair->pairs.node, &he->pairs.head); +} + struct hist_entry *hists__add_entry(struct hists *hists, struct addr_location *al, struct symbol *parent, @@ -186,6 +345,8 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size, struct hists *hists); int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp, struct perf_hpp_fmt *fmt, int printed); +int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, + unsigned int width); void hist_entry__delete(struct hist_entry *he); typedef int (*hists__resort_cb_t)(struct hist_entry *he, void *arg); @@ -238,6 +399,20 @@ void hists__match(struct hists *leader, struct hists *other); int hists__link(struct hists *leader, struct hists *other); int hists__unlink(struct hists *hists); +static inline float hist_entry__get_percent_limit(struct hist_entry *he) +{ + u64 period = he->stat.period; + u64 total_period = hists__total_period(he->hists); + + if (unlikely(total_period == 0)) + return 0; + + if (symbol_conf.cumulate_callchain) + period = he->stat_acc->period; + + return period * 100.0 / total_period; +} + struct hists_evsel { struct evsel evsel; struct hists hists; @@ -377,6 +552,9 @@ enum { PERF_HPP__OVERHEAD_ACC, PERF_HPP__SAMPLES, PERF_HPP__PERIOD, + PERF_HPP__WEIGHT1, + PERF_HPP__WEIGHT2, + PERF_HPP__WEIGHT3, PERF_HPP__MAX_INDEX }; @@ -423,16 +601,24 @@ void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists); void perf_hpp__set_user_width(const char *width_list_str); void hists__reset_column_width(struct hists *hists); +enum perf_hpp_fmt_type { + PERF_HPP_FMT_TYPE__RAW, + PERF_HPP_FMT_TYPE__PERCENT, + PERF_HPP_FMT_TYPE__AVERAGE, +}; + typedef u64 (*hpp_field_fn)(struct hist_entry *he); typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front); typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...); int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, - const char *fmtstr, hpp_snprint_fn print_fn, bool 
fmt_percent); + const char *fmtstr, hpp_snprint_fn print_fn, + enum perf_hpp_fmt_type fmtype); int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, - const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent); + const char *fmtstr, hpp_snprint_fn print_fn, + enum perf_hpp_fmt_type fmtype); static inline void advance_hpp(struct perf_hpp *hpp, int inc) { @@ -460,15 +646,20 @@ struct hist_browser_timer { int refresh; }; -struct res_sample; - enum rstype { A_NORMAL, A_ASM, A_SOURCE }; -struct block_hist; +struct block_hist { + struct hists block_hists; + struct perf_hpp_list block_list; + struct perf_hpp_fmt block_fmt; + int block_idx; + bool valid; + struct hist_entry he; +}; #ifdef HAVE_SLANG_SUPPORT #include "../ui/keysyms.h" diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index b450178e3420..e733f6b1f7ac 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1319,6 +1319,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder, bool no_tip) bool ret = false; decoder->state.type &= ~INTEL_PT_BRANCH; + decoder->state.insn_op = INTEL_PT_OP_OTHER; + decoder->state.insn_len = 0; if (decoder->set_fup_cfe_ip || decoder->set_fup_cfe) { bool ip = decoder->set_fup_cfe_ip; diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index f38893e0b036..d6d7b7512505 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -598,15 +598,15 @@ static struct auxtrace_cache *intel_pt_cache(struct dso *dso, struct auxtrace_cache *c; unsigned int bits; - if (dso->auxtrace_cache) - return dso->auxtrace_cache; + if (dso__auxtrace_cache(dso)) + return dso__auxtrace_cache(dso); bits = intel_pt_cache_size(dso, machine); /* Ignoring cache creation failure */ c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200); - dso->auxtrace_cache = c; + dso__set_auxtrace_cache(dso, c); return c; } @@ -650,7 +650,7 @@ intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset) if (!c) return NULL; - return auxtrace_cache__lookup(dso->auxtrace_cache, offset); + return auxtrace_cache__lookup(dso__auxtrace_cache(dso), offset); } static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine, @@ -661,7 +661,7 @@ static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine, if (!c) return; - auxtrace_cache__remove(dso->auxtrace_cache, offset); + auxtrace_cache__remove(dso__auxtrace_cache(dso), offset); } static inline bool intel_pt_guest_kernel_ip(uint64_t ip) @@ -764,6 +764,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, addr_location__init(&al); intel_pt_insn->length = 0; + intel_pt_insn->op = INTEL_PT_OP_OTHER; if (to_ip && *ip == to_ip) goto out_no_cache; @@ -820,8 +821,8 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, } dso = map__dso(al.map); - if (dso->data.status == DSO_DATA_STATUS_ERROR && - dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) { + if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR && + dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) { ret = -ENOENT; goto out_ret; } @@ -854,7 +855,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, /* Load maps to ensure dso->is_64_bit has been updated */ map__load(al.map); - x86_64 = dso->is_64_bit; + x86_64 = dso__is_64_bit(dso); while (1) { len = 
dso__data_read_offset(dso, machine, @@ -898,6 +899,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, if (to_ip && *ip == to_ip) { intel_pt_insn->length = 0; + intel_pt_insn->op = INTEL_PT_OP_OTHER; goto out_no_cache; } @@ -1008,7 +1010,7 @@ static int __intel_pt_pgd_ip(uint64_t ip, void *data) offset = map__map_ip(al.map, ip); - res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, map__dso(al.map)->long_name); + res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, dso__long_name(map__dso(al.map))); addr_location__exit(&al); return res; } @@ -3416,7 +3418,7 @@ static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event) } dso = map__dso(al.map); - if (!dso || !dso->auxtrace_cache) + if (!dso || !dso__auxtrace_cache(dso)) continue; offset = map__map_ip(al.map, addr); @@ -3436,7 +3438,7 @@ static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event) } else { intel_pt_cache_invalidate(dso, machine, offset); intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n", - dso->long_name, addr); + dso__long_name(dso), addr); } } out: diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index b397a769006f..8477edefc299 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -16,6 +16,7 @@ #include "map_symbol.h" #include "branch.h" #include "mem-events.h" +#include "mem-info.h" #include "path.h" #include "srcline.h" #include "symbol.h" @@ -43,50 +44,11 @@ #include <linux/string.h> #include <linux/zalloc.h> -static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, - struct thread *th, bool lock); - static struct dso *machine__kernel_dso(struct machine *machine) { return map__dso(machine->vmlinux_map); } -static void dsos__init(struct dsos *dsos) -{ - INIT_LIST_HEAD(&dsos->head); - dsos->root = RB_ROOT; - init_rwsem(&dsos->lock); -} - -static void machine__threads_init(struct machine *machine) -{ - int i; - - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - struct threads *threads = &machine->threads[i]; - threads->entries = RB_ROOT_CACHED; - init_rwsem(&threads->lock); - threads->nr = 0; - threads->last_match = NULL; - } -} - -static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd) -{ - int to_find = (int) *((pid_t *)key); - - return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread); -} - -static struct thread_rb_node *thread_rb_node__find(const struct thread *th, - struct rb_root *tree) -{ - pid_t to_find = thread__tid(th); - struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid); - - return rb_entry(nd, struct thread_rb_node, rb_node); -} - static int machine__set_mmap_name(struct machine *machine) { if (machine__is_host(machine)) @@ -120,7 +82,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid) RB_CLEAR_NODE(&machine->rb_node); dsos__init(&machine->dsos); - machine__threads_init(machine); + threads__init(&machine->threads); machine->vdso_info = NULL; machine->env = NULL; @@ -197,51 +159,13 @@ struct machine *machine__new_kallsyms(void) return machine; } -static void dsos__purge(struct dsos *dsos) -{ - struct dso *pos, *n; - - down_write(&dsos->lock); - - list_for_each_entry_safe(pos, n, &dsos->head, node) { - RB_CLEAR_NODE(&pos->rb_node); - pos->root = NULL; - list_del_init(&pos->node); - dso__put(pos); - } - - up_write(&dsos->lock); -} - -static void dsos__exit(struct dsos *dsos) -{ - dsos__purge(dsos); - exit_rwsem(&dsos->lock); -} - void machine__delete_threads(struct machine 
*machine) { - struct rb_node *nd; - int i; - - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - struct threads *threads = &machine->threads[i]; - down_write(&threads->lock); - nd = rb_first_cached(&threads->entries); - while (nd) { - struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node); - - nd = rb_next(nd); - __machine__remove_thread(machine, trb, trb->thread, false); - } - up_write(&threads->lock); - } + threads__remove_all_threads(&machine->threads); } void machine__exit(struct machine *machine) { - int i; - if (machine == NULL) return; @@ -254,12 +178,7 @@ void machine__exit(struct machine *machine) zfree(&machine->current_tid); zfree(&machine->kallsyms_filename); - machine__delete_threads(machine); - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - struct threads *threads = &machine->threads[i]; - - exit_rwsem(&threads->lock); - } + threads__exit(&machine->threads); } void machine__delete(struct machine *machine) @@ -440,7 +359,7 @@ static struct thread *findnew_guest_code(struct machine *machine, return NULL; /* Assume maps are set up if there are any */ - if (maps__nr_maps(thread__maps(thread))) + if (!maps__empty(thread__maps(thread))) return thread; host_thread = machine__find_thread(host_machine, -1, pid); @@ -526,7 +445,7 @@ static void machine__update_thread_pid(struct machine *machine, if (thread__pid(th) == thread__tid(th)) return; - leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); + leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); if (!leader) goto out_err; @@ -561,159 +480,55 @@ out_err: } /* - * Front-end cache - TID lookups come in blocks, - * so most of the time we dont have to look up - * the full rbtree: - */ -static struct thread* -__threads__get_last_match(struct threads *threads, struct machine *machine, - int pid, int tid) -{ - struct thread *th; - - th = threads->last_match; - if (th != NULL) { - if (thread__tid(th) == tid) { - machine__update_thread_pid(machine, th, pid); - return thread__get(th); - } - thread__put(threads->last_match); - threads->last_match = NULL; - } - - return NULL; -} - -static struct thread* -threads__get_last_match(struct threads *threads, struct machine *machine, - int pid, int tid) -{ - struct thread *th = NULL; - - if (perf_singlethreaded) - th = __threads__get_last_match(threads, machine, pid, tid); - - return th; -} - -static void -__threads__set_last_match(struct threads *threads, struct thread *th) -{ - thread__put(threads->last_match); - threads->last_match = thread__get(th); -} - -static void -threads__set_last_match(struct threads *threads, struct thread *th) -{ - if (perf_singlethreaded) - __threads__set_last_match(threads, th); -} - -/* * Caller must eventually drop thread->refcnt returned with a successful * lookup/new thread inserted. 
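  * That is, each successful return must eventually be paired with a
  * thread__put().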
 */
-static struct thread *____machine__findnew_thread(struct machine *machine,
-						  struct threads *threads,
-						  pid_t pid, pid_t tid,
-						  bool create)
+static struct thread *__machine__findnew_thread(struct machine *machine,
+						pid_t pid,
+						pid_t tid,
+						bool create)
 {
-	struct rb_node **p = &threads->entries.rb_root.rb_node;
-	struct rb_node *parent = NULL;
-	struct thread *th;
-	struct thread_rb_node *nd;
-	bool leftmost = true;
+	struct thread *th = threads__find(&machine->threads, tid);
+	bool created;

-	th = threads__get_last_match(threads, machine, pid, tid);
-	if (th)
+	if (th) {
+		machine__update_thread_pid(machine, th, pid);
 		return th;
-
-	while (*p != NULL) {
-		parent = *p;
-		th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-		if (thread__tid(th) == tid) {
-			threads__set_last_match(threads, th);
-			machine__update_thread_pid(machine, th, pid);
-			return thread__get(th);
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else {
-			p = &(*p)->rb_right;
-			leftmost = false;
-		}
 	}
-
 	if (!create)
 		return NULL;
-	th = thread__new(pid, tid);
-	if (th == NULL)
-		return NULL;
-
-	nd = malloc(sizeof(*nd));
-	if (nd == NULL) {
-		thread__put(th);
-		return NULL;
-	}
-	nd->thread = th;
-
-	rb_link_node(&nd->rb_node, parent, p);
-	rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost);
-	/*
-	 * We have to initialize maps separately after rb tree is updated.
-	 *
-	 * The reason is that we call machine__findnew_thread within
-	 * thread__init_maps to find the thread leader and that would screwed
-	 * the rb tree.
-	 */
-	if (thread__init_maps(th, machine)) {
-		pr_err("Thread init failed thread %d\n", pid);
-		rb_erase_cached(&nd->rb_node, &threads->entries);
-		RB_CLEAR_NODE(&nd->rb_node);
-		free(nd);
-		thread__put(th);
-		return NULL;
-	}
-	/*
-	 * It is now in the rbtree, get a ref
-	 */
-	threads__set_last_match(threads, th);
-	++threads->nr;
-
-	return thread__get(th);
-}
+	th = threads__findnew(&machine->threads, pid, tid, &created);
+	if (created) {
+		/*
+		 * We have to initialize maps separately after rb tree is
+		 * updated.
+		 *
+		 * The reason is that we call machine__findnew_thread within
+		 * thread__init_maps to find the thread leader and that would
+		 * screw up the rb tree.
+ */ + if (thread__init_maps(th, machine)) { + pr_err("Thread init failed thread %d\n", pid); + threads__remove(&machine->threads, th); + thread__put(th); + return NULL; + } + } else + machine__update_thread_pid(machine, th, pid); -struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) -{ - return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true); + return th; } -struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, - pid_t tid) +struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) { - struct threads *threads = machine__threads(machine, tid); - struct thread *th; - - down_write(&threads->lock); - th = __machine__findnew_thread(machine, pid, tid); - up_write(&threads->lock); - return th; + return __machine__findnew_thread(machine, pid, tid, /*create=*/true); } struct thread *machine__find_thread(struct machine *machine, pid_t pid, pid_t tid) { - struct threads *threads = machine__threads(machine, tid); - struct thread *th; - - down_read(&threads->lock); - th = ____machine__findnew_thread(machine, threads, pid, tid, false); - up_read(&threads->lock); - return th; + return __machine__findnew_thread(machine, pid, tid, /*create=*/false); } /* @@ -832,31 +647,6 @@ int machine__process_lost_samples_event(struct machine *machine __maybe_unused, return 0; } -static struct dso *machine__findnew_module_dso(struct machine *machine, - struct kmod_path *m, - const char *filename) -{ - struct dso *dso; - - down_write(&machine->dsos.lock); - - dso = __dsos__find(&machine->dsos, m->name, true); - if (!dso) { - dso = __dsos__addnew(&machine->dsos, m->name); - if (dso == NULL) - goto out_unlock; - - dso__set_module_info(dso, m, machine); - dso__set_long_name(dso, strdup(filename), true); - dso->kernel = DSO_SPACE__KERNEL; - } - - dso__get(dso); -out_unlock: - up_write(&machine->dsos.lock); - return dso; -} - int machine__process_aux_event(struct machine *machine __maybe_unused, union perf_event *event) { @@ -894,9 +684,8 @@ static int machine__process_ksymbol_register(struct machine *machine, struct perf_sample *sample __maybe_unused) { struct symbol *sym; - struct dso *dso; + struct dso *dso = NULL; struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); - bool put_map = false; int err = 0; if (!map) { @@ -906,22 +695,15 @@ static int machine__process_ksymbol_register(struct machine *machine, err = -ENOMEM; goto out; } - dso->kernel = DSO_SPACE__KERNEL; + dso__set_kernel(dso, DSO_SPACE__KERNEL); map = map__new2(0, dso); - dso__put(dso); if (!map) { err = -ENOMEM; goto out; } - /* - * The inserted map has a get on it, we need to put to release - * the reference count here, but do it after all accesses are - * done. 
- */ - put_map = true; if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) { - dso->binary_type = DSO_BINARY_TYPE__OOL; - dso->data.file_size = event->ksymbol.len; + dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL); + dso__data(dso)->file_size = event->ksymbol.len; dso__set_loaded(dso); } @@ -936,11 +718,11 @@ static int machine__process_ksymbol_register(struct machine *machine, dso__set_loaded(dso); if (is_bpf_image(event->ksymbol.name)) { - dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE; + dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE); dso__set_long_name(dso, "", false); } } else { - dso = map__dso(map); + dso = dso__get(map__dso(map)); } sym = symbol__new(map__map_ip(map, map__start(map)), @@ -952,8 +734,8 @@ static int machine__process_ksymbol_register(struct machine *machine, } dso__insert_symbol(dso, sym); out: - if (put_map) - map__put(map); + map__put(map); + dso__put(dso); return err; } @@ -977,7 +759,7 @@ static int machine__process_ksymbol_unregister(struct machine *machine, if (sym) dso__delete_symbol(dso, sym); } - + map__put(map); return 0; } @@ -1005,11 +787,11 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event, perf_event__fprintf_text_poke(event, machine, stdout); if (!event->text_poke.new_len) - return 0; + goto out; if (cpumode != PERF_RECORD_MISC_KERNEL) { pr_debug("%s: unsupported cpumode - ignoring\n", __func__); - return 0; + goto out; } if (dso) { @@ -1032,7 +814,8 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event, pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n", event->text_poke.addr); } - +out: + map__put(map); return 0; } @@ -1047,7 +830,7 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start if (kmod_path__parse_name(&m, filename)) return NULL; - dso = machine__findnew_module_dso(machine, &m, filename); + dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename); if (dso == NULL) goto out; @@ -1071,11 +854,11 @@ out: size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) { struct rb_node *nd; - size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp); + size_t ret = dsos__fprintf(&machines->host.dsos, fp); for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret += __dsos__fprintf(&pos->dsos.head, fp); + ret += dsos__fprintf(&pos->dsos, fp); } return ret; @@ -1084,7 +867,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm) { - return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm); + return dsos__fprintf_buildid(&m->dsos, fp, skip, parm); } size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, @@ -1106,43 +889,44 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) size_t printed = 0; struct dso *kdso = machine__kernel_dso(machine); - if (kdso->has_build_id) { + if (dso__has_build_id(kdso)) { char filename[PATH_MAX]; - if (dso__build_id_filename(kdso, filename, sizeof(filename), - false)) + + if (dso__build_id_filename(kdso, filename, sizeof(filename), false)) printed += fprintf(fp, "[0] %s\n", filename); } - for (i = 0; i < vmlinux_path__nr_entries; ++i) - printed += fprintf(fp, "[%d] %s\n", - i + kdso->has_build_id, vmlinux_path[i]); - + for (i = 0; i < vmlinux_path__nr_entries; ++i) { + printed += fprintf(fp, "[%d] %s\n", i + 
dso__has_build_id(kdso), + vmlinux_path[i]); + } return printed; } -size_t machine__fprintf(struct machine *machine, FILE *fp) -{ - struct rb_node *nd; - size_t ret; - int i; - - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - struct threads *threads = &machine->threads[i]; - - down_read(&threads->lock); +struct machine_fprintf_cb_args { + FILE *fp; + size_t printed; +}; - ret = fprintf(fp, "Threads: %u\n", threads->nr); +static int machine_fprintf_cb(struct thread *thread, void *data) +{ + struct machine_fprintf_cb_args *args = data; - for (nd = rb_first_cached(&threads->entries); nd; - nd = rb_next(nd)) { - struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread; + /* TODO: handle fprintf errors. */ + args->printed += thread__fprintf(thread, args->fp); + return 0; +} - ret += thread__fprintf(pos, fp); - } +size_t machine__fprintf(struct machine *machine, FILE *fp) +{ + struct machine_fprintf_cb_args args = { + .fp = fp, + .printed = 0, + }; + size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads)); - up_read(&threads->lock); - } - return ret; + machine__for_each_thread(machine, machine_fprintf_cb, &args); + return ret + args.printed; } static struct dso *machine__get_kernel(struct machine *machine) @@ -1165,7 +949,7 @@ static struct dso *machine__get_kernel(struct machine *machine) DSO_SPACE__KERNEL_GUEST); } - if (kernel != NULL && (!kernel->has_build_id)) + if (kernel != NULL && (!dso__has_build_id(kernel))) dso__read_running_kernel_build_id(kernel, machine); return kernel; @@ -1300,9 +1084,10 @@ static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data) return 0; dest_map = maps__find(args->kmaps, map__pgoff(map)); - if (dest_map != map) + if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map)) map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map))); + map__put(dest_map); args->found = true; return 0; } @@ -1529,8 +1314,8 @@ static char *get_kernel_version(const char *root_dir) static bool is_kmod_dso(struct dso *dso) { - return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || - dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; + return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || + dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE; } static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m) @@ -1543,8 +1328,10 @@ static int maps__set_module_path(struct maps *maps, const char *path, struct kmo return 0; long_name = strdup(path); - if (long_name == NULL) + if (long_name == NULL) { + map__put(map); return -ENOMEM; + } dso = map__dso(map); dso__set_long_name(dso, long_name, true); @@ -1555,10 +1342,10 @@ static int maps__set_module_path(struct maps *maps, const char *path, struct kmo * we need to update the symtab_type if needed. 
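 * (In enum dso_binary_type the compressed kmod type directly follows the
 * uncompressed one, which is what the increment below relies on.)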
 	 */
 	if (m->comp && is_kmod_dso(dso)) {
-		dso->symtab_type++;
-		dso->comp = m->comp;
+		dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
+		dso__set_comp(dso, m->comp);
 	}
-
+	map__put(map);
 	return 0;
 }

@@ -1709,8 +1496,8 @@ static int machine__update_kernel_mmap(struct machine *machine,
 	updated = map__get(orig);

 	machine->vmlinux_map = updated;
-	machine__set_kernel_mmap(machine, start, end);
 	maps__remove(machine__kernel_maps(machine), orig);
+	machine__set_kernel_mmap(machine, start, end);
 	err = maps__insert(machine__kernel_maps(machine), updated);
 	map__put(orig);

@@ -1765,8 +1552,10 @@ int machine__create_kernel_maps(struct machine *machine)
 		struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
 							 machine__kernel_map(machine));

-		if (next)
+		if (next) {
 			machine__set_kernel_mmap(machine, start, map__start(next));
+			map__put(next);
+		}
 	}

 out_put:
@@ -1774,16 +1563,14 @@ out_put:
 	return ret;
 }

-static bool machine__uses_kcore(struct machine *machine)
+static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused)
 {
-	struct dso *dso;
-
-	list_for_each_entry(dso, &machine->dsos.head, node) {
-		if (dso__is_kcore(dso))
-			return true;
-	}
+	return dso__is_kcore(dso) ? 1 : 0;
+}

-	return false;
+static bool machine__uses_kcore(struct machine *machine)
+{
+	return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? true : false;
 }

 static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
@@ -1850,53 +1637,20 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
 		 * Should be there already, from the build-id table in
 		 * the header.
 		 */
-		struct dso *kernel = NULL;
-		struct dso *dso;
-
-		down_read(&machine->dsos.lock);
-
-		list_for_each_entry(dso, &machine->dsos.head, node) {
-
-			/*
-			 * The cpumode passed to is_kernel_module is not the
-			 * cpumode of *this* event. If we insist on passing
-			 * correct cpumode to is_kernel_module, we should
-			 * record the cpumode when we adding this dso to the
-			 * linked list.
-			 *
-			 * However we don't really need passing correct
-			 * cpumode. We know the correct cpumode must be kernel
-			 * mode (if not, we should not link it onto kernel_dsos
-			 * list).
-			 *
-			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
-			 * is_kernel_module() treats it as a kernel cpumode.
- */ - - if (!dso->kernel || - is_kernel_module(dso->long_name, - PERF_RECORD_MISC_CPUMODE_UNKNOWN)) - continue; - - - kernel = dso__get(dso); - break; - } - - up_read(&machine->dsos.lock); + struct dso *kernel = dsos__find_kernel_dso(&machine->dsos); if (kernel == NULL) kernel = machine__findnew_dso(machine, machine->mmap_name); if (kernel == NULL) goto out_problem; - kernel->kernel = dso_space; + dso__set_kernel(kernel, dso_space); if (__machine__create_kernel_maps(machine, kernel) < 0) { dso__put(kernel); goto out_problem; } - if (strstr(kernel->long_name, "vmlinux")) + if (strstr(dso__long_name(kernel), "vmlinux")) dso__set_short_name(kernel, "[kernel.vmlinux]", false); if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) { @@ -2060,36 +1814,9 @@ out_problem: return 0; } -static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, - struct thread *th, bool lock) -{ - struct threads *threads = machine__threads(machine, thread__tid(th)); - - if (!nd) - nd = thread_rb_node__find(th, &threads->entries.rb_root); - - if (threads->last_match && RC_CHK_EQUAL(threads->last_match, th)) - threads__set_last_match(threads, NULL); - - if (lock) - down_write(&threads->lock); - - BUG_ON(refcount_read(thread__refcnt(th)) == 0); - - thread__put(nd->thread); - rb_erase_cached(&nd->rb_node, &threads->entries); - RB_CLEAR_NODE(&nd->rb_node); - --threads->nr; - - free(nd); - - if (lock) - up_write(&threads->lock); -} - void machine__remove_thread(struct machine *machine, struct thread *th) { - return __machine__remove_thread(machine, NULL, th, true); + return threads__remove(&machine->threads, th); } int machine__process_fork_event(struct machine *machine, union perf_event *event, @@ -2286,11 +2013,11 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample, if (!mi) return NULL; - ip__resolve_ams(al->thread, &mi->iaddr, sample->ip); - ip__resolve_data(al->thread, al->cpumode, &mi->daddr, + ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip); + ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi), sample->addr, sample->phys_addr, sample->data_page_size); - mi->data_src.val = sample->data_src; + mem_info__data_src(mi)->val = sample->data_src; return mi; } @@ -2305,14 +2032,14 @@ static char *callchain_srcline(struct map_symbol *ms, u64 ip) return srcline; dso = map__dso(map); - srcline = srcline__tree_find(&dso->srclines, ip); + srcline = srcline__tree_find(dso__srclines(dso), ip); if (!srcline) { bool show_sym = false; bool show_addr = callchain_param.key == CCKEY_ADDRESS; srcline = get_srcline(dso, map__rip_2objdump(map, ip), ms->sym, show_sym, show_addr, ip); - srcline__tree_insert(&dso->srclines, ip, srcline); + srcline__tree_insert(dso__srclines(dso), ip, srcline); } return srcline; @@ -3110,12 +2837,12 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms addr = map__rip_2objdump(map, addr); dso = map__dso(map); - inline_node = inlines__tree_find(&dso->inlined_nodes, addr); + inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr); if (!inline_node) { inline_node = dso__parse_addr_inlines(dso, addr, sym); if (!inline_node) return ret; - inlines__tree_insert(&dso->inlined_nodes, inline_node); + inlines__tree_insert(dso__inlined_nodes(dso), inline_node); } ilist_ms = (struct map_symbol) { @@ -3223,23 +2950,7 @@ int machine__for_each_thread(struct machine *machine, int (*fn)(struct thread *thread, void *p), void *priv) { - struct threads *threads; - struct rb_node *nd; - int rc = 0; - int i; - - for 
(i = 0; i < THREADS__TABLE_SIZE; i++) { - threads = &machine->threads[i]; - for (nd = rb_first_cached(&threads->entries); nd; - nd = rb_next(nd)) { - struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node); - - rc = fn(trb->thread, priv); - if (rc != 0) - return rc; - } - } - return rc; + return threads__for_each_thread(&machine->threads, fn, priv); } int machines__for_each_thread(struct machines *machines, @@ -3263,6 +2974,36 @@ int machines__for_each_thread(struct machines *machines, return rc; } + +static int thread_list_cb(struct thread *thread, void *data) +{ + struct list_head *list = data; + struct thread_list *entry = malloc(sizeof(*entry)); + + if (!entry) + return -ENOMEM; + + entry->thread = thread__get(thread); + list_add_tail(&entry->list, list); + return 0; +} + +int machine__thread_list(struct machine *machine, struct list_head *list) +{ + return machine__for_each_thread(machine, thread_list_cb, list); +} + +void thread_list__delete(struct list_head *list) +{ + struct thread_list *pos, *next; + + list_for_each_entry_safe(pos, next, list, list) { + thread__zput(pos->thread); + list_del(&pos->list); + free(pos); + } +} + pid_t machine__get_current_tid(struct machine *machine, int cpu) { if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz) @@ -3390,21 +3131,33 @@ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, ch if (sym == NULL) return NULL; - *modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL; + *modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL; *addrp = map__unmap_ip(map, sym->start); return sym->name; } +struct machine__for_each_dso_cb_args { + struct machine *machine; + machine__dso_t fn; + void *priv; +}; + +static int machine__for_each_dso_cb(struct dso *dso, void *data) +{ + struct machine__for_each_dso_cb_args *args = data; + + return args->fn(dso, args->machine, args->priv); +} + int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv) { - struct dso *pos; - int err = 0; + struct machine__for_each_dso_cb_args args = { + .machine = machine, + .fn = fn, + .priv = priv, + }; - list_for_each_entry(pos, &machine->dsos.head, node) { - if (fn(pos, machine, priv)) - err = -1; - } - return err; + return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args); } int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv) @@ -3437,6 +3190,17 @@ bool machine__is_lock_function(struct machine *machine, u64 addr) sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap); machine->lock.text_end = map__unmap_ip(kmap, sym->start); + + sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap); + if (sym) { + machine->traceiter.text_start = map__unmap_ip(kmap, sym->start); + machine->traceiter.text_end = map__unmap_ip(kmap, sym->end); + } + sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap); + if (sym) { + machine->trace.text_start = map__unmap_ip(kmap, sym->start); + machine->trace.text_end = map__unmap_ip(kmap, sym->end); + } } /* failed to get kernel symbols */ @@ -3451,5 +3215,23 @@ bool machine__is_lock_function(struct machine *machine, u64 addr) if (machine->lock.text_start <= addr && addr < machine->lock.text_end) return true; + /* traceiter functions currently don't have their own section + * but we consider them lock functions + */ + if (machine->traceiter.text_start != 0) { + if (machine->traceiter.text_start <= addr && addr < 
machine->traceiter.text_end) + return true; + } + + if (machine->trace.text_start != 0) { + if (machine->trace.text_start <= addr && addr < machine->trace.text_end) + return true; + } + return false; } + +int machine__hit_all_dsos(struct machine *machine) +{ + return dsos__hit_all(&machine->dsos); +} diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 1279acda6a8a..82a47bac8023 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -7,6 +7,7 @@ #include "maps.h" #include "dsos.h" #include "rwsem.h" +#include "threads.h" struct addr_location; struct branch_stack; @@ -28,16 +29,6 @@ extern const char *ref_reloc_sym_names[]; struct vdso_info; -#define THREADS__TABLE_BITS 8 -#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS) - -struct threads { - struct rb_root_cached entries; - struct rw_semaphore lock; - unsigned int nr; - struct thread *last_match; -}; - struct machine { struct rb_node rb_node; pid_t pid; @@ -48,7 +39,7 @@ struct machine { char *root_dir; char *mmap_name; char *kallsyms_filename; - struct threads threads[THREADS__TABLE_SIZE]; + struct threads threads; struct vdso_info *vdso_info; struct perf_env *env; struct dsos dsos; @@ -58,7 +49,7 @@ struct machine { struct { u64 text_start; u64 text_end; - } sched, lock; + } sched, lock, traceiter, trace; pid_t *current_tid; size_t current_tid_sz; union { /* Tool specific area */ @@ -69,12 +60,6 @@ struct machine { bool trampolines_mapped; }; -static inline struct threads *machine__threads(struct machine *machine, pid_t tid) -{ - /* Cast it to handle tid == -1 */ - return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE]; -} - /* * The main kernel (vmlinux) map */ @@ -220,7 +205,6 @@ bool machine__is(struct machine *machine, const char *arch); bool machine__normalized_is(struct machine *machine, const char *arch); int machine__nr_cpus_avail(struct machine *machine); -struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id); @@ -280,6 +264,16 @@ int machines__for_each_thread(struct machines *machines, int (*fn)(struct thread *thread, void *p), void *priv); +struct thread_list { + struct list_head list; + struct thread *thread; +}; + +/* Make a list of struct thread_list based on threads in the machine. */ +int machine__thread_list(struct machine *machine, struct list_head *list); +/* Free up the nodes within the thread_list list. 
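+ * Each node holds a thread reference taken with thread__get().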
*/ +void thread_list__delete(struct list_head *list); + pid_t machine__get_current_tid(struct machine *machine, int cpu); int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, pid_t tid); @@ -312,4 +306,6 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine, int machine__resolve(struct machine *machine, struct addr_location *al, struct perf_sample *sample); +int machine__hit_all_dsos(struct machine *machine); + #endif /* __PERF_MACHINE_H */ diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 54c67cb7ecef..e1d14936a60d 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -168,6 +168,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, if (dso == NULL) goto out_delete; + assert(!dso__kernel(dso)); map__init(result, start, start + len, pgoff, dso); if (anon || no_dso) { @@ -181,10 +182,9 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, if (!(prot & PROT_EXEC)) dso__set_loaded(dso); } - mutex_lock(&dso->lock); - nsinfo__put(dso->nsinfo); - dso->nsinfo = nsi; - mutex_unlock(&dso->lock); + mutex_lock(dso__lock(dso)); + dso__set_nsinfo(dso, nsi); + mutex_unlock(dso__lock(dso)); if (build_id__is_defined(bid)) { dso__set_build_id(dso, bid); @@ -195,13 +195,12 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, * reading the header will have the build ID set and all future mmaps will * have it missing. */ - down_read(&machine->dsos.lock); - header_bid_dso = __dsos__find(&machine->dsos, filename, false); - up_read(&machine->dsos.lock); - if (header_bid_dso && header_bid_dso->header_build_id) { - dso__set_build_id(dso, &header_bid_dso->bid); - dso->header_build_id = 1; + header_bid_dso = dsos__find(&machine->dsos, filename, false); + if (header_bid_dso && dso__header_build_id(header_bid_dso)) { + dso__set_build_id(dso, dso__bid(header_bid_dso)); + dso__set_header_build_id(dso, 1); } + dso__put(header_bid_dso); } dso__put(dso); } @@ -222,7 +221,7 @@ struct map *map__new2(u64 start, struct dso *dso) struct map *result; RC_STRUCT(map) *map; - map = calloc(1, sizeof(*map) + (dso->kernel ? sizeof(struct kmap) : 0)); + map = calloc(1, sizeof(*map) + (dso__kernel(dso) ? sizeof(struct kmap) : 0)); if (ADD_RC_CHK(result, map)) { /* * ->end will be filled after we load all the symbols @@ -235,7 +234,7 @@ struct map *map__new2(u64 start, struct dso *dso) bool __map__is_kernel(const struct map *map) { - if (!map__dso(map)->kernel) + if (!dso__kernel(map__dso(map))) return false; return machine__kernel_map(maps__machine(map__kmaps((struct map *)map))) == map; } @@ -252,7 +251,7 @@ bool __map__is_bpf_prog(const struct map *map) const char *name; struct dso *dso = map__dso(map); - if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) + if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) return true; /* @@ -260,7 +259,7 @@ bool __map__is_bpf_prog(const struct map *map) * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can * guess the type based on name. */ - name = dso->short_name; + name = dso__short_name(dso); return name && (strstr(name, "bpf_prog_") == name); } @@ -269,7 +268,7 @@ bool __map__is_bpf_image(const struct map *map) const char *name; struct dso *dso = map__dso(map); - if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) + if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) return true; /* @@ -277,7 +276,7 @@ bool __map__is_bpf_image(const struct map *map) * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can * guess the type based on name. 
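 * (the kernel names these images "bpf_trampoline_<id>" and
 * "bpf_dispatcher_<name>")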
*/ - name = dso->short_name; + name = dso__short_name(dso); return name && is_bpf_image(name); } @@ -285,7 +284,7 @@ bool __map__is_ool(const struct map *map) { const struct dso *dso = map__dso(map); - return dso && dso->binary_type == DSO_BINARY_TYPE__OOL; + return dso && dso__binary_type(dso) == DSO_BINARY_TYPE__OOL; } bool map__has_symbols(const struct map *map) @@ -316,7 +315,7 @@ void map__put(struct map *map) void map__fixup_start(struct map *map) { struct dso *dso = map__dso(map); - struct rb_root_cached *symbols = &dso->symbols; + struct rb_root_cached *symbols = dso__symbols(dso); struct rb_node *nd = rb_first_cached(symbols); if (nd != NULL) { @@ -329,7 +328,7 @@ void map__fixup_start(struct map *map) void map__fixup_end(struct map *map) { struct dso *dso = map__dso(map); - struct rb_root_cached *symbols = &dso->symbols; + struct rb_root_cached *symbols = dso__symbols(dso); struct rb_node *nd = rb_last(&symbols->rb_root); if (nd != NULL) { @@ -343,7 +342,7 @@ void map__fixup_end(struct map *map) int map__load(struct map *map) { struct dso *dso = map__dso(map); - const char *name = dso->long_name; + const char *name = dso__long_name(dso); int nr; if (dso__loaded(dso)) @@ -351,10 +350,10 @@ int map__load(struct map *map) nr = dso__load(dso, map); if (nr < 0) { - if (dso->has_build_id) { + if (dso__has_build_id(dso)) { char sbuild_id[SBUILD_ID_SIZE]; - build_id__sprintf(&dso->bid, sbuild_id); + build_id__sprintf(dso__bid(dso), sbuild_id); pr_debug("%s with build id %s not found", name, sbuild_id); } else pr_debug("Failed to open %s", name); @@ -416,7 +415,7 @@ struct map *map__clone(struct map *from) size_t size = sizeof(RC_STRUCT(map)); struct dso *dso = map__dso(from); - if (dso && dso->kernel) + if (dso && dso__kernel(dso)) size += sizeof(struct kmap); map = memdup(RC_CHK_ACCESS(from), size); @@ -433,14 +432,14 @@ size_t map__fprintf(struct map *map, FILE *fp) const struct dso *dso = map__dso(map); return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", - map__start(map), map__end(map), map__pgoff(map), dso->name); + map__start(map), map__end(map), map__pgoff(map), dso__name(dso)); } static bool prefer_dso_long_name(const struct dso *dso, bool print_off) { - return dso->long_name && + return dso__long_name(dso) && (symbol_conf.show_kernel_path || - (print_off && (dso->name[0] == '[' || dso__is_kcore(dso)))); + (print_off && (dso__name(dso)[0] == '[' || dso__is_kcore(dso)))); } static size_t __map__fprintf_dsoname(struct map *map, bool print_off, FILE *fp) @@ -451,9 +450,9 @@ static size_t __map__fprintf_dsoname(struct map *map, bool print_off, FILE *fp) if (dso) { if (prefer_dso_long_name(dso, print_off)) - dsoname = dso->long_name; + dsoname = dso__long_name(dso); else - dsoname = dso->name; + dsoname = dso__name(dso); } if (symbol_conf.pad_output_len_dso) { @@ -546,18 +545,14 @@ u64 map__rip_2objdump(struct map *map, u64 rip) } } - if (!dso->adjust_symbols) + if (!dso__adjust_symbols(dso)) return rip; - if (dso->rel) + if (dso__rel(dso)) return rip - map__pgoff(map); - /* - * kernel modules also have DSO_TYPE_USER in dso->kernel, - * but all kernel modules are ET_REL, so won't get here. 
- */ - if (dso->kernel == DSO_SPACE__USER) - return rip + dso->text_offset; + if (dso__kernel(dso) == DSO_SPACE__USER) + return rip + dso__text_offset(dso); return map__unmap_ip(map, rip) - map__reloc(map); } @@ -578,22 +573,35 @@ u64 map__objdump_2mem(struct map *map, u64 ip) { const struct dso *dso = map__dso(map); - if (!dso->adjust_symbols) + if (!dso__adjust_symbols(dso)) return map__unmap_ip(map, ip); - if (dso->rel) + if (dso__rel(dso)) return map__unmap_ip(map, ip + map__pgoff(map)); - /* - * kernel modules also have DSO_TYPE_USER in dso->kernel, - * but all kernel modules are ET_REL, so won't get here. - */ - if (dso->kernel == DSO_SPACE__USER) - return map__unmap_ip(map, ip - dso->text_offset); + if (dso__kernel(dso) == DSO_SPACE__USER) + return map__unmap_ip(map, ip - dso__text_offset(dso)); return ip + map__reloc(map); } +/* convert objdump address to relative address. (To be removed) */ +u64 map__objdump_2rip(struct map *map, u64 ip) +{ + const struct dso *dso = map__dso(map); + + if (!dso__adjust_symbols(dso)) + return ip; + + if (dso__rel(dso)) + return ip + map__pgoff(map); + + if (dso__kernel(dso) == DSO_SPACE__USER) + return ip - dso__text_offset(dso); + + return map__map_ip(map, ip + map__reloc(map)); +} + bool map__contains_symbol(const struct map *map, const struct symbol *sym) { u64 ip = map__unmap_ip(map, sym->start); @@ -605,7 +613,7 @@ struct kmap *__map__kmap(struct map *map) { const struct dso *dso = map__dso(map); - if (!dso || !dso->kernel) + if (!dso || !dso__kernel(dso)) return NULL; return (struct kmap *)(&RC_CHK_ACCESS(map)[1]); } diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 49756716cb13..65e2609fa1b1 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -132,6 +132,9 @@ u64 map__rip_2objdump(struct map *map, u64 rip); /* objdump address -> memory address */ u64 map__objdump_2mem(struct map *map, u64 ip); +/* objdump address -> rip */ +u64 map__objdump_2rip(struct map *map, u64 ip); + struct symbol; struct thread; diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 0334fc18d9c6..16b39db594f4 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -6,85 +6,282 @@ #include "dso.h" #include "map.h" #include "maps.h" +#include "rwsem.h" #include "thread.h" #include "ui/ui.h" #include "unwind.h" +#include <internal/rc_check.h> -struct map_rb_node { - struct rb_node rb_node; - struct map *map; +/* + * Locking/sorting note: + * + * Sorting is done with the write lock, iteration and binary searching happens + * under the read lock requiring being sorted. There is a race between sorting + * releasing the write lock and acquiring the read lock for iteration/searching + * where another thread could insert and break the sorting of the maps. In + * practice inserting maps should be rare meaning that the race shouldn't lead + * to live lock. Removal of maps doesn't break being sorted. + */ + +DECLARE_RC_STRUCT(maps) { + struct rw_semaphore lock; + /** + * @maps_by_address: array of maps sorted by their starting address if + * maps_by_address_sorted is true. + */ + struct map **maps_by_address; + /** + * @maps_by_name: optional array of maps sorted by their dso name if + * maps_by_name_sorted is true. + */ + struct map **maps_by_name; + struct machine *machine; +#ifdef HAVE_LIBUNWIND_SUPPORT + void *addr_space; + const struct unwind_libunwind_ops *unwind_libunwind_ops; +#endif + refcount_t refcnt; + /** + * @nr_maps: number of maps_by_address, and possibly maps_by_name, + * entries that contain maps. 
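+ * Always less than or equal to @nr_maps_allocated (asserted in
+ * check_invariants() below).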
+ */ + unsigned int nr_maps; + /** + * @nr_maps_allocated: number of entries in maps_by_address and possibly + * maps_by_name. + */ + unsigned int nr_maps_allocated; + /** + * @last_search_by_name_idx: cache of last found by name entry's index + * as frequent searches for the same dso name are common. + */ + unsigned int last_search_by_name_idx; + /** @maps_by_address_sorted: is maps_by_address sorted. */ + bool maps_by_address_sorted; + /** @maps_by_name_sorted: is maps_by_name sorted. */ + bool maps_by_name_sorted; + /** @ends_broken: do the maps contain a map whose end values are unset/unsorted? */ + bool ends_broken; }; -#define maps__for_each_entry(maps, map) \ - for (map = maps__first(maps); map; map = map_rb_node__next(map)) +static void check_invariants(const struct maps *maps __maybe_unused) +{ +#ifndef NDEBUG + assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated); + for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) { + struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i]; + + /* Check map is well-formed. */ + assert(map__end(map) == 0 || map__start(map) <= map__end(map)); + /* Expect at least 1 reference count. */ + assert(refcount_read(map__refcnt(map)) > 0); + + if (map__dso(map) && dso__kernel(map__dso(map))) + assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps)); + + if (i > 0) { + struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1]; + + /* If addresses are sorted... */ + if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) { + /* Maps should be in start address order. */ + assert(map__start(prev) <= map__start(map)); + /* + * If the ends of maps aren't broken (during + * construction) then they should be ordered + * too. + */ + if (!RC_CHK_ACCESS(maps)->ends_broken) { + assert(map__end(prev) <= map__end(map)); + assert(map__end(prev) <= map__start(map) || + map__start(prev) == map__start(map)); + } + } + } + } + if (RC_CHK_ACCESS(maps)->maps_by_name) { + for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) { + struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i]; -#define maps__for_each_entry_safe(maps, map, next) \ - for (map = maps__first(maps), next = map_rb_node__next(map); map; \ - map = next, next = map_rb_node__next(map)) + /* + * Maps in maps_by_name should also be in maps_by_address, so + * their reference count should be higher. + */ + assert(refcount_read(map__refcnt(map)) > 1); + } + } +#endif +} -static struct rb_root *maps__entries(struct maps *maps) +static struct map **maps__maps_by_address(const struct maps *maps) { - return &RC_CHK_ACCESS(maps)->entries; + return RC_CHK_ACCESS(maps)->maps_by_address; } -static struct rw_semaphore *maps__lock(struct maps *maps) +static void maps__set_maps_by_address(struct maps *maps, struct map **new) { - return &RC_CHK_ACCESS(maps)->lock; + RC_CHK_ACCESS(maps)->maps_by_address = new; + } -static struct map **maps__maps_by_name(struct maps *maps) +static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated) +{ + RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated; +} + +static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps) +{ + RC_CHK_ACCESS(maps)->nr_maps = nr_maps; +} + +/* Not in the header, to aid reference counting. 
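+ * External code that needs a map with a stable lifetime should instead go
+ * through maps__find_by_name(), which returns a map__get()'d entry.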
*/ +static struct map **maps__maps_by_name(const struct maps *maps) { return RC_CHK_ACCESS(maps)->maps_by_name; + } -static struct map_rb_node *maps__first(struct maps *maps) +static void maps__set_maps_by_name(struct maps *maps, struct map **new) { - struct rb_node *first = rb_first(maps__entries(maps)); + RC_CHK_ACCESS(maps)->maps_by_name = new; - if (first) - return rb_entry(first, struct map_rb_node, rb_node); - return NULL; } -static struct map_rb_node *map_rb_node__next(struct map_rb_node *node) +static bool maps__maps_by_address_sorted(const struct maps *maps) { - struct rb_node *next; + return RC_CHK_ACCESS(maps)->maps_by_address_sorted; +} - if (!node) - return NULL; +static void maps__set_maps_by_address_sorted(struct maps *maps, bool value) +{ + RC_CHK_ACCESS(maps)->maps_by_address_sorted = value; +} - next = rb_next(&node->rb_node); +static bool maps__maps_by_name_sorted(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->maps_by_name_sorted; +} - if (!next) - return NULL; +static void maps__set_maps_by_name_sorted(struct maps *maps, bool value) +{ + RC_CHK_ACCESS(maps)->maps_by_name_sorted = value; +} - return rb_entry(next, struct map_rb_node, rb_node); +struct machine *maps__machine(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->machine; } -static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map) +unsigned int maps__nr_maps(const struct maps *maps) { - struct map_rb_node *rb_node; + return RC_CHK_ACCESS(maps)->nr_maps; +} - maps__for_each_entry(maps, rb_node) { - if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map)) - return rb_node; - } - return NULL; +refcount_t *maps__refcnt(struct maps *maps) +{ + return &RC_CHK_ACCESS(maps)->refcnt; +} + +#ifdef HAVE_LIBUNWIND_SUPPORT +void *maps__addr_space(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->addr_space; +} + +void maps__set_addr_space(struct maps *maps, void *addr_space) +{ + RC_CHK_ACCESS(maps)->addr_space = addr_space; +} + +const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->unwind_libunwind_ops; +} + +void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops) +{ + RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops; +} +#endif + +static struct rw_semaphore *maps__lock(struct maps *maps) +{ + return &RC_CHK_ACCESS(maps)->lock; } static void maps__init(struct maps *maps, struct machine *machine) { - refcount_set(maps__refcnt(maps), 1); init_rwsem(maps__lock(maps)); - RC_CHK_ACCESS(maps)->entries = RB_ROOT; + RC_CHK_ACCESS(maps)->maps_by_address = NULL; + RC_CHK_ACCESS(maps)->maps_by_name = NULL; RC_CHK_ACCESS(maps)->machine = machine; - RC_CHK_ACCESS(maps)->last_search_by_name = NULL; +#ifdef HAVE_LIBUNWIND_SUPPORT + RC_CHK_ACCESS(maps)->addr_space = NULL; + RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL; +#endif + refcount_set(maps__refcnt(maps), 1); RC_CHK_ACCESS(maps)->nr_maps = 0; - RC_CHK_ACCESS(maps)->maps_by_name = NULL; + RC_CHK_ACCESS(maps)->nr_maps_allocated = 0; + RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0; + RC_CHK_ACCESS(maps)->maps_by_address_sorted = true; + RC_CHK_ACCESS(maps)->maps_by_name_sorted = false; +} + +static void maps__exit(struct maps *maps) +{ + struct map **maps_by_address = maps__maps_by_address(maps); + struct map **maps_by_name = maps__maps_by_name(maps); + + for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { + map__zput(maps_by_address[i]); + if (maps_by_name) + map__zput(maps_by_name[i]); + } + zfree(&maps_by_address); + 
zfree(&maps_by_name); + unwind__finish_access(maps); +} + +struct maps *maps__new(struct machine *machine) +{ + struct maps *result; + RC_STRUCT(maps) *maps = zalloc(sizeof(*maps)); + + if (ADD_RC_CHK(result, maps)) + maps__init(result, machine); + + return result; +} + +static void maps__delete(struct maps *maps) +{ + maps__exit(maps); + RC_CHK_FREE(maps); +} + +struct maps *maps__get(struct maps *maps) +{ + struct maps *result; + + if (RC_CHK_GET(result, maps)) + refcount_inc(maps__refcnt(maps)); + + return result; +} + +void maps__put(struct maps *maps) +{ + if (maps && refcount_dec_and_test(maps__refcnt(maps))) + maps__delete(maps); + else + RC_CHK_PUT(maps); } static void __maps__free_maps_by_name(struct maps *maps) { + if (!maps__maps_by_name(maps)) + return; + /* * Free everything to try to do it from the rbtree in the next search */ @@ -92,219 +289,338 @@ static void __maps__free_maps_by_name(struct maps *maps) map__put(maps__maps_by_name(maps)[i]); zfree(&RC_CHK_ACCESS(maps)->maps_by_name); - RC_CHK_ACCESS(maps)->nr_maps_allocated = 0; + + /* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */ + maps__set_maps_by_name_sorted(maps, false); } -static int __maps__insert(struct maps *maps, struct map *map) +static int map__start_cmp(const void *a, const void *b) { - struct rb_node **p = &maps__entries(maps)->rb_node; - struct rb_node *parent = NULL; - const u64 ip = map__start(map); - struct map_rb_node *m, *new_rb_node; - - new_rb_node = malloc(sizeof(*new_rb_node)); - if (!new_rb_node) - return -ENOMEM; - - RB_CLEAR_NODE(&new_rb_node->rb_node); - new_rb_node->map = map__get(map); - - while (*p != NULL) { - parent = *p; - m = rb_entry(parent, struct map_rb_node, rb_node); - if (ip < map__start(m->map)) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; + const struct map *map_a = *(const struct map * const *)a; + const struct map *map_b = *(const struct map * const *)b; + u64 map_a_start = map__start(map_a); + u64 map_b_start = map__start(map_b); + + if (map_a_start == map_b_start) { + u64 map_a_end = map__end(map_a); + u64 map_b_end = map__end(map_b); + + if (map_a_end == map_b_end) { + /* Ensure maps with the same addresses have a fixed order. */ + if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b)) + return 0; + return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b) + ? 1 : -1; + } + return map_a_end > map_b_end ? 1 : -1; } - - rb_link_node(&new_rb_node->rb_node, parent, p); - rb_insert_color(&new_rb_node->rb_node, maps__entries(maps)); - return 0; + return map_a_start > map_b_start ? 
1 : -1; } -int maps__insert(struct maps *maps, struct map *map) +static void __maps__sort_by_address(struct maps *maps) { - int err; - const struct dso *dso = map__dso(map); + if (maps__maps_by_address_sorted(maps)) + return; + + qsort(maps__maps_by_address(maps), + maps__nr_maps(maps), + sizeof(struct map *), + map__start_cmp); + maps__set_maps_by_address_sorted(maps, true); +} +static void maps__sort_by_address(struct maps *maps) +{ down_write(maps__lock(maps)); - err = __maps__insert(maps, map); - if (err) - goto out; - - ++RC_CHK_ACCESS(maps)->nr_maps; + __maps__sort_by_address(maps); + up_write(maps__lock(maps)); +} - if (dso && dso->kernel) { - struct kmap *kmap = map__kmap(map); +static int map__strcmp(const void *a, const void *b) +{ + const struct map *map_a = *(const struct map * const *)a; + const struct map *map_b = *(const struct map * const *)b; + const struct dso *dso_a = map__dso(map_a); + const struct dso *dso_b = map__dso(map_b); + int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b)); - if (kmap) - kmap->kmaps = maps; - else - pr_err("Internal error: kernel dso with non kernel map\n"); + if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) { + /* Ensure distinct but name equal maps have an order. */ + return map__start_cmp(a, b); } + return ret; +} +static int maps__sort_by_name(struct maps *maps) +{ + int err = 0; - /* - * If we already performed some search by name, then we need to add the just - * inserted map and resort. - */ - if (maps__maps_by_name(maps)) { - if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) { - int nr_allocate = maps__nr_maps(maps) * 2; - struct map **maps_by_name = realloc(maps__maps_by_name(maps), - nr_allocate * sizeof(map)); + down_write(maps__lock(maps)); + if (!maps__maps_by_name_sorted(maps)) { + struct map **maps_by_name = maps__maps_by_name(maps); - if (maps_by_name == NULL) { - __maps__free_maps_by_name(maps); + if (!maps_by_name) { + maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated * + sizeof(*maps_by_name)); + if (!maps_by_name) err = -ENOMEM; - goto out; - } + else { + struct map **maps_by_address = maps__maps_by_address(maps); + unsigned int n = maps__nr_maps(maps); - RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name; - RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate; + maps__set_maps_by_name(maps, maps_by_name); + for (unsigned int i = 0; i < n; i++) + maps_by_name[i] = map__get(maps_by_address[i]); + } + } + if (!err) { + qsort(maps_by_name, + maps__nr_maps(maps), + sizeof(struct map *), + map__strcmp); + maps__set_maps_by_name_sorted(maps, true); } - maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map); - __maps__sort_by_name(maps); } - out: + check_invariants(maps); up_write(maps__lock(maps)); return err; } -static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node) +static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map) { - rb_erase_init(&rb_node->rb_node, maps__entries(maps)); - map__put(rb_node->map); - free(rb_node); + struct map **maps_by_address = maps__maps_by_address(maps); + + if (maps__maps_by_address_sorted(maps)) { + struct map **mapp = + bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps), + sizeof(*mapp), map__start_cmp); + + if (mapp) + return mapp - maps_by_address; + } else { + for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { + if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map)) + return i; + } + } + pr_err("Map missing from maps"); + return -1; } -void 
maps__remove(struct maps *maps, struct map *map) +static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map) { - struct map_rb_node *rb_node; - - down_write(maps__lock(maps)); - if (RC_CHK_ACCESS(maps)->last_search_by_name == map) - RC_CHK_ACCESS(maps)->last_search_by_name = NULL; - - rb_node = maps__find_node(maps, map); - assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map)); - __maps__remove(maps, rb_node); - if (maps__maps_by_name(maps)) - __maps__free_maps_by_name(maps); - --RC_CHK_ACCESS(maps)->nr_maps; - up_write(maps__lock(maps)); + struct map **maps_by_name = maps__maps_by_name(maps); + + if (maps__maps_by_name_sorted(maps)) { + struct map **mapp = + bsearch(&map, maps_by_name, maps__nr_maps(maps), + sizeof(*mapp), map__strcmp); + + if (mapp) + return mapp - maps_by_name; + } else { + for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { + if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map)) + return i; + } + } + pr_err("Map missing from maps"); + return -1; } -static void __maps__purge(struct maps *maps) +static int __maps__insert(struct maps *maps, struct map *new) { - struct map_rb_node *pos, *next; + struct map **maps_by_address = maps__maps_by_address(maps); + struct map **maps_by_name = maps__maps_by_name(maps); + const struct dso *dso = map__dso(new); + unsigned int nr_maps = maps__nr_maps(maps); + unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated; + + if (nr_maps + 1 > nr_allocate) { + nr_allocate = !nr_allocate ? 32 : nr_allocate * 2; + + maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new)); + if (!maps_by_address) + return -ENOMEM; + + maps__set_maps_by_address(maps, maps_by_address); + if (maps_by_name) { + maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new)); + if (!maps_by_name) { + /* + * If by name fails, just disable by name and it will + * recompute next time it is required. + */ + __maps__free_maps_by_name(maps); + } + maps__set_maps_by_name(maps, maps_by_name); + } + RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate; + } + /* Insert the value at the end. */ + maps_by_address[nr_maps] = map__get(new); + if (maps_by_name) + maps_by_name[nr_maps] = map__get(new); - if (maps__maps_by_name(maps)) - __maps__free_maps_by_name(maps); + nr_maps++; + RC_CHK_ACCESS(maps)->nr_maps = nr_maps; - maps__for_each_entry_safe(maps, pos, next) { - rb_erase_init(&pos->rb_node, maps__entries(maps)); - map__put(pos->map); - free(pos); + /* + * Recompute if things are sorted. If things are inserted in a sorted + * manner, for example by processing /proc/pid/maps, then no + * sorting/resorting will be necessary. + */ + if (nr_maps == 1) { + /* If there's just 1 entry then maps are sorted. */ + maps__set_maps_by_address_sorted(maps, true); + maps__set_maps_by_name_sorted(maps, maps_by_name != NULL); + } else { + /* Sorted if maps were already sorted and this map starts after the last one. 
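+ * For example (editor's illustration): appending [0x3000, 0x4000) after
+ * [0x1000, 0x2000) keeps maps_by_address sorted, while appending
+ * [0x1800, 0x2800) clears the sorted flag until the next sort.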
*/ + maps__set_maps_by_address_sorted(maps, + maps__maps_by_address_sorted(maps) && + map__end(maps_by_address[nr_maps - 2]) <= map__start(new)); + maps__set_maps_by_name_sorted(maps, false); + } + if (map__end(new) < map__start(new)) + RC_CHK_ACCESS(maps)->ends_broken = true; + if (dso && dso__kernel(dso)) { + struct kmap *kmap = map__kmap(new); + + if (kmap) + kmap->kmaps = maps; + else + pr_err("Internal error: kernel dso with non kernel map\n"); } + return 0; } -static void maps__exit(struct maps *maps) +int maps__insert(struct maps *maps, struct map *map) { + int ret; + down_write(maps__lock(maps)); - __maps__purge(maps); + ret = __maps__insert(maps, map); + check_invariants(maps); up_write(maps__lock(maps)); + return ret; } -bool maps__empty(struct maps *maps) +static void __maps__remove(struct maps *maps, struct map *map) { - return !maps__first(maps); -} - -struct maps *maps__new(struct machine *machine) -{ - struct maps *result; - RC_STRUCT(maps) *maps = zalloc(sizeof(*maps)); - - if (ADD_RC_CHK(result, maps)) - maps__init(result, machine); + struct map **maps_by_address = maps__maps_by_address(maps); + struct map **maps_by_name = maps__maps_by_name(maps); + unsigned int nr_maps = maps__nr_maps(maps); + unsigned int address_idx; + + /* Slide later mappings over the one to remove */ + address_idx = maps__by_address_index(maps, map); + map__put(maps_by_address[address_idx]); + memmove(&maps_by_address[address_idx], + &maps_by_address[address_idx + 1], + (nr_maps - address_idx - 1) * sizeof(*maps_by_address)); + + if (maps_by_name) { + unsigned int name_idx = maps__by_name_index(maps, map); + + map__put(maps_by_name[name_idx]); + memmove(&maps_by_name[name_idx], + &maps_by_name[name_idx + 1], + (nr_maps - name_idx - 1) * sizeof(*maps_by_name)); + } - return result; + --RC_CHK_ACCESS(maps)->nr_maps; } -static void maps__delete(struct maps *maps) +void maps__remove(struct maps *maps, struct map *map) { - maps__exit(maps); - unwind__finish_access(maps); - RC_CHK_FREE(maps); + down_write(maps__lock(maps)); + __maps__remove(maps, map); + check_invariants(maps); + up_write(maps__lock(maps)); } -struct maps *maps__get(struct maps *maps) +bool maps__empty(struct maps *maps) { - struct maps *result; + bool res; - if (RC_CHK_GET(result, maps)) - refcount_inc(maps__refcnt(maps)); + down_read(maps__lock(maps)); + res = maps__nr_maps(maps) == 0; + up_read(maps__lock(maps)); - return result; + return res; } -void maps__put(struct maps *maps) +bool maps__equal(struct maps *a, struct maps *b) { - if (maps && refcount_dec_and_test(maps__refcnt(maps))) - maps__delete(maps); - else - RC_CHK_PUT(maps); + return RC_CHK_EQUAL(a, b); } int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data) { - struct map_rb_node *pos; + bool done = false; int ret = 0; - down_read(maps__lock(maps)); - maps__for_each_entry(maps, pos) { - ret = cb(pos->map, data); - if (ret) - break; + /* See locking/sorting note. */ + while (!done) { + down_read(maps__lock(maps)); + if (maps__maps_by_address_sorted(maps)) { + /* + * maps__for_each_map callbacks may buggily/unsafely + * insert into maps_by_address. Deliberately reload + * maps__nr_maps and maps_by_address on each iteration + * to avoid using memory freed by maps__insert growing + * the array - this may cause maps to be skipped or + * repeated. 
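+ *
+ * A minimal caller sketch (editor's illustration; names are
+ * hypothetical):
+ *
+ *	static int count_cb(struct map *map __maybe_unused, void *data)
+ *	{
+ *		(*(unsigned int *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	unsigned int n = 0;
+ *	maps__for_each_map(maps, count_cb, &n);
+ *
+ * A non-zero return from the callback stops the iteration early.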
+ */ + for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { + struct map **maps_by_address = maps__maps_by_address(maps); + struct map *map = maps_by_address[i]; + + ret = cb(map, data); + if (ret) + break; + } + done = true; + } + up_read(maps__lock(maps)); + if (!done) + maps__sort_by_address(maps); } - up_read(maps__lock(maps)); return ret; } void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data) { - struct map_rb_node *pos, *next; - unsigned int start_nr_maps; + struct map **maps_by_address; down_write(maps__lock(maps)); - start_nr_maps = maps__nr_maps(maps); - maps__for_each_entry_safe(maps, pos, next) { - if (cb(pos->map, data)) { - __maps__remove(maps, pos); - --RC_CHK_ACCESS(maps)->nr_maps; - } + maps_by_address = maps__maps_by_address(maps); + for (unsigned int i = 0; i < maps__nr_maps(maps);) { + if (cb(maps_by_address[i], data)) + __maps__remove(maps, maps_by_address[i]); + else + i++; } - if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps)) - __maps__free_maps_by_name(maps); - + check_invariants(maps); up_write(maps__lock(maps)); } struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp) { struct map *map = maps__find(maps, addr); + struct symbol *result = NULL; /* Ensure map is loaded before using map->map_ip */ - if (map != NULL && map__load(map) >= 0) { - if (mapp != NULL) - *mapp = map; - return map__find_symbol(map, map__map_ip(map, addr)); - } + if (map != NULL && map__load(map) >= 0) + result = map__find_symbol(map, map__map_ip(map, addr)); + + if (mapp) + *mapp = map; + else + map__put(map); - return NULL; + return result; } struct maps__find_symbol_by_name_args { @@ -393,24 +709,28 @@ size_t maps__fprintf(struct maps *maps, FILE *fp) * Find first map where end > map->start. * Same as find_vma() in kernel. */ -static struct rb_node *first_ending_after(struct maps *maps, const struct map *map) +static unsigned int first_ending_after(struct maps *maps, const struct map *map) { - struct rb_root *root; - struct rb_node *next, *first; + struct map **maps_by_address = maps__maps_by_address(maps); + int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1; - root = maps__entries(maps); - next = root->rb_node; - first = NULL; - while (next) { - struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node); + assert(maps__maps_by_address_sorted(maps)); + if (low <= high && map__end(maps_by_address[0]) > map__start(map)) + return 0; - if (map__end(pos->map) > map__start(map)) { - first = next; - if (map__start(pos->map) <= map__start(map)) + while (low <= high) { + int mid = (low + high) / 2; + struct map *pos = maps_by_address[mid]; + + if (map__end(pos) > map__start(map)) { + first = mid; + if (map__start(pos) <= map__start(map)) { + /* Entry overlaps map. */ break; - next = next->rb_left; + } + high = mid - 1; } else - next = next->rb_right; + low = mid + 1; } return first; } @@ -419,300 +739,368 @@ static struct rb_node *first_ending_after(struct maps *maps, const struct map *m * Adds new to maps, if new overlaps existing entries then the existing maps are * adjusted or removed so that new fits without overlapping any entries. 
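 * For example (editor's illustration): inserting new = [0x1800, 0x2800)
 * over an existing pos = [0x1000, 0x3000) clips pos to a "before" map
 * [0x1000, 0x1800) plus an "after" map [0x2800, 0x3000), the latter with
 * its pgoff advanced by 0x1800.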
*/ -int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) +static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) { - - struct rb_node *next; + struct map **maps_by_address; int err = 0; FILE *fp = debug_file(); - down_write(maps__lock(maps)); +sort_again: + if (!maps__maps_by_address_sorted(maps)) + __maps__sort_by_address(maps); - next = first_ending_after(maps, new); - while (next && !err) { - struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node); - next = rb_next(&pos->rb_node); + maps_by_address = maps__maps_by_address(maps); + /* + * Iterate through entries where the end of the existing entry is + * greater than the new map's start. + */ + for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) { + struct map *pos = maps_by_address[i]; + struct map *before = NULL, *after = NULL; /* * Stop if current map starts after map->end. * Maps are ordered by start: next will not overlap for sure. */ - if (map__start(pos->map) >= map__end(new)) + if (map__start(pos) >= map__end(new)) break; - if (verbose >= 2) { - - if (use_browser) { - pr_debug("overlapping maps in %s (disable tui for more info)\n", - map__dso(new)->name); - } else { - pr_debug("overlapping maps:\n"); - map__fprintf(new, fp); - map__fprintf(pos->map, fp); - } + if (use_browser) { + pr_debug("overlapping maps in %s (disable tui for more info)\n", + dso__name(map__dso(new))); + } else if (verbose >= 2) { + pr_debug("overlapping maps:\n"); + map__fprintf(new, fp); + map__fprintf(pos, fp); } - rb_erase_init(&pos->rb_node, maps__entries(maps)); /* * Now check if we need to create new maps for areas not * overlapped by the new map: */ - if (map__start(new) > map__start(pos->map)) { - struct map *before = map__clone(pos->map); + if (map__start(new) > map__start(pos)) { + /* Map starts within existing map. Need to shorten the existing map. */ + before = map__clone(pos); if (before == NULL) { err = -ENOMEM; - goto put_map; + goto out_err; } - map__set_end(before, map__start(new)); - err = __maps__insert(maps, before); - if (err) { - map__put(before); - goto put_map; - } if (verbose >= 2 && !use_browser) map__fprintf(before, fp); - map__put(before); } - - if (map__end(new) < map__end(pos->map)) { - struct map *after = map__clone(pos->map); + if (map__end(new) < map__end(pos)) { + /* The new map isn't as long as the existing map. */ + after = map__clone(pos); if (after == NULL) { + map__zput(before); err = -ENOMEM; - goto put_map; + goto out_err; } map__set_start(after, map__end(new)); - map__add_pgoff(after, map__end(new) - map__start(pos->map)); - assert(map__map_ip(pos->map, map__end(new)) == - map__map_ip(after, map__end(new))); - err = __maps__insert(maps, after); - if (err) { - map__put(after); - goto put_map; - } + map__add_pgoff(after, map__end(new) - map__start(pos)); + assert(map__map_ip(pos, map__end(new)) == + map__map_ip(after, map__end(new))); + + if (verbose >= 2 && !use_browser) map__fprintf(after, fp); } -put_map: - map__put(pos->map); - free(pos); + /* + * If adding one entry, for `before` or `after`, we can replace + * the existing entry. If both `before` and `after` are + * necessary then an insert is needed. If the new entry + * entirely overlaps the existing entry it can just be removed. + */ + if (before) { + map__put(maps_by_address[i]); + maps_by_address[i] = before; + /* Maps are still ordered, go to next one. 
*/ + i++; + if (after) { + __maps__insert(maps, after); + map__put(after); + if (!maps__maps_by_address_sorted(maps)) { + /* + * Sorting broken so invariants don't + * hold, sort and go again. + */ + goto sort_again; + } + /* + * Maps are still ordered, skip after and go to + * next one (terminate loop). + */ + i++; + } + } else if (after) { + map__put(maps_by_address[i]); + maps_by_address[i] = after; + /* Maps are ordered, go to next one. */ + i++; + } else { + __maps__remove(maps, pos); + /* + * Maps are ordered but no need to increase `i` as the + * later maps were moved down. + */ + } + check_invariants(maps); } /* Add the map. */ - err = __maps__insert(maps, new); - up_write(maps__lock(maps)); + __maps__insert(maps, new); +out_err: return err; } -int maps__copy_from(struct maps *maps, struct maps *parent) +int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) { int err; - struct map_rb_node *rb_node; + down_write(maps__lock(maps)); + err = __maps__fixup_overlap_and_insert(maps, new); + up_write(maps__lock(maps)); + return err; +} + +int maps__copy_from(struct maps *dest, struct maps *parent) +{ + /* Note, if struct map were immutable then cloning could use ref counts. */ + struct map **parent_maps_by_address; + int err = 0; + unsigned int n; + + down_write(maps__lock(dest)); down_read(maps__lock(parent)); - maps__for_each_entry(parent, rb_node) { - struct map *new = map__clone(rb_node->map); + parent_maps_by_address = maps__maps_by_address(parent); + n = maps__nr_maps(parent); + if (maps__nr_maps(dest) == 0) { + /* No existing mappings so just copy from parent to avoid reallocs in insert. */ + unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated; + struct map **dest_maps_by_address = + malloc(nr_maps_allocated * sizeof(struct map *)); + struct map **dest_maps_by_name = NULL; - if (new == NULL) { + if (!dest_maps_by_address) err = -ENOMEM; - goto out_unlock; + else { + if (maps__maps_by_name(parent)) { + dest_maps_by_name = + malloc(nr_maps_allocated * sizeof(struct map *)); + } + + RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address; + RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name; + RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated; } - err = unwind__prepare_access(maps, new, NULL); - if (err) - goto out_unlock; + for (unsigned int i = 0; !err && i < n; i++) { + struct map *pos = parent_maps_by_address[i]; + struct map *new = map__clone(pos); - err = maps__insert(maps, new); - if (err) - goto out_unlock; + if (!new) + err = -ENOMEM; + else { + err = unwind__prepare_access(dest, new, NULL); + if (!err) { + dest_maps_by_address[i] = new; + if (dest_maps_by_name) + dest_maps_by_name[i] = map__get(new); + RC_CHK_ACCESS(dest)->nr_maps = i + 1; + } + } + if (err) + map__put(new); + } + maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent)); + if (!err) { + RC_CHK_ACCESS(dest)->last_search_by_name_idx = + RC_CHK_ACCESS(parent)->last_search_by_name_idx; + maps__set_maps_by_name_sorted(dest, + dest_maps_by_name && + maps__maps_by_name_sorted(parent)); + } else { + RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0; + maps__set_maps_by_name_sorted(dest, false); + } + } else { + /* Unexpected copying to a maps containing entries. 
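+ * Fall back to cloning the parent's entries and inserting them one at
+ * a time.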
*/ + for (unsigned int i = 0; !err && i < n; i++) { + struct map *pos = parent_maps_by_address[i]; + struct map *new = map__clone(pos); - map__put(new); + if (!new) + err = -ENOMEM; + else { + err = unwind__prepare_access(dest, new, NULL); + if (!err) + err = __maps__insert(dest, new); + } + map__put(new); + } } + check_invariants(dest); - err = 0; -out_unlock: up_read(maps__lock(parent)); + up_write(maps__lock(dest)); return err; } -struct map *maps__find(struct maps *maps, u64 ip) +static int map__addr_cmp(const void *key, const void *entry) { - struct rb_node *p; - struct map_rb_node *m; - - - down_read(maps__lock(maps)); + const u64 ip = *(const u64 *)key; + const struct map *map = *(const struct map * const *)entry; - p = maps__entries(maps)->rb_node; - while (p != NULL) { - m = rb_entry(p, struct map_rb_node, rb_node); - if (ip < map__start(m->map)) - p = p->rb_left; - else if (ip >= map__end(m->map)) - p = p->rb_right; - else - goto out; - } - - m = NULL; -out: - up_read(maps__lock(maps)); - return m ? m->map : NULL; + if (ip < map__start(map)) + return -1; + if (ip >= map__end(map)) + return 1; + return 0; } -static int map__strcmp(const void *a, const void *b) +struct map *maps__find(struct maps *maps, u64 ip) { - const struct map *map_a = *(const struct map **)a; - const struct map *map_b = *(const struct map **)b; - const struct dso *dso_a = map__dso(map_a); - const struct dso *dso_b = map__dso(map_b); - int ret = strcmp(dso_a->short_name, dso_b->short_name); - - if (ret == 0 && map_a != map_b) { - /* - * Ensure distinct but name equal maps have an order in part to - * aid reference counting. - */ - ret = (int)map__start(map_a) - (int)map__start(map_b); - if (ret == 0) - ret = (int)((intptr_t)map_a - (intptr_t)map_b); + struct map *result = NULL; + bool done = false; + + /* See locking/sorting note. 
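+ * (take the read lock and binary search while maps_by_address is
+ * sorted; otherwise drop the lock, sort by address and retry).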
*/ + while (!done) { + down_read(maps__lock(maps)); + if (maps__maps_by_address_sorted(maps)) { + struct map **mapp = + bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps), + sizeof(*mapp), map__addr_cmp); + + if (mapp) + result = map__get(*mapp); + done = true; + } + up_read(maps__lock(maps)); + if (!done) + maps__sort_by_address(maps); } - - return ret; + return result; } static int map__strcmp_name(const void *name, const void *b) { const struct dso *dso = map__dso(*(const struct map **)b); - return strcmp(name, dso->short_name); -} - -void __maps__sort_by_name(struct maps *maps) -{ - qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp); -} - -static int map__groups__sort_by_name_from_rbtree(struct maps *maps) -{ - struct map_rb_node *rb_node; - struct map **maps_by_name = realloc(maps__maps_by_name(maps), - maps__nr_maps(maps) * sizeof(struct map *)); - int i = 0; - - if (maps_by_name == NULL) - return -1; - - up_read(maps__lock(maps)); - down_write(maps__lock(maps)); - - RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name; - RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps); - - maps__for_each_entry(maps, rb_node) - maps_by_name[i++] = map__get(rb_node->map); - - __maps__sort_by_name(maps); - - up_write(maps__lock(maps)); - down_read(maps__lock(maps)); - - return 0; + return strcmp(name, dso__short_name(dso)); } -static struct map *__maps__find_by_name(struct maps *maps, const char *name) +struct map *maps__find_by_name(struct maps *maps, const char *name) { - struct map **mapp; + struct map *result = NULL; + bool done = false; - if (maps__maps_by_name(maps) == NULL && - map__groups__sort_by_name_from_rbtree(maps)) - return NULL; + /* See locking/sorting note. */ + while (!done) { + unsigned int i; - mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), - sizeof(*mapp), map__strcmp_name); - if (mapp) - return *mapp; - return NULL; -} + down_read(maps__lock(maps)); -struct map *maps__find_by_name(struct maps *maps, const char *name) -{ - struct map_rb_node *rb_node; - struct map *map; - - down_read(maps__lock(maps)); + /* First check last found entry. */ + i = RC_CHK_ACCESS(maps)->last_search_by_name_idx; + if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) { + struct dso *dso = map__dso(maps__maps_by_name(maps)[i]); + if (dso && strcmp(dso__short_name(dso), name) == 0) { + result = map__get(maps__maps_by_name(maps)[i]); + done = true; + } + } - if (RC_CHK_ACCESS(maps)->last_search_by_name) { - const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name); + /* Second search sorted array. */ + if (!done && maps__maps_by_name_sorted(maps)) { + struct map **mapp = + bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), + sizeof(*mapp), map__strcmp_name); - if (strcmp(dso->short_name, name) == 0) { - map = RC_CHK_ACCESS(maps)->last_search_by_name; - goto out_unlock; + if (mapp) { + result = map__get(*mapp); + i = mapp - maps__maps_by_name(maps); + RC_CHK_ACCESS(maps)->last_search_by_name_idx = i; + } + done = true; } - } - /* - * If we have maps->maps_by_name, then the name isn't in the rbtree, - * as maps->maps_by_name mirrors the rbtree when lookups by name are - * made. - */ - map = __maps__find_by_name(maps, name); - if (map || maps__maps_by_name(maps) != NULL) - goto out_unlock; - - /* Fallback to traversing the rbtree... 
*/ - maps__for_each_entry(maps, rb_node) { - struct dso *dso; - - map = rb_node->map; - dso = map__dso(map); - if (strcmp(dso->short_name, name) == 0) { - RC_CHK_ACCESS(maps)->last_search_by_name = map; - goto out_unlock; + up_read(maps__lock(maps)); + if (!done) { + /* Sort and retry binary search. */ + if (maps__sort_by_name(maps)) { + /* + * Memory allocation failed, do a linear search + * through the address sorted maps. + */ + struct map **maps_by_address; + unsigned int n; + + down_read(maps__lock(maps)); + maps_by_address = maps__maps_by_address(maps); + n = maps__nr_maps(maps); + for (i = 0; i < n; i++) { + struct map *pos = maps_by_address[i]; + struct dso *dso = map__dso(pos); + + if (dso && strcmp(dso__short_name(dso), name) == 0) { + result = map__get(pos); + break; + } + } + up_read(maps__lock(maps)); + done = true; + } } } - map = NULL; - -out_unlock: - up_read(maps__lock(maps)); - return map; + return result; } struct map *maps__find_next_entry(struct maps *maps, struct map *map) { - struct map_rb_node *rb_node = maps__find_node(maps, map); - struct map_rb_node *next = map_rb_node__next(rb_node); + unsigned int i; + struct map *result = NULL; - if (next) - return next->map; + down_read(maps__lock(maps)); + i = maps__by_address_index(maps, map); + if (i < maps__nr_maps(maps)) + result = map__get(maps__maps_by_address(maps)[i]); - return NULL; + up_read(maps__lock(maps)); + return result; } void maps__fixup_end(struct maps *maps) { - struct map_rb_node *prev = NULL, *curr; + struct map **maps_by_address; + unsigned int n; down_write(maps__lock(maps)); + if (!maps__maps_by_address_sorted(maps)) + __maps__sort_by_address(maps); - maps__for_each_entry(maps, curr) { - if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map))) - map__set_end(prev->map, map__start(curr->map)); + maps_by_address = maps__maps_by_address(maps); + n = maps__nr_maps(maps); + for (unsigned int i = 1; i < n; i++) { + struct map *prev = maps_by_address[i - 1]; + struct map *curr = maps_by_address[i]; - prev = curr; + if (!map__end(prev) || map__end(prev) > map__start(curr)) + map__set_end(prev, map__start(curr)); } /* * We still haven't the actual symbols, so guess the * last map final address. */ - if (curr && !map__end(curr->map)) - map__set_end(curr->map, ~0ULL); + if (n > 0 && !map__end(maps_by_address[n - 1])) + map__set_end(maps_by_address[n - 1], ~0ULL); + + RC_CHK_ACCESS(maps)->ends_broken = false; + check_invariants(maps); up_write(maps__lock(maps)); } @@ -723,117 +1111,95 @@ void maps__fixup_end(struct maps *maps) */ int maps__merge_in(struct maps *kmaps, struct map *new_map) { - struct map_rb_node *rb_node; - struct rb_node *first; - bool overlaps; - LIST_HEAD(merged); - int err = 0; + unsigned int first_after_, kmaps__nr_maps; + struct map **kmaps_maps_by_address; + struct map **merged_maps_by_address; + unsigned int merged_nr_maps_allocated; + + /* First try under a read lock. */ + while (true) { + down_read(maps__lock(kmaps)); + if (maps__maps_by_address_sorted(kmaps)) + break; - down_read(maps__lock(kmaps)); - first = first_ending_after(kmaps, new_map); - rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL; - overlaps = rb_node && map__start(rb_node->map) < map__end(new_map); - up_read(maps__lock(kmaps)); + up_read(maps__lock(kmaps)); + + /* first_ending_after()'s binary search requires sorted maps. Sort and try again. 
*/ + maps__sort_by_address(kmaps); + } + first_after_ = first_ending_after(kmaps, new_map); + kmaps_maps_by_address = maps__maps_by_address(kmaps); - if (!overlaps) + if (first_after_ >= maps__nr_maps(kmaps) || + map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) { + /* No overlap so regular insert suffices. */ + up_read(maps__lock(kmaps)); return maps__insert(kmaps, new_map); + } + up_read(maps__lock(kmaps)); - maps__for_each_entry(kmaps, rb_node) { - struct map *old_map = rb_node->map; + /* Plain insert with a read-lock failed, try again now with the write lock. */ + down_write(maps__lock(kmaps)); + if (!maps__maps_by_address_sorted(kmaps)) + __maps__sort_by_address(kmaps); - /* no overload with this one */ - if (map__end(new_map) < map__start(old_map) || - map__start(new_map) >= map__end(old_map)) - continue; + first_after_ = first_ending_after(kmaps, new_map); + kmaps_maps_by_address = maps__maps_by_address(kmaps); + kmaps__nr_maps = maps__nr_maps(kmaps); - if (map__start(new_map) < map__start(old_map)) { - /* - * |new...... - * |old.... - */ - if (map__end(new_map) < map__end(old_map)) { - /* - * |new......| -> |new..| - * |old....| -> |old....| - */ - map__set_end(new_map, map__start(old_map)); - } else { - /* - * |new.............| -> |new..| |new..| - * |old....| -> |old....| - */ - struct map_list_node *m = map_list_node__new(); + if (first_after_ >= kmaps__nr_maps || + map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) { + /* No overlap so regular insert suffices. */ + int ret = __maps__insert(kmaps, new_map); - if (!m) { - err = -ENOMEM; - goto out; - } + check_invariants(kmaps); + up_write(maps__lock(kmaps)); + return ret; + } + /* Array to merge into, possibly 1 more for the sake of new_map. */ + merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated; + if (kmaps__nr_maps + 1 == merged_nr_maps_allocated) + merged_nr_maps_allocated++; + + merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address)); + if (!merged_maps_by_address) { + up_write(maps__lock(kmaps)); + return -ENOMEM; + } + maps__set_maps_by_address(kmaps, merged_maps_by_address); + maps__set_maps_by_address_sorted(kmaps, true); + __maps__free_maps_by_name(kmaps); + maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated); - m->map = map__clone(new_map); - if (!m->map) { - free(m); - err = -ENOMEM; - goto out; - } + /* Copy entries before the new_map that can't overlap. */ + for (unsigned int i = 0; i < first_after_; i++) + merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]); - map__set_end(m->map, map__start(old_map)); - list_add_tail(&m->node, &merged); - map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); - map__set_start(new_map, map__end(old_map)); - } - } else { - /* - * |new...... - * |old.... - */ - if (map__end(new_map) < map__end(old_map)) { - /* - * |new..| -> x - * |old.........| -> |old.........| - */ - map__put(new_map); - new_map = NULL; - break; - } else { - /* - * |new......| -> |new...| - * |old....| -> |old....| - */ - map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); - map__set_start(new_map, map__end(old_map)); - } - } - } + maps__set_nr_maps(kmaps, first_after_); -out: - while (!list_empty(&merged)) { - struct map_list_node *old_node; + /* Add the new map, it will be split when the later overlapping mappings are added. 
*/ + __maps__insert(kmaps, new_map); - old_node = list_entry(merged.next, struct map_list_node, node); - list_del_init(&old_node->node); - if (!err) - err = maps__insert(kmaps, old_node->map); - map__put(old_node->map); - free(old_node); - } + /* Insert mappings after new_map, splitting new_map in the process. */ + for (unsigned int i = first_after_; i < kmaps__nr_maps; i++) + __maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]); - if (new_map) { - if (!err) - err = maps__insert(kmaps, new_map); - map__put(new_map); - } - return err; + /* Copy the maps from merged into kmaps. */ + for (unsigned int i = 0; i < kmaps__nr_maps; i++) + map__zput(kmaps_maps_by_address[i]); + + free(kmaps_maps_by_address); + check_invariants(kmaps); + up_write(maps__lock(kmaps)); + return 0; } void maps__load_first(struct maps *maps) { - struct map_rb_node *first; - down_read(maps__lock(maps)); - first = maps__first(maps); - if (first) - map__load(first->map); + if (maps__nr_maps(maps) > 0) + map__load(maps__maps_by_address(maps)[0]); up_read(maps__lock(maps)); } diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index d836d04c9402..d9aa62ed968a 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -3,45 +3,15 @@ #define __PERF_MAPS_H #include <linux/refcount.h> -#include <linux/rbtree.h> #include <stdio.h> #include <stdbool.h> #include <linux/types.h> -#include "rwsem.h" -#include <internal/rc_check.h> struct ref_reloc_sym; struct machine; struct map; struct maps; -struct map_list_node { - struct list_head node; - struct map *map; -}; - -static inline struct map_list_node *map_list_node__new(void) -{ - return malloc(sizeof(struct map_list_node)); -} - -struct map *maps__find(struct maps *maps, u64 addr); - -DECLARE_RC_STRUCT(maps) { - struct rb_root entries; - struct rw_semaphore lock; - struct machine *machine; - struct map *last_search_by_name; - struct map **maps_by_name; - refcount_t refcnt; - unsigned int nr_maps; - unsigned int nr_maps_allocated; -#ifdef HAVE_LIBUNWIND_SUPPORT - void *addr_space; - const struct unwind_libunwind_ops *unwind_libunwind_ops; -#endif -}; - #define KMAP_NAME_LEN 256 struct kmap { @@ -65,36 +35,22 @@ static inline void __maps__zput(struct maps **map) #define maps__zput(map) __maps__zput(&map) +bool maps__equal(struct maps *a, struct maps *b); + /* Iterate over map calling cb for each entry. */ int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data); /* Iterate over map removing an entry if cb returns true. */ void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data); -static inline struct machine *maps__machine(struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->machine; -} - -static inline unsigned int maps__nr_maps(const struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->nr_maps; -} - -static inline refcount_t *maps__refcnt(struct maps *maps) -{ - return &RC_CHK_ACCESS(maps)->refcnt; -} +struct machine *maps__machine(const struct maps *maps); +unsigned int maps__nr_maps(const struct maps *maps); /* Test only. */ +refcount_t *maps__refcnt(struct maps *maps); /* Test only. 
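+ * (editor's note: exposed so the maps tests can check map and maps
+ * reference counts; not intended for general use).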
*/ #ifdef HAVE_LIBUNWIND_SUPPORT -static inline void *maps__addr_space(struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->addr_space; -} - -static inline const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->unwind_libunwind_ops; -} +void *maps__addr_space(const struct maps *maps); +void maps__set_addr_space(struct maps *maps, void *addr_space); +const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps); +void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops); #endif size_t maps__fprintf(struct maps *maps, FILE *fp); @@ -102,6 +58,7 @@ size_t maps__fprintf(struct maps *maps, FILE *fp); int maps__insert(struct maps *maps, struct map *map); void maps__remove(struct maps *maps, struct map *map); +struct map *maps__find(struct maps *maps, u64 addr); struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp); struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp); @@ -117,8 +74,6 @@ struct map *maps__find_next_entry(struct maps *maps, struct map *map); int maps__merge_in(struct maps *kmaps, struct map *new_map); -void __maps__sort_by_name(struct maps *maps); - void maps__fixup_end(struct maps *maps); void maps__load_first(struct maps *maps); diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index 3a2e3687878c..6dda47bb774f 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -10,58 +10,135 @@ #include <linux/kernel.h> #include "map_symbol.h" #include "mem-events.h" +#include "mem-info.h" #include "debug.h" +#include "evsel.h" #include "symbol.h" #include "pmu.h" #include "pmus.h" unsigned int perf_mem_events__loads_ldlat = 30; -#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s } +#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a } -static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { - E("ldlat-loads", "cpu/mem-loads,ldlat=%u/P", "cpu/events/mem-loads"), - E("ldlat-stores", "cpu/mem-stores/P", "cpu/events/mem-stores"), - E(NULL, NULL, NULL), +struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { + E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "mem-loads", true, 0), + E("ldlat-stores", "%s/mem-stores/P", "mem-stores", false, 0), + E(NULL, NULL, NULL, false, 0), }; #undef E static char mem_loads_name[100]; -static bool mem_loads_name__init; +static char mem_stores_name[100]; -struct perf_mem_event * __weak perf_mem_events__ptr(int i) +struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i) { - if (i >= PERF_MEM_EVENTS__MAX) + if (i >= PERF_MEM_EVENTS__MAX || !pmu) return NULL; - return &perf_mem_events[i]; + return &pmu->mem_events[i]; } -const char * __weak perf_mem_events__name(int i, const char *pmu_name __maybe_unused) +static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu) { - struct perf_mem_event *e = perf_mem_events__ptr(i); + while ((pmu = perf_pmus__scan(pmu)) != NULL) { + if (pmu->mem_events) + return pmu; + } + return NULL; +} + +struct perf_pmu *perf_mem_events_find_pmu(void) +{ + /* + * The current perf mem doesn't support per-PMU configuration. + * The exact same configuration is applied to all the + * mem_events supported PMUs. + * Return the first mem_events supported PMU. + * + * Notes: The only case which may support multiple mem_events + * supported PMUs is Intel hybrid. The exact same mem_events + * is shared among the PMUs. 
Configuring only the first PMU + * is good enough as well. + */ + return perf_pmus__scan_mem(NULL); +} + +/** + * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs starting from the given pmu + * @pmu: Start pmu. If it's NULL, search the entire PMU list. + */ +int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu) +{ + int num = 0; + + while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) + num++; + + return num; +} +static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu) +{ + struct perf_mem_event *e; + + if (i >= PERF_MEM_EVENTS__MAX || !pmu) + return NULL; + + e = &pmu->mem_events[i]; if (!e) return NULL; - if (i == PERF_MEM_EVENTS__LOAD) { - if (!mem_loads_name__init) { - mem_loads_name__init = true; - scnprintf(mem_loads_name, sizeof(mem_loads_name), - e->name, perf_mem_events__loads_ldlat); + if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) { + if (e->ldlat) { + if (!e->aux_event) { + /* ARM and most of Intel */ + scnprintf(mem_loads_name, sizeof(mem_loads_name), + e->name, pmu->name, + perf_mem_events__loads_ldlat); + } else { + /* Intel with mem-loads-aux event */ + scnprintf(mem_loads_name, sizeof(mem_loads_name), + e->name, pmu->name, pmu->name, + perf_mem_events__loads_ldlat); + } + } else { + if (!e->aux_event) { + /* AMD and POWER */ + scnprintf(mem_loads_name, sizeof(mem_loads_name), + e->name, pmu->name); + } else + return NULL; } + return mem_loads_name; } - return e->name; + if (i == PERF_MEM_EVENTS__STORE) { + scnprintf(mem_stores_name, sizeof(mem_stores_name), + e->name, pmu->name); + return mem_stores_name; + } + + return NULL; } -__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused) +bool is_mem_loads_aux_event(struct evsel *leader) { - return false; + struct perf_pmu *pmu = leader->pmu; + struct perf_mem_event *e; + + if (!pmu || !pmu->mem_events) + return false; + + e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD]; + if (!e->aux_event) + return false; + + return leader->core.attr.config == e->aux_event; } -int perf_mem_events__parse(const char *str) +int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str) { char *tok, *saveptr = NULL; bool found = false; @@ -79,7 +156,7 @@ int perf_mem_events__parse(const char *str) while (tok) { for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) { - struct perf_mem_event *e = perf_mem_events__ptr(j); + struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j); if (!e->tag) continue; @@ -100,19 +177,21 @@ int perf_mem_events__parse(const char *str) return -1; } -static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu, +static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu, struct perf_mem_event *e) { - char sysfs_name[100]; char path[PATH_MAX]; struct stat st; - scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name); - scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name); + if (!e->event_name) + return true; + + scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name); + return !stat(path, &st); } -int perf_mem_events__init(void) +int perf_pmu__mem_events_init(struct perf_pmu *pmu) { const char *mnt = sysfs__mount(); bool found = false; @@ -122,8 +201,7 @@ int perf_mem_events__init(void) return -ENOENT; for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) { - struct perf_mem_event *e = perf_mem_events__ptr(j); - struct perf_pmu *pmu = NULL; + struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j); /* * If the event entry isn't valid, skip initialization @@ -132,103 +210,66 @@ int 
perf_mem_events__init(void) if (!e->tag) continue; - /* - * Scan all PMUs not just core ones, since perf mem/c2c on - * platforms like AMD uses IBS OP PMU which is independent - * of core PMU. - */ - while ((pmu = perf_pmus__scan(pmu)) != NULL) { - e->supported |= perf_mem_event__supported(mnt, pmu, e); - if (e->supported) { - found = true; - break; - } - } + e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e); + if (e->supported) + found = true; } return found ? 0 : -ENOENT; } -void perf_mem_events__list(void) +void perf_pmu__mem_events_list(struct perf_pmu *pmu) { int j; for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) { - struct perf_mem_event *e = perf_mem_events__ptr(j); + struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j); fprintf(stderr, "%-*s%-*s%s", e->tag ? 13 : 0, e->tag ? : "", e->tag && verbose > 0 ? 25 : 0, - e->tag && verbose > 0 ? perf_mem_events__name(j, NULL) : "", + e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "", e->supported ? ": available\n" : ""); } } -static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e, - int idx) +int perf_mem_events__record_args(const char **rec_argv, int *argv_nr) { const char *mnt = sysfs__mount(); struct perf_pmu *pmu = NULL; - - while ((pmu = perf_pmus__scan(pmu)) != NULL) { - if (!perf_mem_event__supported(mnt, pmu, e)) { - pr_err("failed: event '%s' not supported\n", - perf_mem_events__name(idx, pmu->name)); - } - } -} - -int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, - char **rec_tmp, int *tmp_nr) -{ - const char *mnt = sysfs__mount(); - int i = *argv_nr, k = 0; struct perf_mem_event *e; + int i = *argv_nr; + const char *s; + char *copy; - for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) { - e = perf_mem_events__ptr(j); - if (!e->record) - continue; + while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) { + for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) { + e = perf_pmu__mem_events_ptr(pmu, j); + + if (!e->record) + continue; - if (perf_pmus__num_mem_pmus() == 1) { if (!e->supported) { pr_err("failed: event '%s' not supported\n", - perf_mem_events__name(j, NULL)); + perf_pmu__mem_events_name(j, pmu)); return -1; } - rec_argv[i++] = "-e"; - rec_argv[i++] = perf_mem_events__name(j, NULL); - } else { - struct perf_pmu *pmu = NULL; + s = perf_pmu__mem_events_name(j, pmu); + if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e)) + continue; - if (!e->supported) { - perf_mem_events__print_unsupport_hybrid(e, j); + copy = strdup(s); + if (!copy) return -1; - } - - while ((pmu = perf_pmus__scan(pmu)) != NULL) { - const char *s = perf_mem_events__name(j, pmu->name); - - if (!perf_mem_event__supported(mnt, pmu, e)) - continue; - rec_argv[i++] = "-e"; - if (s) { - char *copy = strdup(s); - if (!copy) - return -1; - - rec_argv[i++] = copy; - rec_tmp[k++] = copy; - } - } + rec_argv[i++] = "-e"; + rec_argv[i++] = copy; } } *argv_nr = i; - *tmp_nr = k; return 0; } @@ -242,7 +283,7 @@ static const char * const tlb_access[] = { "Fault", }; -int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info) +int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info) { size_t l = 0, i; u64 m = PERF_MEM_TLB_NA; @@ -252,7 +293,7 @@ int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info) out[0] = '\0'; if (mem_info) - m = mem_info->data_src.mem_dtlb; + m = mem_info__const_data_src(mem_info)->mem_dtlb; hit = m & PERF_MEM_TLB_HIT; miss = m & PERF_MEM_TLB_MISS; @@ -320,13 +361,13 @@ static const char * const mem_hops[] = { "board", }; -static 
int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info) +static int perf_mem__op_scnprintf(char *out, size_t sz, const struct mem_info *mem_info) { u64 op = PERF_MEM_LOCK_NA; int l; if (mem_info) - op = mem_info->data_src.mem_op; + op = mem_info__const_data_src(mem_info)->mem_op; if (op & PERF_MEM_OP_NA) l = scnprintf(out, sz, "N/A"); @@ -344,7 +385,7 @@ static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_inf return l; } -int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info) +int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info) { union perf_mem_data_src data_src; int printed = 0; @@ -359,7 +400,7 @@ int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info) if (!mem_info) goto na; - data_src = mem_info->data_src; + data_src = *mem_info__const_data_src(mem_info); if (data_src.mem_lvl & PERF_MEM_LVL_HIT) memcpy(hit_miss, "hit", 3); @@ -426,7 +467,7 @@ static const char * const snoopx_access[] = { "Peer", }; -int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info) +int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info) { size_t i, l = 0; u64 m = PERF_MEM_SNOOP_NA; @@ -435,7 +476,7 @@ int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info) out[0] = '\0'; if (mem_info) - m = mem_info->data_src.mem_snoop; + m = mem_info__const_data_src(mem_info)->mem_snoop; for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) { if (!(m & 0x1)) @@ -449,7 +490,7 @@ int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info) m = 0; if (mem_info) - m = mem_info->data_src.mem_snoopx; + m = mem_info__const_data_src(mem_info)->mem_snoopx; for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) { if (!(m & 0x1)) @@ -468,13 +509,13 @@ int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info) return l; } -int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info) +int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info) { u64 mask = PERF_MEM_LOCK_NA; int l; if (mem_info) - mask = mem_info->data_src.mem_lock; + mask = mem_info__const_data_src(mem_info)->mem_lock; if (mask & PERF_MEM_LOCK_NA) l = scnprintf(out, sz, "N/A"); @@ -486,7 +527,7 @@ int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info) return l; } -int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info) +int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info) { size_t l = 0; u64 mask = PERF_MEM_BLK_NA; @@ -495,7 +536,7 @@ int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info) out[0] = '\0'; if (mem_info) - mask = mem_info->data_src.mem_blk; + mask = mem_info__const_data_src(mem_info)->mem_blk; if (!mask || (mask & PERF_MEM_BLK_NA)) { l += scnprintf(out + l, sz - l, " N/A"); @@ -509,7 +550,7 @@ int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info) return l; } -int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info) +int perf_script__meminfo_scnprintf(char *out, size_t sz, const struct mem_info *mem_info) { int i = 0; @@ -531,8 +572,8 @@ int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_in int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi) { - union perf_mem_data_src *data_src = &mi->data_src; - u64 daddr = mi->daddr.addr; + union perf_mem_data_src *data_src = mem_info__data_src(mi); + u64 daddr = 
mem_info__daddr(mi)->addr; u64 op = data_src->mem_op; u64 lvl = data_src->mem_lvl; u64 snoop = data_src->mem_snoop; @@ -659,7 +700,7 @@ do { \ return -1; } - if (!mi->daddr.ms.map || !mi->iaddr.ms.map) { + if (!mem_info__daddr(mi)->ms.map || !mem_info__iaddr(mi)->ms.map) { stats->nomap++; return -1; } diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h index b40ad6ea93fc..ca31014d7934 100644 --- a/tools/perf/util/mem-events.h +++ b/tools/perf/util/mem-events.h @@ -3,27 +3,16 @@ #define __PERF_MEM_EVENTS_H #include <stdbool.h> -#include <stdint.h> -#include <stdio.h> #include <linux/types.h> -#include <linux/refcount.h> -#include <linux/perf_event.h> -#include "stat.h" -#include "evsel.h" struct perf_mem_event { bool record; bool supported; + bool ldlat; + u32 aux_event; const char *tag; const char *name; - const char *sysfs_name; -}; - -struct mem_info { - struct addr_map_symbol iaddr; - struct addr_map_symbol daddr; - union perf_mem_data_src data_src; - refcount_t refcnt; + const char *event_name; }; enum { @@ -33,26 +22,31 @@ enum { PERF_MEM_EVENTS__MAX, }; +struct evsel; +struct mem_info; +struct perf_pmu; + extern unsigned int perf_mem_events__loads_ldlat; +extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX]; -int perf_mem_events__parse(const char *str); -int perf_mem_events__init(void); +int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str); +int perf_pmu__mem_events_init(struct perf_pmu *pmu); -const char *perf_mem_events__name(int i, const char *pmu_name); -struct perf_mem_event *perf_mem_events__ptr(int i); +struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i); +struct perf_pmu *perf_mem_events_find_pmu(void); +int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu); bool is_mem_loads_aux_event(struct evsel *leader); -void perf_mem_events__list(void); -int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, - char **rec_tmp, int *tmp_nr); +void perf_pmu__mem_events_list(struct perf_pmu *pmu); +int perf_mem_events__record_args(const char **rec_argv, int *argv_nr); -int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info); -int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info); -int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info); -int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info); -int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info); +int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info); +int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info); +int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info); +int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info); +int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info); -int perf_script__meminfo_scnprintf(char *bf, size_t size, struct mem_info *mem_info); +int perf_script__meminfo_scnprintf(char *bf, size_t size, const struct mem_info *mem_info); struct c2c_stats { u32 nr_entries; diff --git a/tools/perf/util/mem-info.c b/tools/perf/util/mem-info.c new file mode 100644 index 000000000000..27d67721a695 --- /dev/null +++ b/tools/perf/util/mem-info.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/zalloc.h> +#include "mem-info.h" + +struct mem_info *mem_info__get(struct mem_info *mi) +{ + struct mem_info *result; + + if (RC_CHK_GET(result, mi)) + 
refcount_inc(mem_info__refcnt(mi)); + + return result; +} + +void mem_info__put(struct mem_info *mi) +{ + if (mi && refcount_dec_and_test(mem_info__refcnt(mi))) { + addr_map_symbol__exit(mem_info__iaddr(mi)); + addr_map_symbol__exit(mem_info__daddr(mi)); + RC_CHK_FREE(mi); + } else { + RC_CHK_PUT(mi); + } +} + +struct mem_info *mem_info__new(void) +{ + struct mem_info *result = NULL; + RC_STRUCT(mem_info) *mi = zalloc(sizeof(*mi)); + + if (ADD_RC_CHK(result, mi)) + refcount_set(mem_info__refcnt(result), 1); + + return result; +} diff --git a/tools/perf/util/mem-info.h b/tools/perf/util/mem-info.h new file mode 100644 index 000000000000..0f68e29f311b --- /dev/null +++ b/tools/perf/util/mem-info.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PERF_MEM_INFO_H +#define __PERF_MEM_INFO_H + +#include <linux/refcount.h> +#include <linux/perf_event.h> +#include <internal/rc_check.h> +#include "map_symbol.h" + +DECLARE_RC_STRUCT(mem_info) { + struct addr_map_symbol iaddr; + struct addr_map_symbol daddr; + union perf_mem_data_src data_src; + refcount_t refcnt; +}; + +struct mem_info *mem_info__new(void); +struct mem_info *mem_info__get(struct mem_info *mi); +void mem_info__put(struct mem_info *mi); + +static inline void __mem_info__zput(struct mem_info **mi) +{ + mem_info__put(*mi); + *mi = NULL; +} + +#define mem_info__zput(mi) __mem_info__zput(&mi) + +static inline struct addr_map_symbol *mem_info__iaddr(struct mem_info *mi) +{ + return &RC_CHK_ACCESS(mi)->iaddr; +} + +static inline struct addr_map_symbol *mem_info__daddr(struct mem_info *mi) +{ + return &RC_CHK_ACCESS(mi)->daddr; +} + +static inline union perf_mem_data_src *mem_info__data_src(struct mem_info *mi) +{ + return &RC_CHK_ACCESS(mi)->data_src; +} + +static inline const union perf_mem_data_src *mem_info__const_data_src(const struct mem_info *mi) +{ + return &RC_CHK_ACCESS(mi)->data_src; +} + +static inline refcount_t *mem_info__refcnt(struct mem_info *mi) +{ + return &RC_CHK_ACCESS(mi)->refcnt; +} + +#endif /* __PERF_MEM_INFO_H */ diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 966cca5a3e88..69f6a46402c3 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -44,6 +44,8 @@ struct metric_event *metricgroup__lookup(struct rblist *metric_events, if (!metric_events) return NULL; + if (evsel && evsel->metric_leader) + me.evsel = evsel->metric_leader; nd = rblist__find(metric_events, &me); if (nd) return container_of(nd, struct metric_event, nd); @@ -350,25 +352,23 @@ static int setup_metric_events(const char *pmu, struct hashmap *ids, return 0; } -static bool match_metric(const char *n, const char *list) +static bool match_metric(const char *metric_or_groups, const char *sought) { int len; char *m; - if (!list) + if (!sought) return false; - if (!strcmp(list, "all")) + if (!strcmp(sought, "all")) return true; - if (!n) - return !strcasecmp(list, "No_group"); - len = strlen(list); - m = strcasestr(n, list); - if (!m) - return false; - if ((m == n || m[-1] == ';' || m[-1] == ' ') && - (m[len] == 0 || m[len] == ';')) + if (!metric_or_groups) + return !strcasecmp(sought, "No_group"); + len = strlen(sought); + if (!strncasecmp(metric_or_groups, sought, len) && + (metric_or_groups[len] == 0 || metric_or_groups[len] == ';')) return true; - return false; + m = strchr(metric_or_groups, ';'); + return m && match_metric(m + 1, sought); } static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric) @@ -455,7 +455,7 @@ static int 
metricgroup__add_to_mep_groups(const struct pmu_metric *pm, const char *g; char *omg, *mg; - mg = strdup(pm->metric_group ?: "No_group"); + mg = strdup(pm->metric_group ?: pm->metric_name); if (!mg) return -ENOMEM; omg = mg; @@ -466,7 +466,7 @@ static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm, if (strlen(g)) me = mep_lookup(groups, g, pm->metric_name); else - me = mep_lookup(groups, "No_group", pm->metric_name); + me = mep_lookup(groups, pm->metric_name, pm->metric_name); if (me) { me->metric_desc = pm->desc; @@ -1502,7 +1502,8 @@ static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu, pr_debug("Parsing metric events '%s'\n", events.buf); parse_events_error__init(&parse_error); ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL, - &parse_error, fake_pmu, /*warn_if_reordered=*/false); + &parse_error, fake_pmu, /*warn_if_reordered=*/false, + /*fake_tp=*/false); if (ret) { parse_events_error__print(&parse_error, events.buf); goto err_out; @@ -1690,12 +1691,15 @@ int metricgroup__parse_groups(struct evlist *perf_evlist, bool metric_no_threshold, const char *user_requested_cpu_list, bool system_wide, + bool hardware_aware_grouping, struct rblist *metric_events) { const struct pmu_metrics_table *table = pmu_metrics_table__find(); if (!table) return -EINVAL; + if (hardware_aware_grouping) + pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n"); return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge, metric_no_threshold, user_requested_cpu_list, system_wide, diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h index d5325c6ec8e1..779f6ede1b51 100644 --- a/tools/perf/util/metricgroup.h +++ b/tools/perf/util/metricgroup.h @@ -77,6 +77,7 @@ int metricgroup__parse_groups(struct evlist *perf_evlist, bool metric_no_threshold, const char *user_requested_cpu_list, bool system_wide, + bool hardware_aware_grouping, struct rblist *metric_events); int metricgroup__parse_groups_test(struct evlist *evlist, const struct pmu_metrics_table *table, diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 66eabcea4242..6ed0f9c5581d 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -34,11 +34,12 @@ #ifdef PARSER_DEBUG extern int parse_events_debug; #endif -static int get_config_terms(struct parse_events_terms *head_config, struct list_head *head_terms); +static int get_config_terms(const struct parse_events_terms *head_config, + struct list_head *head_terms); static int parse_events_terms__copy(const struct parse_events_terms *src, struct parse_events_terms *dest); -struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { +const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = { .symbol = "cpu-cycles", .alias = "cycles", @@ -81,7 +82,7 @@ struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { }, }; -struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { +const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { [PERF_COUNT_SW_CPU_CLOCK] = { .symbol = "cpu-clock", .alias = "", @@ -154,7 +155,7 @@ const char *event_type(int type) return "unknown"; } -static char *get_config_str(struct parse_events_terms *head_terms, +static char *get_config_str(const struct parse_events_terms *head_terms, enum parse_events__term_type type_term) { struct parse_events_term *term; @@ -169,12 +170,12 @@ static char *get_config_str(struct parse_events_terms *head_terms, return NULL; } -static 
char *get_config_metric_id(struct parse_events_terms *head_terms) +static char *get_config_metric_id(const struct parse_events_terms *head_terms) { return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); } -static char *get_config_name(struct parse_events_terms *head_terms) +static char *get_config_name(const struct parse_events_terms *head_terms) { return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); } @@ -358,7 +359,7 @@ static int config_term_common(struct perf_event_attr *attr, struct parse_events_term *term, struct parse_events_error *err); static int config_attr(struct perf_event_attr *attr, - struct parse_events_terms *head, + const struct parse_events_terms *head, struct parse_events_error *err, config_term_func_t config_term); @@ -442,17 +443,21 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state, return strcmp(parse_state->pmu_filter, pmu->name) != 0; } +static int parse_events_add_pmu(struct parse_events_state *parse_state, + struct list_head *list, struct perf_pmu *pmu, + const struct parse_events_terms *const_parsed_terms, + bool auto_merge_stats); + int parse_events_add_cache(struct list_head *list, int *idx, const char *name, struct parse_events_state *parse_state, - struct parse_events_terms *head_config) + struct parse_events_terms *parsed_terms) { struct perf_pmu *pmu = NULL; bool found_supported = false; - const char *config_name = get_config_name(head_config); - const char *metric_id = get_config_metric_id(head_config); + const char *config_name = get_config_name(parsed_terms); + const char *metric_id = get_config_metric_id(parsed_terms); - /* Legacy cache events are only supported by core PMUs. */ - while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { + while ((pmu = perf_pmus__scan(pmu)) != NULL) { LIST_HEAD(config_terms); struct perf_event_attr attr; int ret; @@ -460,6 +465,24 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name, if (parse_events__filter_pmu(parse_state, pmu)) continue; + if (perf_pmu__have_event(pmu, name)) { + /* + * The PMU has the event so add as not a legacy cache + * event. + */ + ret = parse_events_add_pmu(parse_state, list, pmu, + parsed_terms, + perf_pmu__auto_merge_stats(pmu)); + if (ret) + return ret; + continue; + } + + if (!pmu->is_core) { + /* Legacy cache events are only supported by core PMUs. 
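+			 * They are encoded as PERF_TYPE_HW_CACHE attributes,
+			 * which non-core PMUs can't open; events known by name
+			 * on such PMUs were already added above.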
*/ + continue; + } + memset(&attr, 0, sizeof(attr)); attr.type = PERF_TYPE_HW_CACHE; @@ -469,11 +492,12 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name, found_supported = true; - if (head_config) { - if (config_attr(&attr, head_config, parse_state->error, config_term_common)) + if (parsed_terms) { + if (config_attr(&attr, parsed_terms, parse_state->error, + config_term_common)) return -EINVAL; - if (get_config_terms(head_config, &config_terms)) + if (get_config_terms(parsed_terms, &config_terms)) return -ENOMEM; } @@ -519,13 +543,15 @@ static void tracepoint_error(struct parse_events_error *e, int err, parse_events_error__handle(e, column, strdup(str), strdup(help)); } -static int add_tracepoint(struct list_head *list, int *idx, +static int add_tracepoint(struct parse_events_state *parse_state, + struct list_head *list, const char *sys_name, const char *evt_name, struct parse_events_error *err, struct parse_events_terms *head_config, void *loc_) { YYLTYPE *loc = loc_; - struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++); + struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++, + !parse_state->fake_tp); if (IS_ERR(evsel)) { tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column); @@ -544,7 +570,8 @@ static int add_tracepoint(struct list_head *list, int *idx, return 0; } -static int add_tracepoint_multi_event(struct list_head *list, int *idx, +static int add_tracepoint_multi_event(struct parse_events_state *parse_state, + struct list_head *list, const char *sys_name, const char *evt_name, struct parse_events_error *err, struct parse_events_terms *head_config, YYLTYPE *loc) @@ -578,7 +605,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx, found++; - ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name, + ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name, err, head_config, loc); } @@ -592,19 +619,21 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx, return ret; } -static int add_tracepoint_event(struct list_head *list, int *idx, +static int add_tracepoint_event(struct parse_events_state *parse_state, + struct list_head *list, const char *sys_name, const char *evt_name, struct parse_events_error *err, struct parse_events_terms *head_config, YYLTYPE *loc) { return strpbrk(evt_name, "*?") ? 
- add_tracepoint_multi_event(list, idx, sys_name, evt_name, + add_tracepoint_multi_event(parse_state, list, sys_name, evt_name, err, head_config, loc) : - add_tracepoint(list, idx, sys_name, evt_name, + add_tracepoint(parse_state, list, sys_name, evt_name, err, head_config, loc); } -static int add_tracepoint_multi_sys(struct list_head *list, int *idx, +static int add_tracepoint_multi_sys(struct parse_events_state *parse_state, + struct list_head *list, const char *sys_name, const char *evt_name, struct parse_events_error *err, struct parse_events_terms *head_config, YYLTYPE *loc) @@ -630,7 +659,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx, if (!strglobmatch(events_ent->d_name, sys_name)) continue; - ret = add_tracepoint_event(list, idx, events_ent->d_name, + ret = add_tracepoint_event(parse_state, list, events_ent->d_name, evt_name, err, head_config, loc); } @@ -1085,7 +1114,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr, #endif static int config_attr(struct perf_event_attr *attr, - struct parse_events_terms *head, + const struct parse_events_terms *head, struct parse_events_error *err, config_term_func_t config_term) { @@ -1098,7 +1127,8 @@ static int config_attr(struct perf_event_attr *attr, return 0; } -static int get_config_terms(struct parse_events_terms *head_config, struct list_head *head_terms) +static int get_config_terms(const struct parse_events_terms *head_config, + struct list_head *head_terms) { #define ADD_CONFIG_TERM(__type, __weak) \ struct evsel_config_term *__t; \ @@ -1266,7 +1296,8 @@ static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head return 0; } -int parse_events_add_tracepoint(struct list_head *list, int *idx, +int parse_events_add_tracepoint(struct parse_events_state *parse_state, + struct list_head *list, const char *sys, const char *event, struct parse_events_error *err, struct parse_events_terms *head_config, void *loc_) @@ -1282,14 +1313,14 @@ int parse_events_add_tracepoint(struct list_head *list, int *idx, } if (strpbrk(sys, "*?")) - return add_tracepoint_multi_sys(list, idx, sys, event, + return add_tracepoint_multi_sys(parse_state, list, sys, event, err, head_config, loc); else - return add_tracepoint_event(list, idx, sys, event, + return add_tracepoint_event(parse_state, list, sys, event, err, head_config, loc); #else + (void)parse_state; (void)list; - (void)idx; (void)sys; (void)event; (void)head_config; @@ -1302,7 +1333,7 @@ int parse_events_add_tracepoint(struct list_head *list, int *idx, static int __parse_events_add_numeric(struct parse_events_state *parse_state, struct list_head *list, struct perf_pmu *pmu, u32 type, u32 extended_type, - u64 config, struct parse_events_terms *head_config) + u64 config, const struct parse_events_terms *head_config) { struct perf_event_attr attr; LIST_HEAD(config_terms); @@ -1338,7 +1369,7 @@ static int __parse_events_add_numeric(struct parse_events_state *parse_state, int parse_events_add_numeric(struct parse_events_state *parse_state, struct list_head *list, u32 type, u64 config, - struct parse_events_terms *head_config, + const struct parse_events_terms *head_config, bool wildcard) { struct perf_pmu *pmu = NULL; @@ -1385,56 +1416,34 @@ static bool config_term_percore(struct list_head *config_terms) return false; } -int parse_events_add_pmu(struct parse_events_state *parse_state, - struct list_head *list, const char *name, - const struct parse_events_terms *const_parsed_terms, - bool auto_merge_stats, void *loc_) +static int 
parse_events_add_pmu(struct parse_events_state *parse_state, + struct list_head *list, struct perf_pmu *pmu, + const struct parse_events_terms *const_parsed_terms, + bool auto_merge_stats) { struct perf_event_attr attr; struct perf_pmu_info info; - struct perf_pmu *pmu; struct evsel *evsel; struct parse_events_error *err = parse_state->error; - YYLTYPE *loc = loc_; LIST_HEAD(config_terms); struct parse_events_terms parsed_terms; bool alias_rewrote_terms = false; - pmu = parse_state->fake_pmu ?: perf_pmus__find(name); - - if (!pmu) { - char *err_str; - - if (asprintf(&err_str, - "Cannot find PMU `%s'. Missing kernel support?", - name) >= 0) - parse_events_error__handle(err, loc->first_column, err_str, NULL); - return -EINVAL; - } - - parse_events_terms__init(&parsed_terms); - if (const_parsed_terms) { - int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); - - if (ret) - return ret; - } - if (verbose > 1) { struct strbuf sb; strbuf_init(&sb, /*hint=*/ 0); - if (pmu->selectable && list_empty(&parsed_terms.terms)) { - strbuf_addf(&sb, "%s//", name); + if (pmu->selectable && const_parsed_terms && + list_empty(&const_parsed_terms->terms)) { + strbuf_addf(&sb, "%s//", pmu->name); } else { - strbuf_addf(&sb, "%s/", name); - parse_events_terms__to_strbuf(&parsed_terms, &sb); + strbuf_addf(&sb, "%s/", pmu->name); + parse_events_terms__to_strbuf(const_parsed_terms, &sb); strbuf_addch(&sb, '/'); } fprintf(stderr, "Attempt to add: %s\n", sb.buf); strbuf_release(&sb); } - fix_raw(&parsed_terms, pmu); memset(&attr, 0, sizeof(attr)); if (pmu->perf_event_attr_init_default) @@ -1442,7 +1451,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, attr.type = pmu->type; - if (list_empty(&parsed_terms.terms)) { + if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) { evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, /*name=*/NULL, /*metric_id=*/NULL, pmu, @@ -1451,6 +1460,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, return evsel ? 0 : -ENOMEM; } + parse_events_terms__init(&parsed_terms); + if (const_parsed_terms) { + int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); + + if (ret) + return ret; + } + fix_raw(&parsed_terms, pmu); + /* Configure attr/terms with a known PMU, this will set hardcoded terms. 
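	 * Hardcoded terms are the generic ones, such as config, name and
	 * period, that don't come from the PMU's sysfs format files.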
*/ if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) { parse_events_terms__exit(&parsed_terms); @@ -1469,7 +1487,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, strbuf_init(&sb, /*hint=*/ 0); parse_events_terms__to_strbuf(&parsed_terms, &sb); - fprintf(stderr, "..after resolving event: %s/%s/\n", name, sb.buf); + fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf); strbuf_release(&sb); } @@ -1583,8 +1601,8 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state, continue; auto_merge_stats = perf_pmu__auto_merge_stats(pmu); - if (!parse_events_add_pmu(parse_state, list, pmu->name, - &parsed_terms, auto_merge_stats, loc)) { + if (!parse_events_add_pmu(parse_state, list, pmu, + &parsed_terms, auto_merge_stats)) { struct strbuf sb; strbuf_init(&sb, /*hint=*/ 0); @@ -1596,8 +1614,8 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state, } if (parse_state->fake_pmu) { - if (!parse_events_add_pmu(parse_state, list, event_name, &parsed_terms, - /*auto_merge_stats=*/true, loc)) { + if (!parse_events_add_pmu(parse_state, list, parse_state->fake_pmu, &parsed_terms, + /*auto_merge_stats=*/true)) { struct strbuf sb; strbuf_init(&sb, /*hint=*/ 0); @@ -1618,10 +1636,59 @@ out_err: return ok ? 0 : -1; } -int parse_events__modifier_group(struct list_head *list, - char *event_mod) +int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state, + const char *event_or_pmu, + const struct parse_events_terms *const_parsed_terms, + struct list_head **listp, + void *loc_) { - return parse_events__modifier_event(list, event_mod, true); + YYLTYPE *loc = loc_; + struct perf_pmu *pmu; + int ok = 0; + char *help; + + *listp = malloc(sizeof(**listp)); + if (!*listp) + return -ENOMEM; + + INIT_LIST_HEAD(*listp); + + /* Attempt to add to list assuming event_or_pmu is a PMU name. */ + pmu = parse_state->fake_pmu ?: perf_pmus__find(event_or_pmu); + if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms, + /*auto_merge_stats=*/false)) + return 0; + + pmu = NULL; + /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */ + while ((pmu = perf_pmus__scan(pmu)) != NULL) { + if (!parse_events__filter_pmu(parse_state, pmu) && + perf_pmu__match(pmu, event_or_pmu)) { + bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu); + + if (!parse_events_add_pmu(parse_state, *listp, pmu, + const_parsed_terms, + auto_merge_stats)) { + ok++; + parse_state->wild_card_pmus = true; + } + } + } + if (ok) + return 0; + + /* Failure to add, assume event_or_pmu is an event name. 
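+	 * For example 'instructions' isn't a PMU name but may be provided
+	 * by several PMUs; parse_events_multi_pmu_add() below handles that.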
*/ + zfree(listp); + if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, const_parsed_terms, listp, loc)) + return 0; + + if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0) + help = NULL; + parse_events_error__handle(parse_state->error, loc->first_column, + strdup("Bad event or PMU"), + help); + zfree(listp); + return -EINVAL; } void parse_events__set_leader(char *name, struct list_head *list) @@ -1635,213 +1702,146 @@ void parse_events__set_leader(char *name, struct list_head *list) leader = list_first_entry(list, struct evsel, core.node); __perf_evlist__set_leader(list, &leader->core); + zfree(&leader->group_name); leader->group_name = name; } -/* list_event is assumed to point to malloc'ed memory */ -void parse_events_update_lists(struct list_head *list_event, - struct list_head *list_all) +static int parse_events__modifier_list(struct parse_events_state *parse_state, + YYLTYPE *loc, + struct list_head *list, + struct parse_events_modifier mod, + bool group) { - /* - * Called for single event definition. Update the - * 'all event' list, and reinit the 'single event' - * list, for next event definition. - */ - list_splice_tail(list_event, list_all); - free(list_event); -} - -struct event_modifier { - int eu; - int ek; - int eh; - int eH; - int eG; - int eI; - int precise; - int precise_max; - int exclude_GH; - int sample_read; - int pinned; - int weak; - int exclusive; - int bpf_counter; -}; + struct evsel *evsel; + + if (!group && mod.weak) { + parse_events_error__handle(parse_state->error, loc->first_column, + strdup("Weak modifier is for use with groups"), NULL); + return -EINVAL; + } -static int get_event_modifier(struct event_modifier *mod, char *str, - struct evsel *evsel) -{ - int eu = evsel ? evsel->core.attr.exclude_user : 0; - int ek = evsel ? evsel->core.attr.exclude_kernel : 0; - int eh = evsel ? evsel->core.attr.exclude_hv : 0; - int eH = evsel ? evsel->core.attr.exclude_host : 0; - int eG = evsel ? evsel->core.attr.exclude_guest : 0; - int eI = evsel ? evsel->core.attr.exclude_idle : 0; - int precise = evsel ? evsel->core.attr.precise_ip : 0; - int precise_max = 0; - int sample_read = 0; - int pinned = evsel ? evsel->core.attr.pinned : 0; - int exclusive = evsel ? evsel->core.attr.exclusive : 0; - - int exclude = eu | ek | eh; - int exclude_GH = evsel ? evsel->exclude_GH : 0; - int weak = 0; - int bpf_counter = 0; - - memset(mod, 0, sizeof(*mod)); - - while (*str) { - if (*str == 'u') { + __evlist__for_each_entry(list, evsel) { + /* Translate modifiers into the equivalent evsel excludes. */ + int eu = group ? evsel->core.attr.exclude_user : 0; + int ek = group ? evsel->core.attr.exclude_kernel : 0; + int eh = group ? evsel->core.attr.exclude_hv : 0; + int eH = group ? evsel->core.attr.exclude_host : 0; + int eG = group ? evsel->core.attr.exclude_guest : 0; + int exclude = eu | ek | eh; + int exclude_GH = group ? 
evsel->exclude_GH : 0; + + if (mod.precise) { + /* use of precise requires exclude_guest */ + eG = 1; + } + if (mod.user) { if (!exclude) exclude = eu = ek = eh = 1; if (!exclude_GH && !perf_guest) eG = 1; eu = 0; - } else if (*str == 'k') { + } + if (mod.kernel) { if (!exclude) exclude = eu = ek = eh = 1; ek = 0; - } else if (*str == 'h') { + } + if (mod.hypervisor) { if (!exclude) exclude = eu = ek = eh = 1; eh = 0; - } else if (*str == 'G') { + } + if (mod.guest) { if (!exclude_GH) exclude_GH = eG = eH = 1; eG = 0; - } else if (*str == 'H') { + } + if (mod.host) { if (!exclude_GH) exclude_GH = eG = eH = 1; eH = 0; - } else if (*str == 'I') { - eI = 1; - } else if (*str == 'p') { - precise++; - /* use of precise requires exclude_guest */ - if (!exclude_GH) - eG = 1; - } else if (*str == 'P') { - precise_max = 1; - } else if (*str == 'S') { - sample_read = 1; - } else if (*str == 'D') { - pinned = 1; - } else if (*str == 'e') { - exclusive = 1; - } else if (*str == 'W') { - weak = 1; - } else if (*str == 'b') { - bpf_counter = 1; - } else - break; - - ++str; + } + evsel->core.attr.exclude_user = eu; + evsel->core.attr.exclude_kernel = ek; + evsel->core.attr.exclude_hv = eh; + evsel->core.attr.exclude_host = eH; + evsel->core.attr.exclude_guest = eG; + evsel->exclude_GH = exclude_GH; + + /* Simple modifiers copied to the evsel. */ + if (mod.precise) { + u8 precise = evsel->core.attr.precise_ip + mod.precise; + /* + * precise ip: + * + * 0 - SAMPLE_IP can have arbitrary skid + * 1 - SAMPLE_IP must have constant skid + * 2 - SAMPLE_IP requested to have 0 skid + * 3 - SAMPLE_IP must have 0 skid + * + * See also PERF_RECORD_MISC_EXACT_IP + */ + if (precise > 3) { + char *help; + + if (asprintf(&help, + "Maximum combined precise value is 3, adding precision to \"%s\"", + evsel__name(evsel)) > 0) { + parse_events_error__handle(parse_state->error, + loc->first_column, + help, NULL); + } + return -EINVAL; + } + evsel->core.attr.precise_ip = precise; + } + if (mod.precise_max) + evsel->precise_max = 1; + if (mod.non_idle) + evsel->core.attr.exclude_idle = 1; + if (mod.sample_read) + evsel->sample_read = 1; + if (mod.pinned && evsel__is_group_leader(evsel)) + evsel->core.attr.pinned = 1; + if (mod.exclusive && evsel__is_group_leader(evsel)) + evsel->core.attr.exclusive = 1; + if (mod.weak) + evsel->weak_group = true; + if (mod.bpf) + evsel->bpf_counter = true; } - - /* - * precise ip: - * - * 0 - SAMPLE_IP can have arbitrary skid - * 1 - SAMPLE_IP must have constant skid - * 2 - SAMPLE_IP requested to have 0 skid - * 3 - SAMPLE_IP must have 0 skid - * - * See also PERF_RECORD_MISC_EXACT_IP - */ - if (precise > 3) - return -EINVAL; - - mod->eu = eu; - mod->ek = ek; - mod->eh = eh; - mod->eH = eH; - mod->eG = eG; - mod->eI = eI; - mod->precise = precise; - mod->precise_max = precise_max; - mod->exclude_GH = exclude_GH; - mod->sample_read = sample_read; - mod->pinned = pinned; - mod->weak = weak; - mod->bpf_counter = bpf_counter; - mod->exclusive = exclusive; - return 0; } -/* - * Basic modifier sanity check to validate it contains only one - * instance of any modifier (apart from 'p') present. - */ -static int check_modifier(char *str) +int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc, + struct list_head *list, + struct parse_events_modifier mod) { - char *p = str; - - /* The sizeof includes 0 byte as well. 
*/ - if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1)) - return -1; - - while (*p) { - if (*p != 'p' && strchr(p + 1, *p)) - return -1; - p++; - } - - return 0; + return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true); } -int parse_events__modifier_event(struct list_head *list, char *str, bool add) +int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc, + struct list_head *list, + struct parse_events_modifier mod) { - struct evsel *evsel; - struct event_modifier mod; - - if (str == NULL) - return 0; - - if (check_modifier(str)) - return -EINVAL; - - if (!add && get_event_modifier(&mod, str, NULL)) - return -EINVAL; - - __evlist__for_each_entry(list, evsel) { - if (add && get_event_modifier(&mod, str, evsel)) - return -EINVAL; - - evsel->core.attr.exclude_user = mod.eu; - evsel->core.attr.exclude_kernel = mod.ek; - evsel->core.attr.exclude_hv = mod.eh; - evsel->core.attr.precise_ip = mod.precise; - evsel->core.attr.exclude_host = mod.eH; - evsel->core.attr.exclude_guest = mod.eG; - evsel->core.attr.exclude_idle = mod.eI; - evsel->exclude_GH = mod.exclude_GH; - evsel->sample_read = mod.sample_read; - evsel->precise_max = mod.precise_max; - evsel->weak_group = mod.weak; - evsel->bpf_counter = mod.bpf_counter; - - if (evsel__is_group_leader(evsel)) { - evsel->core.attr.pinned = mod.pinned; - evsel->core.attr.exclusive = mod.exclusive; - } - } - - return 0; + return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false); } -int parse_events_name(struct list_head *list, const char *name) +int parse_events__set_default_name(struct list_head *list, char *name) { struct evsel *evsel; + bool used_name = false; __evlist__for_each_entry(list, evsel) { if (!evsel->name) { - evsel->name = strdup(name); + evsel->name = used_name ? strdup(name) : name; + used_name = true; if (!evsel->name) return -ENOMEM; } } - + if (!used_name) + free(name); return 0; } @@ -2121,7 +2121,7 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list) int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter, struct parse_events_error *err, struct perf_pmu *fake_pmu, - bool warn_if_reordered) + bool warn_if_reordered, bool fake_tp) { struct parse_events_state parse_state = { .list = LIST_HEAD_INIT(parse_state.list), @@ -2129,6 +2129,7 @@ int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filte .error = err, .stoken = PE_START_EVENTS, .fake_pmu = fake_pmu, + .fake_tp = fake_tp, .pmu_filter = pmu_filter, .match_legacy_cache_terms = true, }; @@ -2181,50 +2182,53 @@ int parse_event(struct evlist *evlist, const char *str) return ret; } +struct parse_events_error_entry { + /** @list: The list the error is part of. 
*/ + struct list_head list; + /** @idx: index in the parsed string */ + int idx; + /** @str: string to display at the index */ + char *str; + /** @help: optional help string */ + char *help; +}; + void parse_events_error__init(struct parse_events_error *err) { - bzero(err, sizeof(*err)); + INIT_LIST_HEAD(&err->list); } void parse_events_error__exit(struct parse_events_error *err) { - zfree(&err->str); - zfree(&err->help); - zfree(&err->first_str); - zfree(&err->first_help); + struct parse_events_error_entry *pos, *tmp; + + list_for_each_entry_safe(pos, tmp, &err->list, list) { + zfree(&pos->str); + zfree(&pos->help); + list_del_init(&pos->list); + free(pos); + } } void parse_events_error__handle(struct parse_events_error *err, int idx, char *str, char *help) { + struct parse_events_error_entry *entry; + if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) goto out_free; - switch (err->num_errors) { - case 0: - err->idx = idx; - err->str = str; - err->help = help; - break; - case 1: - err->first_idx = err->idx; - err->idx = idx; - err->first_str = err->str; - err->str = str; - err->first_help = err->help; - err->help = help; - break; - default: - pr_debug("Multiple errors dropping message: %s (%s)\n", - err->str, err->help ?: "<no help>"); - free(err->str); - err->str = str; - free(err->help); - err->help = help; - break; + + entry = zalloc(sizeof(*entry)); + if (!entry) { + pr_err("Failed to allocate memory for event parsing error: %s (%s)\n", + str, help ?: "<no help>"); + goto out_free; } - err->num_errors++; + entry->idx = idx; + entry->str = str; + entry->help = help; + list_add(&entry->list, &err->list); return; - out_free: free(str); free(help); @@ -2294,19 +2298,34 @@ static void __parse_events_error__print(int err_idx, const char *err_str, } } -void parse_events_error__print(struct parse_events_error *err, +void parse_events_error__print(const struct parse_events_error *err, const char *event) { - if (!err->num_errors) - return; + struct parse_events_error_entry *pos; + bool first = true; + + list_for_each_entry(pos, &err->list, list) { + if (!first) + fputs("\n", stderr); + __parse_events_error__print(pos->idx, pos->str, pos->help, event); + first = false; + } +} - __parse_events_error__print(err->idx, err->str, err->help, event); +/* + * In the list of errors err, do any of the error strings (str) contain the + * given needle string? 
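+ * This lets callers, tests in particular, assert that a given
+ * diagnostic was produced without depending on the order in which
+ * the errors were recorded.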
+ */ +bool parse_events_error__contains(const struct parse_events_error *err, + const char *needle) +{ + struct parse_events_error_entry *pos; - if (err->num_errors > 1) { - fputs("\nInitial error:\n", stderr); - __parse_events_error__print(err->first_idx, err->first_str, - err->first_help, event); + list_for_each_entry(pos, &err->list, list) { + if (strstr(pos->str, needle) != NULL) + return true; } + return false; } #undef MAX_WIDTH @@ -2320,7 +2339,8 @@ int parse_events_option(const struct option *opt, const char *str, parse_events_error__init(&err); ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err, - /*fake_pmu=*/NULL, /*warn_if_reordered=*/true); + /*fake_pmu=*/NULL, /*warn_if_reordered=*/true, + /*fake_tp=*/false); if (ret) { parse_events_error__print(&err, str); @@ -2558,7 +2578,7 @@ int parse_events_term__term(struct parse_events_term **term, } int parse_events_term__clone(struct parse_events_term **new, - struct parse_events_term *term) + const struct parse_events_term *term) { char *str; struct parse_events_term temp = *term; @@ -2673,15 +2693,6 @@ int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct return 0; } -void parse_events_evlist_error(struct parse_events_state *parse_state, - int idx, const char *str) -{ - if (!parse_state->error) - return; - - parse_events_error__handle(parse_state->error, idx, strdup(str), NULL); -} - static void config_terms_list(char *buf, size_t buf_sz) { int i; diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 63c0a36a4bf1..e13de2c8b706 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -32,14 +32,14 @@ int parse_events_option_new_evlist(const struct option *opt, const char *str, in __attribute__((nonnull(1, 2, 4))) int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter, struct parse_events_error *error, struct perf_pmu *fake_pmu, - bool warn_if_reordered); + bool warn_if_reordered, bool fake_tp); __attribute__((nonnull(1, 2, 3))) static inline int parse_events(struct evlist *evlist, const char *str, struct parse_events_error *err) { return __parse_events(evlist, str, /*pmu_filter=*/NULL, err, /*fake_pmu=*/NULL, - /*warn_if_reordered=*/true); + /*warn_if_reordered=*/true, /*fake_tp=*/false); } int parse_event(struct evlist *evlist, const char *str); @@ -130,13 +130,8 @@ struct parse_events_term { }; struct parse_events_error { - int num_errors; /* number of errors encountered */ - int idx; /* index in the parsed string */ - char *str; /* string to display at the index */ - char *help; /* optional help string */ - int first_idx;/* as above, but for the first encountered error */ - char *first_str; - char *first_help; + /** @list: The head of a list of errors. */ + struct list_head list; }; /* A wrapper around a list of terms for the sake of better type safety. */ @@ -157,6 +152,8 @@ struct parse_events_state { int stoken; /* Special fake PMU marker for testing. */ struct perf_pmu *fake_pmu; + /* Skip actual tracepoint processing for testing. */ + bool fake_tp; /* If non-null, when wildcard matching only match the given PMU. */ const char *pmu_filter; /* Should PE_LEGACY_NAME tokens be generated for config terms? 
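	 * That is, should a legacy cache name appearing as a term, e.g.
	 * inside cpu//, lex as PE_LEGACY_CACHE rather than PE_NAME?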
*/ @@ -183,7 +180,7 @@ int parse_events_term__term(struct parse_events_term **term, enum parse_events__term_type term_rhs, void *loc_term, void *loc_val); int parse_events_term__clone(struct parse_events_term **new, - struct parse_events_term *term); + const struct parse_events_term *term); void parse_events_term__delete(struct parse_events_term *term); void parse_events_terms__delete(struct parse_events_terms *terms); @@ -191,33 +188,49 @@ void parse_events_terms__init(struct parse_events_terms *terms); void parse_events_terms__exit(struct parse_events_terms *terms); int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input); int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb); -int parse_events__modifier_event(struct list_head *list, char *str, bool add); -int parse_events__modifier_group(struct list_head *list, char *event_mod); -int parse_events_name(struct list_head *list, const char *name); -int parse_events_add_tracepoint(struct list_head *list, int *idx, + +struct parse_events_modifier { + u8 precise; /* Number of repeated 'p' for precision. */ + bool precise_max : 1; /* 'P' */ + bool non_idle : 1; /* 'I' */ + bool sample_read : 1; /* 'S' */ + bool pinned : 1; /* 'D' */ + bool exclusive : 1; /* 'e' */ + bool weak : 1; /* 'W' */ + bool bpf : 1; /* 'b' */ + bool user : 1; /* 'u' */ + bool kernel : 1; /* 'k' */ + bool hypervisor : 1; /* 'h' */ + bool guest : 1; /* 'G' */ + bool host : 1; /* 'H' */ +}; + +int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc, + struct list_head *list, struct parse_events_modifier mod); +int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc, + struct list_head *list, struct parse_events_modifier mod); +int parse_events__set_default_name(struct list_head *list, char *name); +int parse_events_add_tracepoint(struct parse_events_state *parse_state, + struct list_head *list, const char *sys, const char *event, struct parse_events_error *error, struct parse_events_terms *head_config, void *loc); int parse_events_add_numeric(struct parse_events_state *parse_state, struct list_head *list, u32 type, u64 config, - struct parse_events_terms *head_config, + const struct parse_events_terms *head_config, bool wildcard); int parse_events_add_tool(struct parse_events_state *parse_state, struct list_head *list, int tool_event); int parse_events_add_cache(struct list_head *list, int *idx, const char *name, struct parse_events_state *parse_state, - struct parse_events_terms *head_config); + struct parse_events_terms *parsed_terms); int parse_events__decode_legacy_cache(const char *name, int pmu_type, __u64 *config); int parse_events_add_breakpoint(struct parse_events_state *parse_state, struct list_head *list, u64 addr, char *type, u64 len, struct parse_events_terms *head_config); -int parse_events_add_pmu(struct parse_events_state *parse_state, - struct list_head *list, const char *name, - const struct parse_events_terms *const_parsed_terms, - bool auto_merge_stats, void *loc); struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr, const char *name, const char *metric_id, @@ -228,18 +241,20 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state, const struct parse_events_terms *const_parsed_terms, struct list_head **listp, void *loc); +int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state, + const char *event_or_pmu, + const struct parse_events_terms *const_parsed_terms, + struct 
list_head **listp, + void *loc_); + void parse_events__set_leader(char *name, struct list_head *list); -void parse_events_update_lists(struct list_head *list_event, - struct list_head *list_all); -void parse_events_evlist_error(struct parse_events_state *parse_state, - int idx, const char *str); struct event_symbol { const char *symbol; const char *alias; }; -extern struct event_symbol event_symbols_hw[]; -extern struct event_symbol event_symbols_sw[]; +extern const struct event_symbol event_symbols_hw[]; +extern const struct event_symbol event_symbols_sw[]; char *parse_events_formats_error_string(char *additional_terms); @@ -247,9 +262,10 @@ void parse_events_error__init(struct parse_events_error *err); void parse_events_error__exit(struct parse_events_error *err); void parse_events_error__handle(struct parse_events_error *err, int idx, char *str, char *help); -void parse_events_error__print(struct parse_events_error *err, +void parse_events_error__print(const struct parse_events_error *err, const char *event); - +bool parse_events_error__contains(const struct parse_events_error *err, + const char *needle); #ifdef HAVE_LIBELF_SUPPORT /* * If the probe point starts with '%', diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index e86c45675e1d..16045c383ada 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -18,26 +18,34 @@ char *parse_events_get_text(yyscan_t yyscanner); YYSTYPE *parse_events_get_lval(yyscan_t yyscanner); +int parse_events_get_column(yyscan_t yyscanner); +int parse_events_get_leng(yyscan_t yyscanner); -static int __value(YYSTYPE *yylval, char *str, int base, int token) +static int get_column(yyscan_t scanner) { - u64 num; - - errno = 0; - num = strtoull(str, NULL, base); - if (errno) - return PE_ERROR; - - yylval->num = num; - return token; + return parse_events_get_column(scanner) - parse_events_get_leng(scanner); } -static int value(yyscan_t scanner, int base) +static int value(struct parse_events_state *parse_state, yyscan_t scanner, int base) { YYSTYPE *yylval = parse_events_get_lval(scanner); char *text = parse_events_get_text(scanner); + u64 num; - return __value(yylval, text, base, PE_VALUE); + errno = 0; + num = strtoull(text, NULL, base); + if (errno) { + struct parse_events_error *error = parse_state->error; + char *help = NULL; + + if (asprintf(&help, "Bad base %d number \"%s\"", base, text) > 0) + parse_events_error__handle(error, get_column(scanner), help , NULL); + + return PE_ERROR; + } + + yylval->num = num; + return PE_VALUE; } static int str(yyscan_t scanner, int token) @@ -88,6 +96,11 @@ static int drv_str(yyscan_t scanner, int token) return token; } +/* + * Use yyless to return all the characaters to the input. Update the column for + * location debugging. If __alloc is non-zero set yylval to the text for the + * returned token's value. 
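+ * For example, PE_EVENT_NAME is produced by rewinding so that the
+ * entire event text can later be used as the evsel's default name
+ * (see the event_name rule in parse-events.y).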
+ */ #define REWIND(__alloc) \ do { \ YYSTYPE *__yylval = parse_events_get_lval(yyscanner); \ @@ -134,6 +147,77 @@ static int hw_term(yyscan_t scanner, int config) return PE_TERM_HW; } +static void modifiers_error(struct parse_events_state *parse_state, yyscan_t scanner, + int pos, char mod_char, const char *mod_name) +{ + struct parse_events_error *error = parse_state->error; + char *help = NULL; + + if (asprintf(&help, "Duplicate modifier '%c' (%s)", mod_char, mod_name) > 0) + parse_events_error__handle(error, get_column(scanner) + pos, help , NULL); +} + +static int modifiers(struct parse_events_state *parse_state, yyscan_t scanner) +{ + YYSTYPE *yylval = parse_events_get_lval(scanner); + char *text = parse_events_get_text(scanner); + struct parse_events_modifier mod = { .precise = 0, }; + + for (size_t i = 0, n = strlen(text); i < n; i++) { +#define CASE(c, field) \ + case c: \ + if (mod.field) { \ + modifiers_error(parse_state, scanner, i, c, #field); \ + return PE_ERROR; \ + } \ + mod.field = true; \ + break + + switch (text[i]) { + CASE('u', user); + CASE('k', kernel); + CASE('h', hypervisor); + CASE('I', non_idle); + CASE('G', guest); + CASE('H', host); + case 'p': + mod.precise++; + /* + * precise ip: + * + * 0 - SAMPLE_IP can have arbitrary skid + * 1 - SAMPLE_IP must have constant skid + * 2 - SAMPLE_IP requested to have 0 skid + * 3 - SAMPLE_IP must have 0 skid + * + * See also PERF_RECORD_MISC_EXACT_IP + */ + if (mod.precise > 3) { + struct parse_events_error *error = parse_state->error; + char *help = strdup("Maximum precise value is 3"); + + if (help) { + parse_events_error__handle(error, get_column(scanner) + i, + help , NULL); + } + return PE_ERROR; + } + break; + CASE('P', precise_max); + CASE('S', sample_read); + CASE('D', pinned); + CASE('W', weak); + CASE('e', exclusive); + CASE('b', bpf); + default: + return PE_ERROR; + } +#undef CASE + } + yylval->mod = mod; + return PE_MODIFIER_EVENT; +} + #define YY_USER_ACTION \ do { \ yylloc->last_column = yylloc->first_column; \ @@ -158,15 +242,15 @@ event [^,{}/]+ num_dec [0-9]+ num_hex 0x[a-fA-F0-9]{1,16} num_raw_hex [a-fA-F0-9]{1,16} -name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]!\-]* -name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\'] +name [a-zA-Z0-9_*?\[\]][a-zA-Z0-9_*?.\[\]!\-]* +name_tag [\'][a-zA-Z0-9_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\'] name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]* drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)? /* * If you add a modifier you need to update check_modifier(). * Also, the letters in modifier_event must not be in modifier_bp. */ -modifier_event [ukhpPGHSDIWeb]+ +modifier_event [ukhpPGHSDIWeb]{1,15} modifier_bp [rwx]{1,3} lc_type (L1-dcache|l1-d|l1d|L1-data|L1-icache|l1-i|l1i|L1-instruction|LLC|L2|dTLB|d-tlb|Data-TLB|iTLB|i-tlb|Instruction-TLB|branch|branches|bpu|btb|bpc|node) lc_op_result (load|loads|read|store|stores|write|prefetch|prefetches|speculative-read|speculative-load|refs|Reference|ops|access|misses|miss) @@ -283,8 +367,8 @@ r0x{num_raw_hex} { return str(yyscanner, PE_RAW); } */ "/"/{digit} { return PE_BP_SLASH; } "/"/{non_digit} { BEGIN(config); return '/'; } -{num_dec} { return value(yyscanner, 10); } -{num_hex} { return value(yyscanner, 16); } +{num_dec} { return value(_parse_state, yyscanner, 10); } +{num_hex} { return value(_parse_state, yyscanner, 16); } /* * We need to separate 'mem:' scanner part, in order to get specific * modifier bits parsed out. 
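 * (For example, in 'mem:0x1000:rw' the trailing 'rw' must lex as
 * breakpoint access bits rather than as a generic name.)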
Otherwise we would need to handle PE_NAME @@ -330,10 +414,10 @@ cgroup-switches { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CG {lc_type}-{lc_op_result}-{lc_op_result} { return str(yyscanner, PE_LEGACY_CACHE); } mem: { BEGIN(mem); return PE_PREFIX_MEM; } r{num_raw_hex} { return str(yyscanner, PE_RAW); } -{num_dec} { return value(yyscanner, 10); } -{num_hex} { return value(yyscanner, 16); } +{num_dec} { return value(_parse_state, yyscanner, 10); } +{num_hex} { return value(_parse_state, yyscanner, 16); } -{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); } +{modifier_event} { return modifiers(_parse_state, yyscanner); } {name} { return str(yyscanner, PE_NAME); } {name_tag} { return str(yyscanner, PE_NAME); } "/" { BEGIN(config); return '/'; } diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index de098caf0c1c..b3c51f06cbdc 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -69,12 +69,12 @@ static void free_list_evsel(struct list_head* list_evsel) %type <num> PE_VALUE_SYM_HW %type <num> PE_VALUE_SYM_SW %type <num> PE_VALUE_SYM_TOOL +%type <mod> PE_MODIFIER_EVENT %type <term_type> PE_TERM %type <num> value_sym %type <str> PE_RAW %type <str> PE_NAME %type <str> PE_LEGACY_CACHE -%type <str> PE_MODIFIER_EVENT %type <str> PE_MODIFIER_BP %type <str> PE_EVENT_NAME %type <str> PE_DRV_CFG_TERM @@ -111,6 +111,7 @@ static void free_list_evsel(struct list_head* list_evsel) { char *str; u64 num; + struct parse_events_modifier mod; enum parse_events__term_type term_type; struct list_head *list_evsel; struct parse_events_terms *list_terms; @@ -126,6 +127,10 @@ static void free_list_evsel(struct list_head* list_evsel) } %% + /* + * Entry points. We are either parsing events or terminals. Just terminal + * parsing is used for parsing events in sysfs. + */ start: PE_START_EVENTS start_events | @@ -133,31 +138,36 @@ PE_START_TERMS start_terms start_events: groups { + /* Take the parsed events, groups.. and place into parse_state. */ + struct list_head *groups = $1; struct parse_events_state *parse_state = _parse_state; - /* frees $1 */ - parse_events_update_lists($1, &parse_state->list); + list_splice_tail(groups, &parse_state->list); + free(groups); } -groups: +groups: /* A list of groups or events. */ groups ',' group { - struct list_head *list = $1; - struct list_head *group = $3; + /* Merge group into the list of events/groups. */ + struct list_head *groups = $1; + struct list_head *group = $3; - /* frees $3 */ - parse_events_update_lists(group, list); - $$ = list; + list_splice_tail(group, groups); + free(group); + $$ = groups; } | groups ',' event { - struct list_head *list = $1; + /* Merge event into the list of events/groups. */ + struct list_head *groups = $1; struct list_head *event = $3; - /* frees $3 */ - parse_events_update_lists(event, list); - $$ = list; + + list_splice_tail(event, groups); + free(event); + $$ = groups; } | group @@ -167,20 +177,13 @@ event group: group_def ':' PE_MODIFIER_EVENT { + /* Apply the modifier to the events in the group_def. 
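	 * For example '{cycles,instructions}:k' sets exclude_user and
	 * exclude_hv on both events in the group.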
*/ struct list_head *list = $1; int err; - err = parse_events__modifier_group(list, $3); - free($3); - if (err) { - struct parse_events_state *parse_state = _parse_state; - struct parse_events_error *error = parse_state->error; - - parse_events_error__handle(error, @3.first_column, - strdup("Bad modifier"), NULL); - free_list_evsel(list); + err = parse_events__modifier_group(_parse_state, &@3, list, $3); + if (err) YYABORT; - } $$ = list; } | @@ -191,7 +194,10 @@ PE_NAME '{' events '}' { struct list_head *list = $3; - /* Takes ownership of $1. */ + /* + * Set the first entry of list to be the leader. Set the group name on + * the leader to $1 taking ownership. + */ parse_events__set_leader($1, list); $$ = list; } @@ -200,6 +206,7 @@ PE_NAME '{' events '}' { struct list_head *list = $2; + /* Set the first entry of list to be the leader clearing the group name. */ parse_events__set_leader(NULL, list); $$ = list; } @@ -207,12 +214,12 @@ PE_NAME '{' events '}' events: events ',' event { + struct list_head *events = $1; struct list_head *event = $3; - struct list_head *list = $1; - /* frees $3 */ - parse_events_update_lists(event, list); - $$ = list; + list_splice_tail(event, events); + free(event); + $$ = events; } | event @@ -230,17 +237,9 @@ event_name PE_MODIFIER_EVENT * (there could be more events added for multiple tracepoint * definitions via '*?'. */ - err = parse_events__modifier_event(list, $2, false); - free($2); - if (err) { - struct parse_events_state *parse_state = _parse_state; - struct parse_events_error *error = parse_state->error; - - parse_events_error__handle(error, @2.first_column, - strdup("Bad modifier"), NULL); - free_list_evsel(list); + err = parse_events__modifier_event(_parse_state, &@2, list, $2); + if (err) YYABORT; - } $$ = list; } | @@ -249,10 +248,14 @@ event_name event_name: PE_EVENT_NAME event_def { - int err; + /* + * When an event is parsed the text is rewound and the entire text of + * the event is set to the str of PE_EVENT_NAME token matched here. If + * no name was on an event via a term, set the name to the entire text + * taking ownership of the allocation. + */ + int err = parse_events__set_default_name($2, $1); - err = parse_events_name($2, $1); - free($1); if (err) { free_list_evsel($2); YYNOMEM; @@ -273,78 +276,15 @@ event_def: event_pmu | event_pmu: PE_NAME opt_pmu_config { - struct parse_events_state *parse_state = _parse_state; /* List of created evsels. */ struct list_head *list = NULL; - char *pattern = NULL; + int err = parse_events_multi_pmu_add_or_add_pmu(_parse_state, $1, $2, &list, &@1); -#define CLEANUP \ - do { \ - parse_events_terms__delete($2); \ - free(list); \ - free($1); \ - free(pattern); \ - } while(0) - - list = alloc_list(); - if (!list) { - CLEANUP; - YYNOMEM; - } - /* Attempt to add to list assuming $1 is a PMU name. */ - if (parse_events_add_pmu(parse_state, list, $1, $2, /*auto_merge_stats=*/false, &@1)) { - struct perf_pmu *pmu = NULL; - int ok = 0; - - /* Failure to add, try wildcard expansion of $1 as a PMU name. 
*/ - if (asprintf(&pattern, "%s*", $1) < 0) { - CLEANUP; - YYNOMEM; - } - - while ((pmu = perf_pmus__scan(pmu)) != NULL) { - const char *name = pmu->name; - - if (parse_events__filter_pmu(parse_state, pmu)) - continue; - - if (!strncmp(name, "uncore_", 7) && - strncmp($1, "uncore_", 7)) - name += 7; - if (!perf_pmu__match(pattern, name, $1) || - !perf_pmu__match(pattern, pmu->alias_name, $1)) { - bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu); - - if (!parse_events_add_pmu(parse_state, list, pmu->name, $2, - auto_merge_stats, &@1)) { - ok++; - parse_state->wild_card_pmus = true; - } - } - } - - if (!ok) { - /* Failure to add, assume $1 is an event name. */ - zfree(&list); - ok = !parse_events_multi_pmu_add(parse_state, $1, $2, &list, &@1); - } - if (!ok) { - struct parse_events_error *error = parse_state->error; - char *help; - - if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", $1) < 0) - help = NULL; - parse_events_error__handle(error, @1.first_column, - strdup("Bad event or PMU"), - help); - CLEANUP; - YYABORT; - } - } + parse_events_terms__delete($2); + free($1); + if (err) + PE_ABORT(err); $$ = list; - list = NULL; - CLEANUP; -#undef CLEANUP } | PE_NAME sep_dc @@ -536,10 +476,8 @@ tracepoint_name opt_event_config list = alloc_list(); if (!list) YYNOMEM; - if (error) - error->idx = @1.first_column; - err = parse_events_add_tracepoint(list, &parse_state->idx, $1.sys, $1.event, + err = parse_events_add_tracepoint(parse_state, list, $1.sys, $1.event, error, $2, &@1); parse_events_terms__delete($2); @@ -668,6 +606,11 @@ event_term } name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE +| +PE_TERM_HW +{ + $$ = $1.str; +} event_term: PE_RAW @@ -709,20 +652,6 @@ name_or_raw '=' PE_VALUE $$ = term; } | -name_or_raw '=' PE_TERM_HW -{ - struct parse_events_term *term; - int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, $3.str, &@1, &@3); - - if (err) { - free($1); - free($3.str); - PE_ABORT(err); - } - $$ = term; -} -| PE_LEGACY_CACHE { struct parse_events_term *term; @@ -775,18 +704,6 @@ PE_TERM '=' name_or_raw $$ = term; } | -PE_TERM '=' PE_TERM_HW -{ - struct parse_events_term *term; - int err = parse_events_term__str(&term, $1, /*config=*/NULL, $3.str, &@1, &@3); - - if (err) { - free($3.str); - PE_ABORT(err); - } - $$ = term; -} -| PE_TERM '=' PE_TERM { struct parse_events_term *term; @@ -847,9 +764,15 @@ sep_slash_slash_dc: '/' '/' | ':' | %% -void parse_events_error(YYLTYPE *loc, void *parse_state, +void parse_events_error(YYLTYPE *loc, void *_parse_state, void *scanner __maybe_unused, char const *msg __maybe_unused) { - parse_events_evlist_error(parse_state, loc->last_column, "parser error"); + struct parse_events_state *parse_state = _parse_state; + + if (!parse_state->error || !list_empty(&parse_state->error->list)) + return; + + parse_events_error__handle(parse_state->error, loc->last_column, + strdup("Unrecognized input"), NULL); } diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c index a4a100425b3a..cda1c620968e 100644 --- a/tools/perf/util/parse-regs-options.c +++ b/tools/perf/util/parse-regs-options.c @@ -46,22 +46,18 @@ __parse_regs(const struct option *opt, const char *str, int unset, bool intr) if (!strcmp(s, "?")) { fprintf(stderr, "available registers: "); -#ifdef HAVE_PERF_REGS_SUPPORT - for (r = sample_reg_masks; r->name; r++) { + for (r = arch__sample_reg_masks(); r->name; r++) { if (r->mask & mask) fprintf(stderr, "%s ", r->name); } -#endif fputc('\n', stderr); /* just printing 
available regs */ goto error; } -#ifdef HAVE_PERF_REGS_SUPPORT - for (r = sample_reg_masks; r->name; r++) { + for (r = arch__sample_reg_masks(); r->name; r++) { if ((r->mask & mask) && !strcasecmp(s, r->name)) break; } -#endif if (!r || !r->name) { ui__warning("Unknown register \"%s\", check man page or run \"perf record %s?\"\n", s, intr ? "-I" : "--user-regs="); diff --git a/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c b/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c index 696566c54768..9dcda80d310f 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/arm64/include/uapi/asm/perf_regs.h" @@ -92,5 +90,3 @@ uint64_t __perf_reg_sp_arm64(void) { return PERF_REG_ARM64_SP; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_arm.c b/tools/perf/util/perf-regs-arch/perf_regs_arm.c index 700fd07cd2aa..e29d130a587a 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_arm.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_arm.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/arm/include/uapi/asm/perf_regs.h" @@ -56,5 +54,3 @@ uint64_t __perf_reg_sp_arm(void) { return PERF_REG_ARM_SP; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_csky.c b/tools/perf/util/perf-regs-arch/perf_regs_csky.c index a2841094e096..75b461ef2eba 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_csky.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_csky.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../arch/csky/include/uapi/asm/perf_regs.h" @@ -96,5 +94,3 @@ uint64_t __perf_reg_sp_csky(void) { return PERF_REG_CSKY_SP; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c b/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c index a9ba0f934123..043f97f4e3ac 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/loongarch/include/uapi/asm/perf_regs.h" @@ -87,5 +85,3 @@ uint64_t __perf_reg_sp_loongarch(void) { return PERF_REG_LOONGARCH_R3; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_mips.c b/tools/perf/util/perf-regs-arch/perf_regs_mips.c index 5a45830cfbf5..793178fc3c78 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_mips.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_mips.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/mips/include/uapi/asm/perf_regs.h" @@ -83,5 +81,3 @@ uint64_t __perf_reg_sp_mips(void) { return PERF_REG_MIPS_R29; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c b/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c index 1f0d682db74a..08636bb09a3a 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/powerpc/include/uapi/asm/perf_regs.h" @@ -141,5 +139,3 @@ uint64_t __perf_reg_sp_powerpc(void) { return PERF_REG_POWERPC_R1; } - -#endif 
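The per-arch hunks above, and the riscv/s390/x86 ones that follow, are one mechanical change repeated: each file can drop its HAVE_PERF_REGS_SUPPORT guard because perf_regs.c now supplies a __weak arch__sample_reg_masks() that returns an empty table whenever no architecture object overrides it. A minimal, self-contained sketch of that weak-default linking pattern (hypothetical names, not the perf sources; builds with GCC or Clang):

/* weak_default.c - sketch of the __attribute__((weak)) fallback pattern.
 * The weak definition returns an empty, NULL-terminated table; any object
 * file that defines a strong arch_sample_reg_masks() replaces it at link
 * time, so callers can always loop without an #ifdef.
 */
#include <stdio.h>

struct sample_reg {
	const char *name;
	unsigned long mask;
};

/* Empty fallback table, terminated the way SMPL_REG_END terminates perf's. */
static const struct sample_reg no_regs[] = { { .name = NULL, .mask = 0 } };

__attribute__((weak)) const struct sample_reg *arch_sample_reg_masks(void)
{
	return no_regs;
}

int main(void)
{
	/* Same loop shape as the __parse_regs() hunk above: prints nothing
	 * when only the weak fallback is linked in. */
	for (const struct sample_reg *r = arch_sample_reg_masks(); r->name; r++)
		printf("%s\n", r->name);
	return 0;
}

Linking a translation unit that defines a non-weak arch_sample_reg_masks() silently wins over the fallback, which is why every per-arch #ifdef/#endif pair can simply be deleted.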
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_riscv.c b/tools/perf/util/perf-regs-arch/perf_regs_riscv.c index e432630be4c5..337b687c655d 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_riscv.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_riscv.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/riscv/include/uapi/asm/perf_regs.h" @@ -88,5 +86,3 @@ uint64_t __perf_reg_sp_riscv(void) { return PERF_REG_RISCV_SP; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_s390.c b/tools/perf/util/perf-regs-arch/perf_regs_s390.c index 1c7a46db778c..d69bba881080 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_s390.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_s390.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/s390/include/uapi/asm/perf_regs.h" @@ -92,5 +90,3 @@ uint64_t __perf_reg_sp_s390(void) { return PERF_REG_S390_R15; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_x86.c b/tools/perf/util/perf-regs-arch/perf_regs_x86.c index 873c620f0634..708954a9d35d 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_x86.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_x86.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/x86/include/uapi/asm/perf_regs.h" @@ -94,5 +92,3 @@ uint64_t __perf_reg_sp_x86(void) { return PERF_REG_X86_SP; } - -#endif diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c index 8f04d3b7f3ec..59fbbba79697 100644 --- a/tools/perf/util/perf_event_attr_fprintf.c +++ b/tools/perf/util/perf_event_attr_fprintf.c @@ -7,6 +7,8 @@ #include <linux/types.h> #include <linux/perf_event.h> #include "util/evsel_fprintf.h" +#include "util/pmu.h" +#include "util/pmus.h" #include "trace-event.h" struct bit_names { @@ -75,9 +77,12 @@ static void __p_read_format(char *buf, size_t size, u64 value) } #define ENUM_ID_TO_STR_CASE(x) case x: return (#x); -static const char *stringify_perf_type_id(u64 value) +static const char *stringify_perf_type_id(struct perf_pmu *pmu, u32 type) { - switch (value) { + if (pmu) + return pmu->name; + + switch (type) { ENUM_ID_TO_STR_CASE(PERF_TYPE_HARDWARE) ENUM_ID_TO_STR_CASE(PERF_TYPE_SOFTWARE) ENUM_ID_TO_STR_CASE(PERF_TYPE_TRACEPOINT) @@ -175,9 +180,9 @@ do { \ #define print_id_unsigned(_s) PRINT_ID(_s, "%"PRIu64) #define print_id_hex(_s) PRINT_ID(_s, "%#"PRIx64) -static void __p_type_id(char *buf, size_t size, u64 value) +static void __p_type_id(struct perf_pmu *pmu, char *buf, size_t size, u64 value) { - print_id_unsigned(stringify_perf_type_id(value)); + print_id_unsigned(stringify_perf_type_id(pmu, value)); } static void __p_config_hw_id(char *buf, size_t size, u64 value) @@ -217,8 +222,14 @@ static void __p_config_tracepoint_id(char *buf, size_t size, u64 value) } #endif -static void __p_config_id(char *buf, size_t size, u32 type, u64 value) +static void __p_config_id(struct perf_pmu *pmu, char *buf, size_t size, u32 type, u64 value) { + const char *name = perf_pmu__name_from_config(pmu, value); + + if (name) { + print_id_hex(name); + return; + } switch (type) { case PERF_TYPE_HARDWARE: return __p_config_hw_id(buf, size, value); @@ -246,8 +257,8 @@ static void __p_config_id(char *buf, size_t size, u32 type, u64 value) #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val) #define p_branch_sample_type(val) 
__p_branch_sample_type(buf, BUF_SIZE, val) #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val) -#define p_type_id(val) __p_type_id(buf, BUF_SIZE, val) -#define p_config_id(val) __p_config_id(buf, BUF_SIZE, attr->type, val) +#define p_type_id(val) __p_type_id(pmu, buf, BUF_SIZE, val) +#define p_config_id(val) __p_config_id(pmu, buf, BUF_SIZE, attr->type, val) #define PRINT_ATTRn(_n, _f, _p, _a) \ do { \ @@ -262,6 +273,7 @@ do { \ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, attr__fprintf_f attr__fprintf, void *priv) { + struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type); char buf[BUF_SIZE]; int ret = 0; diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index e2275856b570..44b90bbf2d07 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -21,7 +21,14 @@ uint64_t __weak arch__user_reg_mask(void) return 0; } -#ifdef HAVE_PERF_REGS_SUPPORT +static const struct sample_reg sample_reg_masks[] = { + SMPL_REG_END +}; + +const struct sample_reg * __weak arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} const char *perf_reg_name(int id, const char *arch) { @@ -125,5 +132,3 @@ uint64_t perf_arch_reg_sp(const char *arch) pr_err("Fail to find SP register for arch %s, returns 0\n", arch); return 0; } - -#endif diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index ecd2a5362042..f2d0736d65cc 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -26,9 +26,7 @@ enum { int arch_sdt_arg_parse_op(char *old_op, char **new_op); uint64_t arch__intr_reg_mask(void); uint64_t arch__user_reg_mask(void); - -#ifdef HAVE_PERF_REGS_SUPPORT -extern const struct sample_reg sample_reg_masks[]; +const struct sample_reg *arch__sample_reg_masks(void); const char *perf_reg_name(int id, const char *arch); int perf_reg_value(u64 *valp, struct regs_dump *regs, int id); @@ -67,34 +65,4 @@ static inline uint64_t DWARF_MINIMAL_REGS(const char *arch) return (1ULL << perf_arch_reg_ip(arch)) | (1ULL << perf_arch_reg_sp(arch)); } -#else - -static inline uint64_t DWARF_MINIMAL_REGS(const char *arch __maybe_unused) -{ - return 0; -} - -static inline const char *perf_reg_name(int id __maybe_unused, const char *arch __maybe_unused) -{ - return "unknown"; -} - -static inline int perf_reg_value(u64 *valp __maybe_unused, - struct regs_dump *regs __maybe_unused, - int id __maybe_unused) -{ - return 0; -} - -static inline uint64_t perf_arch_reg_ip(const char *arch __maybe_unused) -{ - return 0; -} - -static inline uint64_t perf_arch_reg_sp(const char *arch __maybe_unused) -{ - return 0; -} - -#endif /* HAVE_PERF_REGS_SUPPORT */ #endif /* __PERF_REGS_H */ diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 3c9609944a2f..888ce9912275 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -36,6 +36,18 @@ struct perf_pmu perf_pmu__fake = { #define UNIT_MAX_LEN 31 /* max length for event unit name */ +enum event_source { + /* An event loaded from /sys/devices/<pmu>/events. */ + EVENT_SRC_SYSFS, + /* An event loaded from a CPUID matched json file. */ + EVENT_SRC_CPU_JSON, + /* + * An event loaded from a /sys/devices/<pmu>/identifier matched json + * file. + */ + EVENT_SRC_SYS_JSON, +}; + /** * struct perf_pmu_alias - An event either read from sysfs or builtin in * pmu-events.c, created by parsing the pmu-events json files. 
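The event_source enum added above lets perf_pmu__new_alias() record where each alias came from, so events read from sysfs, from CPUID-matched JSON, and from identifier-matched sys JSON can be counted separately (see the cpu_json_aliases/sys_json_aliases bookkeeping later in this file). A rough sketch of that tagging idea, with hypothetical struct and counter names rather than perf's own:

/* source_tally.c - sketch: tag each alias with an origin and keep
 * per-source counters, mirroring the switch (src) added further down.
 */
#include <stdio.h>

enum event_source {
	EVENT_SRC_SYSFS,
	EVENT_SRC_CPU_JSON,
	EVENT_SRC_SYS_JSON,
};

struct alias_counts {
	unsigned int sysfs;
	unsigned int cpu_json;
	unsigned int sys_json;
};

static void count_alias(struct alias_counts *c, enum event_source src)
{
	switch (src) {
	case EVENT_SRC_SYSFS:
		c->sysfs++;
		break;
	case EVENT_SRC_CPU_JSON:
		c->cpu_json++;
		break;
	case EVENT_SRC_SYS_JSON:
		c->sys_json++;
		break;
	}
}

int main(void)
{
	struct alias_counts c = { 0 };

	count_alias(&c, EVENT_SRC_SYSFS);
	count_alias(&c, EVENT_SRC_SYS_JSON);
	printf("sysfs=%u cpu_json=%u sys_json=%u\n",
	       c.sysfs, c.cpu_json, c.sys_json);
	return 0;
}

Keeping the counts apart matters because the reworked perf_pmu__num_events() further down combines them differently depending on whether the CPU JSON aliases have already been merged in.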
@@ -182,7 +194,7 @@ static void perf_pmu_format__load(const struct perf_pmu *pmu, struct perf_pmu_fo * Parse & process all the sysfs attributes located under * the directory specified in 'dir' parameter. */ -int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load) +static int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load) { struct dirent *evt_ent; DIR *format_dir; @@ -232,7 +244,7 @@ int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load) * located at: * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes. */ -static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name) +static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name, bool eager_load) { int fd; @@ -241,7 +253,7 @@ static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name) return 0; /* it'll close the fd */ - if (perf_pmu__format_parse(pmu, fd, /*eager_load=*/false)) + if (perf_pmu__format_parse(pmu, fd, eager_load)) return -1; return 0; @@ -425,9 +437,30 @@ static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu, { struct perf_pmu_alias *alias; - if (load && !pmu->sysfs_aliases_loaded) - pmu_aliases_parse(pmu); + if (load && !pmu->sysfs_aliases_loaded) { + bool has_sysfs_event; + char event_file_name[FILENAME_MAX + 8]; + + /* + * Test if alias/event 'name' exists in the PMU's sysfs/events + * directory. If not skip parsing the sysfs aliases. Sysfs event + * name must be all lower or all upper case. + */ + scnprintf(event_file_name, sizeof(event_file_name), "events/%s", name); + for (size_t i = 7, n = 7 + strlen(name); i < n; i++) + event_file_name[i] = tolower(event_file_name[i]); + + has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name); + if (!has_sysfs_event) { + for (size_t i = 7, n = 7 + strlen(name); i < n; i++) + event_file_name[i] = toupper(event_file_name[i]); + has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name); + } + if (has_sysfs_event) + pmu_aliases_parse(pmu); + + } list_for_each_entry(alias, &pmu->aliases, list) { if (!strcasecmp(alias->name, name)) return alias; @@ -500,7 +533,7 @@ static int update_alias(const struct pmu_event *pe, static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, const char *desc, const char *val, FILE *val_fd, - const struct pmu_event *pe) + const struct pmu_event *pe, enum event_source src) { struct perf_pmu_alias *alias; int ret; @@ -518,7 +551,8 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, unit = pe->unit; perpkg = pe->perpkg; deprecated = pe->deprecated; - pmu_name = pe->pmu; + if (pe->pmu && strcmp(pe->pmu, "default_core")) + pmu_name = pe->pmu; } alias = zalloc(sizeof(*alias)); @@ -552,25 +586,30 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, } snprintf(alias->unit, sizeof(alias->unit), "%s", unit); } - if (!pe) { - /* Update an event from sysfs with json data. */ - struct update_alias_data data = { - .pmu = pmu, - .alias = alias, - }; - + switch (src) { + default: + case EVENT_SRC_SYSFS: alias->from_sysfs = true; if (pmu->events_table) { + /* Update an event from sysfs with json data. 
*/ + struct update_alias_data data = { + .pmu = pmu, + .alias = alias, + }; if (pmu_events_table__find_event(pmu->events_table, pmu, name, update_alias, &data) == 0) - pmu->loaded_json_aliases++; + pmu->cpu_json_aliases++; } - } - - if (!pe) pmu->sysfs_aliases++; - else - pmu->loaded_json_aliases++; + break; + case EVENT_SRC_CPU_JSON: + pmu->cpu_json_aliases++; + break; + case EVENT_SRC_SYS_JSON: + pmu->sys_json_aliases++; + break; + + } list_add_tail(&alias->list, &pmu->aliases); return 0; } @@ -596,33 +635,18 @@ static inline bool pmu_alias_info_file(const char *name) * Reading the pmu event aliases definition, which should be located at: * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes. */ -static int pmu_aliases_parse(struct perf_pmu *pmu) +static int __pmu_aliases_parse(struct perf_pmu *pmu, int events_dir_fd) { - char path[PATH_MAX]; struct dirent *evt_ent; DIR *event_dir; - size_t len; - int fd, dir_fd; - - len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path)); - if (!len) - return 0; - scnprintf(path + len, sizeof(path) - len, "%s/events", pmu->name); - - dir_fd = open(path, O_DIRECTORY); - if (dir_fd == -1) { - pmu->sysfs_aliases_loaded = true; - return 0; - } - event_dir = fdopendir(dir_fd); - if (!event_dir){ - close (dir_fd); + event_dir = fdopendir(events_dir_fd); + if (!event_dir) return -EINVAL; - } while ((evt_ent = readdir(event_dir))) { char *name = evt_ent->d_name; + int fd; FILE *file; if (!strcmp(name, ".") || !strcmp(name, "..")) @@ -634,7 +658,7 @@ static int pmu_aliases_parse(struct perf_pmu *pmu) if (pmu_alias_info_file(name)) continue; - fd = openat(dir_fd, name, O_RDONLY); + fd = openat(events_dir_fd, name, O_RDONLY); if (fd == -1) { pr_debug("Cannot open %s\n", name); continue; @@ -646,18 +670,58 @@ static int pmu_aliases_parse(struct perf_pmu *pmu) } if (perf_pmu__new_alias(pmu, name, /*desc=*/ NULL, - /*val=*/ NULL, file, /*pe=*/ NULL) < 0) + /*val=*/ NULL, file, /*pe=*/ NULL, + EVENT_SRC_SYSFS) < 0) pr_debug("Cannot set up %s\n", name); fclose(file); } closedir(event_dir); - close (dir_fd); pmu->sysfs_aliases_loaded = true; return 0; } -static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms) +static int pmu_aliases_parse(struct perf_pmu *pmu) +{ + char path[PATH_MAX]; + size_t len; + int events_dir_fd, ret; + + if (pmu->sysfs_aliases_loaded) + return 0; + + len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path)); + if (!len) + return 0; + scnprintf(path + len, sizeof(path) - len, "%s/events", pmu->name); + + events_dir_fd = open(path, O_DIRECTORY); + if (events_dir_fd == -1) { + pmu->sysfs_aliases_loaded = true; + return 0; + } + ret = __pmu_aliases_parse(pmu, events_dir_fd); + close(events_dir_fd); + return ret; +} + +static int pmu_aliases_parse_eager(struct perf_pmu *pmu, int sysfs_fd) +{ + char path[FILENAME_MAX + 7]; + int ret, events_dir_fd; + + scnprintf(path, sizeof(path), "%s/events", pmu->name); + events_dir_fd = openat(sysfs_fd, path, O_DIRECTORY, 0); + if (events_dir_fd == -1) { + pmu->sysfs_aliases_loaded = true; + return 0; + } + ret = __pmu_aliases_parse(pmu, events_dir_fd); + close(events_dir_fd); + return ret; +} + +static int pmu_alias_terms(struct perf_pmu_alias *alias, int err_loc, struct list_head *terms) { struct parse_events_term *term, *cloned; struct parse_events_terms clone_terms; @@ -675,6 +739,7 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms * which we don't want for implicit terms in aliases. 
*/ cloned->weak = true; + cloned->err_term = cloned->err_val = err_loc; list_add_tail(&cloned->list, &clone_terms.terms); } list_splice_init(&clone_terms.terms, terms); @@ -899,7 +964,8 @@ static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe, { struct perf_pmu *pmu = vdata; - perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, pe); + perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, + pe, EVENT_SRC_CPU_JSON); return 0; } @@ -934,13 +1000,14 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe, return 0; if (pmu_uncore_alias_match(pe->pmu, pmu->name) && - pmu_uncore_identifier_match(pe->compat, pmu->id)) { + pmu_uncore_identifier_match(pe->compat, pmu->id)) { perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, - pe); + pe, + EVENT_SRC_SYS_JSON); } return 0; @@ -986,11 +1053,14 @@ static int pmu_max_precise(int dirfd, struct perf_pmu *pmu) } void __weak -perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused) +perf_pmu__arch_init(struct perf_pmu *pmu) { + if (pmu->is_core) + pmu->mem_events = perf_mem_events; } -struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *name) +struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *name, + bool eager_load) { struct perf_pmu *pmu; __u32 type; @@ -1019,10 +1089,9 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char * type value and format definitions. Load both right * now. */ - if (pmu_format(pmu, dirfd, name)) { - free(pmu); - return NULL; - } + if (pmu_format(pmu, dirfd, name, eager_load)) + goto err; + pmu->is_core = is_pmu_core(name); pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core); @@ -1033,11 +1102,20 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char pmu->max_precise = pmu_max_precise(dirfd, pmu); pmu->alias_name = pmu_find_alias_name(pmu, dirfd); pmu->events_table = perf_pmu__find_events_table(pmu); + /* + * Load the sys json events/aliases when loading the PMU as each event + * may have a different compat regular expression. We therefore can't + * know the number of sys json events/aliases without computing the + * regular expressions for them all. + */ pmu_add_sys_aliases(pmu); list_add_tail(&pmu->list, pmus); perf_pmu__arch_init(pmu); + if (eager_load) + pmu_aliases_parse_eager(pmu, dirfd); + return pmu; err: zfree(&pmu->name); @@ -1361,8 +1439,8 @@ static int pmu_config_term(const struct perf_pmu *pmu, parse_events_error__handle(err, term->err_val, asprintf(&err_str, - "value too big for format, maximum is %llu", - (unsigned long long)max_val) < 0 + "value too big for format (%s), maximum is %llu", + format->name, (unsigned long long)max_val) < 0 ? 
strdup("value too big for format") : err_str, NULL); @@ -1516,7 +1594,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_ alias = pmu_find_alias(pmu, term); if (!alias) continue; - ret = pmu_alias_terms(alias, &term->list); + ret = pmu_alias_terms(alias, term->err_term, &term->list); if (ret) { parse_events_error__handle(err, term->err_term, strdup("Failure to duplicate terms"), @@ -1600,6 +1678,62 @@ bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name) return false; } +int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_callback cb) +{ + static const char *const terms[] = { + "config=0..0xffffffffffffffff", + "config1=0..0xffffffffffffffff", + "config2=0..0xffffffffffffffff", + "config3=0..0xffffffffffffffff", + "name=string", + "period=number", + "freq=number", + "branch_type=(u|k|hv|any|...)", + "time", + "call-graph=(fp|dwarf|lbr)", + "stack-size=number", + "max-stack=number", + "nr=number", + "inherit", + "no-inherit", + "overwrite", + "no-overwrite", + "percore", + "aux-output", + "aux-sample-size=number", + }; + struct perf_pmu_format *format; + int ret; + + /* + * max-events and driver-config are missing above as are the internal + * types user, metric-id, raw, legacy cache and hardware. Assert against + * the enum parse_events__term_type so they are kept in sync. + */ + _Static_assert(ARRAY_SIZE(terms) == __PARSE_EVENTS__TERM_TYPE_NR - 6, + "perf_pmu__for_each_format()'s terms must be kept in sync with enum parse_events__term_type"); + list_for_each_entry(format, &pmu->format, list) { + perf_pmu_format__load(pmu, format); + ret = cb(state, format->name, (int)format->value, format->bits); + if (ret) + return ret; + } + if (!pmu->is_core) + return 0; + + for (size_t i = 0; i < ARRAY_SIZE(terms); i++) { + int config = PERF_PMU_FORMAT_VALUE_CONFIG; + + if (i < PERF_PMU_FORMAT_VALUE_CONFIG_END) + config = i; + + ret = cb(state, terms[i], config, /*bits=*/NULL); + if (ret) + return ret; + } + return 0; +} + bool is_pmu_core(const char *name) { return !strcmp(name, "cpu") || !strcmp(name, "cpum_cf") || is_sysfs_pmu_core(name); @@ -1630,15 +1764,15 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu) { size_t nr; - if (!pmu->sysfs_aliases_loaded) - pmu_aliases_parse(pmu); - - nr = pmu->sysfs_aliases; + pmu_aliases_parse(pmu); + nr = pmu->sysfs_aliases + pmu->sys_json_aliases;; if (pmu->cpu_aliases_added) - nr += pmu->loaded_json_aliases; + nr += pmu->cpu_json_aliases; else if (pmu->events_table) - nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->loaded_json_aliases; + nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases; + else + assert(pmu->cpu_json_aliases == 0); return pmu->selectable ? nr + 1 : nr; } @@ -1691,11 +1825,16 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus, struct strbuf sb; strbuf_init(&sb, /*hint=*/ 0); + pmu_aliases_parse(pmu); pmu_add_cpu_aliases(pmu); list_for_each_entry(event, &pmu->aliases, list) { size_t buf_used; + int pmu_name_len; info.pmu_name = event->pmu_name ?: pmu->name; + pmu_name_len = skip_duplicate_pmus + ? 
pmu_name_len_no_suffix(info.pmu_name, /*num=*/NULL) + : (int)strlen(info.pmu_name); info.alias = NULL; if (event->desc) { info.name = event->name; @@ -1720,7 +1859,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus, info.encoding_desc = buf + buf_used; parse_events_terms__to_strbuf(&event->terms, &sb); buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used, - "%s/%s/", info.pmu_name, sb.buf) + 1; + "%.*s/%s/", pmu_name_len, info.pmu_name, sb.buf) + 1; info.topic = event->topic; info.str = sb.buf; info.deprecated = event->deprecated; @@ -1760,6 +1899,12 @@ bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name) bool perf_pmu__is_software(const struct perf_pmu *pmu) { + const char *known_sw_pmus[] = { + "kprobe", + "msr", + "uprobe", + }; + if (pmu->is_core || pmu->is_uncore || pmu->auxtrace) return false; switch (pmu->type) { @@ -1771,7 +1916,11 @@ bool perf_pmu__is_software(const struct perf_pmu *pmu) case PERF_TYPE_BREAKPOINT: return true; default: break; } - return !strcmp(pmu->name, "kprobe") || !strcmp(pmu->name, "uprobe"); + for (size_t i = 0; i < ARRAY_SIZE(known_sw_pmus); i++) { + if (!strcmp(pmu->name, known_sw_pmus[i])) + return true; + } + return false; } FILE *perf_pmu__open_file(const struct perf_pmu *pmu, const char *name) @@ -1991,18 +2140,29 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, name ?: "N/A", buf, config_name, config); } -int perf_pmu__match(const char *pattern, const char *name, const char *tok) +bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok) { - if (!name) - return -1; + const char *name = pmu->name; + bool need_fnmatch = strchr(tok, '*') != NULL; - if (fnmatch(pattern, name, 0)) - return -1; + if (!strncmp(tok, "uncore_", 7)) + tok += 7; + if (!strncmp(name, "uncore_", 7)) + name += 7; - if (tok && !perf_pmu__match_ignoring_suffix(name, tok)) - return -1; + if (perf_pmu__match_ignoring_suffix(name, tok) || + (need_fnmatch && !fnmatch(tok, name, 0))) + return true; - return 0; + name = pmu->alias_name; + if (!name) + return false; + + if (!strncmp(name, "uncore_", 7)) + name += 7; + + return perf_pmu__match_ignoring_suffix(name, tok) || + (need_fnmatch && !fnmatch(tok, name, 0)); } double __weak perf_pmu__cpu_slots_per_cycle(void) @@ -2073,3 +2233,22 @@ void perf_pmu__delete(struct perf_pmu *pmu) zfree(&pmu->id); free(pmu); } + +const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config) +{ + struct perf_pmu_alias *event; + + if (!pmu) + return NULL; + + pmu_aliases_parse(pmu); + pmu_add_cpu_aliases(pmu); + list_for_each_entry(event, &pmu->aliases, list) { + struct perf_event_attr attr = {.config = 0,}; + int ret = perf_pmu__config(pmu, &attr, &event->terms, NULL); + + if (ret == 0 && config == attr.config) + return event->name; + } + return NULL; +} diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 424c3fee0949..b2d3fd291f02 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -10,6 +10,8 @@ #include <stdio.h> #include "parse-events.h" #include "pmu-events/pmu-events.h" +#include "map_symbol.h" +#include "mem-events.h" struct evsel_config_term; struct perf_cpu_map; @@ -121,8 +123,10 @@ struct perf_pmu { const struct pmu_events_table *events_table; /** @sysfs_aliases: Number of sysfs aliases loaded. */ uint32_t sysfs_aliases; - /** @sysfs_aliases: Number of json event aliases loaded. */ - uint32_t loaded_json_aliases; + /** @cpu_json_aliases: Number of json event aliases loaded specific to the CPUID. 
*/ + uint32_t cpu_json_aliases; + /** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */ + uint32_t sys_json_aliases; /** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */ bool sysfs_aliases_loaded; /** @@ -162,6 +166,11 @@ struct perf_pmu { */ bool exclude_guest; } missing_features; + + /** + * @mem_events: List of the supported mem events + */ + struct perf_mem_event *mem_events; }; /** @perf_pmu__fake: A special global PMU used for testing. */ @@ -189,6 +198,8 @@ struct pmu_event_info { }; typedef int (*pmu_event_callback)(void *state, struct pmu_event_info *info); +typedef int (*pmu_format_callback)(void *state, const char *name, int config, + const unsigned long *bits); void pmu_add_sys_aliases(struct perf_pmu *pmu); int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, @@ -205,9 +216,9 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_ struct parse_events_error *err); int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb); -int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load); void perf_pmu_format__set_value(void *format, int config, unsigned long *bits); bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name); +int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_callback cb); bool is_pmu_core(const char *name); bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu); @@ -253,7 +264,7 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, const char *config_name); void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu); -int perf_pmu__match(const char *pattern, const char *name, const char *tok); +bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok); double perf_pmu__cpu_slots_per_cycle(void); int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size); @@ -262,9 +273,11 @@ int perf_pmu__pathname_scnprintf(char *buf, size_t size, int perf_pmu__event_source_devices_fd(void); int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename, int flags); -struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name); +struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name, + bool eager_load); struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus); void perf_pmu__delete(struct perf_pmu *pmu); struct perf_pmu *perf_pmus__find_core_pmu(void); +const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config); #endif /* __PMU_H */ diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c index ce4931461741..b9b4c5eb5002 100644 --- a/tools/perf/util/pmus.c +++ b/tools/perf/util/pmus.c @@ -16,6 +16,7 @@ #include "pmus.h" #include "pmu.h" #include "print-events.h" +#include "strbuf.h" /* * core_pmus: A PMU belongs to core_pmus if it's name is "cpu" or it's sysfs @@ -123,7 +124,8 @@ struct perf_pmu *perf_pmus__find(const char *name) return NULL; dirfd = perf_pmu__event_source_devices_fd(); - pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name); + pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name, + /*eager_load=*/false); close(dirfd); if (!pmu) { @@ -158,7 +160,8 @@ static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name) if (core_pmu && read_sysfs_core_pmus) return NULL; - return perf_pmu__lookup(core_pmu ? 
&core_pmus : &other_pmus, dirfd, name); + return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name, + /*eager_load=*/false); } static int pmus_cmp(void *priv __maybe_unused, @@ -345,12 +348,6 @@ const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str) return NULL; } -int __weak perf_pmus__num_mem_pmus(void) -{ - /* All core PMUs are for mem events. */ - return perf_pmus__num_core_pmus(); -} - /** Struct for ordering events as output in perf list. */ struct sevent { /** PMU for event. */ @@ -509,6 +506,99 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p zfree(&aliases); } +struct build_format_string_args { + struct strbuf short_string; + struct strbuf long_string; + int num_formats; +}; + +static int build_format_string(void *state, const char *name, int config, + const unsigned long *bits) +{ + struct build_format_string_args *args = state; + unsigned int num_bits; + int ret1, ret2 = 0; + + (void)config; + args->num_formats++; + if (args->num_formats > 1) { + strbuf_addch(&args->long_string, ','); + if (args->num_formats < 4) + strbuf_addch(&args->short_string, ','); + } + num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0; + if (num_bits <= 1) { + ret1 = strbuf_addf(&args->long_string, "%s", name); + if (args->num_formats < 4) + ret2 = strbuf_addf(&args->short_string, "%s", name); + } else if (num_bits > 8) { + ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name, + ULLONG_MAX >> (64 - num_bits)); + if (args->num_formats < 4) { + ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name, + ULLONG_MAX >> (64 - num_bits)); + } + } else { + ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name, + ULLONG_MAX >> (64 - num_bits)); + if (args->num_formats < 4) { + ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name, + ULLONG_MAX >> (64 - num_bits)); + } + } + return ret1 < 0 ? ret1 : (ret2 < 0 ? 
ret2 : 0); +} + +void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state) +{ + bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state); + struct perf_pmu *(*scan_fn)(struct perf_pmu *); + struct perf_pmu *pmu = NULL; + + if (skip_duplicate_pmus) + scan_fn = perf_pmus__scan_skip_duplicates; + else + scan_fn = perf_pmus__scan; + + while ((pmu = scan_fn(pmu)) != NULL) { + struct build_format_string_args format_args = { + .short_string = STRBUF_INIT, + .long_string = STRBUF_INIT, + .num_formats = 0, + }; + int len = pmu_name_len_no_suffix(pmu->name, /*num=*/NULL); + const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)"; + + if (!pmu->is_core) + desc = NULL; + + strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name); + strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name); + perf_pmu__for_each_format(pmu, &format_args, build_format_string); + + if (format_args.num_formats > 3) + strbuf_addf(&format_args.short_string, ",.../modifier"); + else + strbuf_addf(&format_args.short_string, "/modifier"); + + strbuf_addf(&format_args.long_string, "/modifier"); + print_cb->print_event(print_state, + /*topic=*/NULL, + /*pmu_name=*/NULL, + format_args.short_string.buf, + /*event_alias=*/NULL, + /*scale_unit=*/NULL, + /*deprecated=*/false, + "Raw event descriptor", + desc, + /*long_desc=*/NULL, + format_args.long_string.buf); + + strbuf_release(&format_args.short_string); + strbuf_release(&format_args.long_string); + } +} + bool perf_pmus__have_event(const char *pname, const char *name) { struct perf_pmu *pmu = perf_pmus__find(pname); @@ -608,3 +698,13 @@ struct perf_pmu *perf_pmus__find_core_pmu(void) { return perf_pmus__scan_core(NULL); } + +struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name) +{ + /* + * Some PMU functions read from the sysfs mount point, so care is + * needed, hence passing the eager_load flag to load things like the + * format files. 
+ */ + return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true); +} diff --git a/tools/perf/util/pmus.h b/tools/perf/util/pmus.h index 4c67153ac257..9d4ded80b8e9 100644 --- a/tools/perf/util/pmus.h +++ b/tools/perf/util/pmus.h @@ -17,11 +17,13 @@ struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu); const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str); -int perf_pmus__num_mem_pmus(void); void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state); +void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state); bool perf_pmus__have_event(const char *pname, const char *name); int perf_pmus__num_core_pmus(void); bool perf_pmus__supports_extended_type(void); char *perf_pmus__default_pmu_name(void); +struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name); + #endif /* __PMUS_H */ diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c index 9e47712507cc..3f38c27f0157 100644 --- a/tools/perf/util/print-events.c +++ b/tools/perf/util/print-events.c @@ -9,6 +9,7 @@ #include <unistd.h> #include <api/fs/tracing_path.h> +#include <api/io.h> #include <linux/stddef.h> #include <linux/perf_event.h> #include <linux/zalloc.h> @@ -28,6 +29,7 @@ #include "tracepoint.h" #include "pfm.h" #include "thread_map.h" +#include "util.h" #define MAX_NAME_LEN 100 @@ -37,7 +39,7 @@ static const char * const event_type_descriptors[] = { "Software event", "Tracepoint event", "Hardware cache event", - "Raw hardware event descriptor", + "Raw event descriptor", "Hardware breakpoint", }; @@ -63,6 +65,8 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus { char *events_path = get_tracing_file("events"); int events_fd = open(events_path, O_PATH); + struct dirent **sys_namelist = NULL; + int sys_items; put_tracing_file(events_path); if (events_fd < 0) { @@ -70,10 +74,7 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus return; } -#ifdef HAVE_SCANDIRAT_SUPPORT -{ - struct dirent **sys_namelist = NULL; - int sys_items = tracing_events__scandir_alphasort(&sys_namelist); + sys_items = tracing_events__scandir_alphasort(&sys_namelist); for (int i = 0; i < sys_items; i++) { struct dirent *sys_dirent = sys_namelist[i]; @@ -92,34 +93,48 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus evt_items = scandirat(events_fd, sys_dirent->d_name, &evt_namelist, NULL, alphasort); for (int j = 0; j < evt_items; j++) { + /* + * Buffer sized at twice the max filename length + 1 + * separator + 1 \0 terminator. + */ + char buf[NAME_MAX * 2 + 2]; + /* 16 possible hex digits and 22 other characters and \0. 
*/ + char encoding[16 + 22]; struct dirent *evt_dirent = evt_namelist[j]; - char evt_path[MAXPATHLEN]; - int evt_fd; + struct io id; + __u64 config; if (evt_dirent->d_type != DT_DIR || !strcmp(evt_dirent->d_name, ".") || !strcmp(evt_dirent->d_name, "..")) goto next_evt; - snprintf(evt_path, sizeof(evt_path), "%s/id", evt_dirent->d_name); - evt_fd = openat(dir_fd, evt_path, O_RDONLY); - if (evt_fd < 0) + snprintf(buf, sizeof(buf), "%s/id", evt_dirent->d_name); + io__init(&id, openat(dir_fd, buf, O_RDONLY), buf, sizeof(buf)); + + if (id.fd < 0) goto next_evt; - close(evt_fd); - snprintf(evt_path, MAXPATHLEN, "%s:%s", + if (io__get_dec(&id, &config) < 0) { + close(id.fd); + goto next_evt; + } + close(id.fd); + + snprintf(buf, sizeof(buf), "%s:%s", sys_dirent->d_name, evt_dirent->d_name); + snprintf(encoding, sizeof(encoding), "tracepoint/config=0x%llx/", config); print_cb->print_event(print_state, /*topic=*/NULL, - /*pmu_name=*/NULL, - evt_path, + /*pmu_name=*/NULL, /* really "tracepoint" */ + /*event_name=*/buf, /*event_alias=*/NULL, /*scale_unit=*/NULL, /*deprecated=*/false, "Tracepoint event", /*desc=*/NULL, /*long_desc=*/NULL, - /*encoding_desc=*/NULL); + encoding); next_evt: free(evt_namelist[j]); } @@ -130,11 +145,6 @@ next_sys: } free(sys_namelist); -} -#else - printf("\nWARNING: Your libc doesn't have the scandirat function, please ask its maintainers to implement it.\n" - " As a rough fallback, please do 'ls %s' to see the available tracepoint events.\n", events_path); -#endif close(events_fd); } @@ -232,7 +242,6 @@ void print_sdt_events(const struct print_callbacks *print_cb, void *print_state) bool is_event_supported(u8 type, u64 config) { bool ret = true; - int open_return; struct evsel *evsel; struct perf_event_attr attr = { .type = type, @@ -246,20 +255,32 @@ bool is_event_supported(u8 type, u64 config) evsel = evsel__new(&attr); if (evsel) { - open_return = evsel__open(evsel, NULL, tmap); - ret = open_return >= 0; + ret = evsel__open(evsel, NULL, tmap) >= 0; - if (open_return == -EACCES) { + if (!ret) { /* - * This happens if the paranoid value + * The event may fail to open if the paranoid value * /proc/sys/kernel/perf_event_paranoid is set to 2 - * Re-run with exclude_kernel set; we don't do that - * by default as some ARM machines do not support it. - * + * Re-run with exclude_kernel set; we don't do that by + * default as some ARM machines do not support it. */ evsel->core.attr.exclude_kernel = 1; ret = evsel__open(evsel, NULL, tmap) >= 0; } + + if (!ret) { + /* + * The event may fail to open if the PMU requires + * exclude_guest to be set (e.g. as the Apple M1 PMU + * requires). + * Re-run with exclude_guest set; we don't do that by + * default as it's equally legitimate for another PMU + * driver to require that exclude_guest is clear. 
+ */ + evsel->core.attr.exclude_guest = 1; + ret = evsel__open(evsel, NULL, tmap) >= 0; + } + evsel__delete(evsel); } @@ -395,8 +416,6 @@ void print_symbol_events(const struct print_callbacks *print_cb, void *print_sta */ void print_events(const struct print_callbacks *print_cb, void *print_state) { - char *tmp; - print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE, event_symbols_hw, PERF_COUNT_HW_MAX); print_symbol_events(print_cb, print_state, PERF_TYPE_SOFTWARE, @@ -420,21 +439,7 @@ void print_events(const struct print_callbacks *print_cb, void *print_state) /*long_desc=*/NULL, /*encoding_desc=*/NULL); - if (asprintf(&tmp, "%s/t1=v1[,t2=v2,t3 ...]/modifier", - perf_pmus__scan_core(/*pmu=*/NULL)->name) > 0) { - print_cb->print_event(print_state, - /*topic=*/NULL, - /*pmu_name=*/NULL, - tmp, - /*event_alias=*/NULL, - /*scale_unit=*/NULL, - /*deprecated=*/false, - event_type_descriptors[PERF_TYPE_RAW], - "(see 'man perf-list' on how to encode it)", - /*long_desc=*/NULL, - /*encoding_desc=*/NULL); - free(tmp); - } + perf_pmus__print_raw_pmu_events(print_cb, print_state); print_cb->print_event(print_state, /*topic=*/NULL, diff --git a/tools/perf/util/print_insn.c b/tools/perf/util/print_insn.c new file mode 100644 index 000000000000..a950e9157d2d --- /dev/null +++ b/tools/perf/util/print_insn.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Instruction binary disassembler based on capstone. + * + * Author(s): Changbin Du <changbin.du@huawei.com> + */ +#include <inttypes.h> +#include <string.h> +#include <stdbool.h> +#include "debug.h" +#include "sample.h" +#include "symbol.h" +#include "machine.h" +#include "thread.h" +#include "print_insn.h" +#include "dump-insn.h" +#include "map.h" +#include "dso.h" + +size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp) +{ + int printed = 0; + + for (int i = 0; i < sample->insn_len; i++) { + printed += fprintf(fp, "%02x", (unsigned char)sample->insn[i]); + if (sample->insn_len - i > 1) + printed += fprintf(fp, " "); + } + return printed; +} + +#ifdef HAVE_LIBCAPSTONE_SUPPORT +#include <capstone/capstone.h> + +static int capstone_init(struct machine *machine, csh *cs_handle, bool is64) +{ + cs_arch arch; + cs_mode mode; + + if (machine__is(machine, "x86_64") && is64) { + arch = CS_ARCH_X86; + mode = CS_MODE_64; + } else if (machine__normalized_is(machine, "x86")) { + arch = CS_ARCH_X86; + mode = CS_MODE_32; + } else if (machine__normalized_is(machine, "arm64")) { + arch = CS_ARCH_ARM64; + mode = CS_MODE_ARM; + } else if (machine__normalized_is(machine, "arm")) { + arch = CS_ARCH_ARM; + mode = CS_MODE_ARM + CS_MODE_V8; + } else if (machine__normalized_is(machine, "s390")) { + arch = CS_ARCH_SYSZ; + mode = CS_MODE_BIG_ENDIAN; + } else { + return -1; + } + + if (cs_open(arch, mode, cs_handle) != CS_ERR_OK) { + pr_warning_once("cs_open failed\n"); + return -1; + } + + if (machine__normalized_is(machine, "x86")) { + cs_option(*cs_handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT); + /* + * Resolving address operands to symbols is implemented + * on x86 by investigating instruction details. 
+ */ + cs_option(*cs_handle, CS_OPT_DETAIL, CS_OPT_ON); + } + + return 0; +} + +static size_t print_insn_x86(struct thread *thread, u8 cpumode, cs_insn *insn, + int print_opts, FILE *fp) +{ + struct addr_location al; + size_t printed = 0; + + if (insn->detail && insn->detail->x86.op_count == 1) { + cs_x86_op *op = &insn->detail->x86.operands[0]; + + addr_location__init(&al); + if (op->type == X86_OP_IMM && + thread__find_symbol(thread, cpumode, op->imm, &al)) { + printed += fprintf(fp, "%s ", insn[0].mnemonic); + printed += symbol__fprintf_symname_offs(al.sym, &al, fp); + if (print_opts & PRINT_INSN_IMM_HEX) + printed += fprintf(fp, " [%#" PRIx64 "]", op->imm); + addr_location__exit(&al); + return printed; + } + addr_location__exit(&al); + } + + printed += fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str); + return printed; +} + +static bool is64bitip(struct machine *machine, struct addr_location *al) +{ + const struct dso *dso = al->map ? map__dso(al->map) : NULL; + + if (dso) + return dso__is_64_bit(dso); + + return machine__is(machine, "x86_64") || + machine__normalized_is(machine, "arm64") || + machine__normalized_is(machine, "s390"); +} + +ssize_t fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpumode, + bool is64bit, const uint8_t *code, size_t code_size, + uint64_t ip, int *lenp, int print_opts, FILE *fp) +{ + size_t printed; + cs_insn *insn; + csh cs_handle; + size_t count; + int ret; + + /* TODO: Try to initiate capstone only once but need a proper place. */ + ret = capstone_init(machine, &cs_handle, is64bit); + if (ret < 0) + return ret; + + count = cs_disasm(cs_handle, code, code_size, ip, 1, &insn); + if (count > 0) { + if (machine__normalized_is(machine, "x86")) + printed = print_insn_x86(thread, cpumode, &insn[0], print_opts, fp); + else + printed = fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str); + if (lenp) + *lenp = insn->size; + cs_free(insn, count); + } else { + printed = -1; + } + + cs_close(&cs_handle); + return printed; +} + +size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread, + struct machine *machine, FILE *fp, + struct addr_location *al) +{ + bool is64bit = is64bitip(machine, al); + ssize_t printed; + + printed = fprintf_insn_asm(machine, thread, sample->cpumode, is64bit, + (uint8_t *)sample->insn, sample->insn_len, + sample->ip, NULL, 0, fp); + if (printed < 0) + return sample__fprintf_insn_raw(sample, fp); + + return printed; +} +#else +size_t sample__fprintf_insn_asm(struct perf_sample *sample __maybe_unused, + struct thread *thread __maybe_unused, + struct machine *machine __maybe_unused, + FILE *fp __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} +#endif diff --git a/tools/perf/util/print_insn.h b/tools/perf/util/print_insn.h new file mode 100644 index 000000000000..07d11af3fc1c --- /dev/null +++ b/tools/perf/util/print_insn.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PERF_PRINT_INSN_H +#define PERF_PRINT_INSN_H + +#include <stddef.h> +#include <stdio.h> + +struct perf_sample; +struct thread; +struct machine; +struct perf_insn; + +#define PRINT_INSN_IMM_HEX (1<<0) + +size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread, + struct machine *machine, FILE *fp, struct addr_location *al); +size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp); +ssize_t fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpumode, + bool is64bit, const uint8_t *code, size_t code_size, + uint64_t ip, int *lenp, 
int print_opts, FILE *fp); + +#endif /* PERF_PRINT_INSN_H */ diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index a1a796043691..a17c9b8a7a79 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -11,6 +11,7 @@ #include <sys/stat.h> #include <fcntl.h> #include <errno.h> +#include <libgen.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> @@ -158,8 +159,8 @@ static int kernel_get_module_map_cb(struct map *map, void *data) { struct kernel_get_module_map_cb_args *args = data; struct dso *dso = map__dso(map); - const char *short_name = dso->short_name; /* short_name is "[module]" */ - u16 short_name_len = dso->short_name_len; + const char *short_name = dso__short_name(dso); + u16 short_name_len = dso__short_name_len(dso); if (strncmp(short_name + 1, args->module, short_name_len - 2) == 0 && args->module[short_name_len - 2] == '\0') { @@ -201,10 +202,9 @@ struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user) map = dso__new_map(target); dso = map ? map__dso(map) : NULL; if (dso) { - mutex_lock(&dso->lock); - nsinfo__put(dso->nsinfo); - dso->nsinfo = nsinfo__get(nsi); - mutex_unlock(&dso->lock); + mutex_lock(dso__lock(dso)); + dso__set_nsinfo(dso, nsinfo__get(nsi)); + mutex_unlock(dso__lock(dso)); } return map; } else { @@ -235,7 +235,7 @@ static int convert_exec_to_group(const char *exec, char **result) } } - ret = e_snprintf(buf, 64, "%s_%s", PERFPROBE_GROUP, ptr1); + ret = e_snprintf(buf, sizeof(buf), "%s_%s", PERFPROBE_GROUP, ptr1); if (ret < 0) goto out; @@ -358,6 +358,7 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) map = maps__find_by_name(machine__kernel_maps(host_machine), module_name); if (map) { dso = map__dso(map); + map__put(map); goto found; } pr_debug("Failed to find module %s.\n", module); @@ -366,11 +367,11 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) map = machine__kernel_map(host_machine); dso = map__dso(map); - if (!dso->has_build_id) + if (!dso__has_build_id(dso)) dso__read_running_kernel_build_id(dso, host_machine); vmlinux_name = symbol_conf.vmlinux_name; - dso->load_errno = 0; + *dso__load_errno(dso) = 0; if (vmlinux_name) ret = dso__load_vmlinux(dso, map, vmlinux_name, false); else @@ -497,7 +498,7 @@ static struct debuginfo *open_from_debuginfod(struct dso *dso, struct nsinfo *ns if (!c) return NULL; - build_id__sprintf(&dso->bid, sbuild_id); + build_id__sprintf(dso__bid(dso), sbuild_id); fd = debuginfod_find_debuginfo(c, (const unsigned char *)sbuild_id, 0, &path); if (fd >= 0) @@ -540,7 +541,7 @@ static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi, if (!module || !strchr(module, '/')) { err = kernel_get_module_dso(module, &dso); if (err < 0) { - if (!dso || dso->load_errno == 0) { + if (!dso || *dso__load_errno(dso) == 0) { if (!str_error_r(-err, reason, STRERR_BUFSIZE)) strcpy(reason, "(unknown)"); } else @@ -557,7 +558,7 @@ static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi, } return NULL; } - path = dso->long_name; + path = dso__long_name(dso); } nsinfo__mountns_enter(nsi, &nsc); ret = debuginfo__new(path); @@ -2273,9 +2274,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp, ret = pp->function ? 
0 : -ENOMEM; out: - if (map && !is_kprobe) { - map__put(map); - } + map__put(map); return ret; } @@ -2758,7 +2757,7 @@ static int get_new_event_name(char *buf, size_t len, const char *base, /* Try no suffix number */ ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : ""); if (ret < 0) { - pr_debug("snprintf() failed: %d\n", ret); + pr_warning("snprintf() failed: %d; the event name nbase='%s' is too long\n", ret, nbase); goto out; } if (!strlist__has_entry(namelist, buf)) @@ -2867,7 +2866,7 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev, group = PERFPROBE_GROUP; /* Get an unused new event name */ - ret = get_new_event_name(buf, 64, event, namelist, + ret = get_new_event_name(buf, sizeof(buf), event, namelist, tev->point.retprobe, allow_suffix); if (ret < 0) return ret; @@ -3795,8 +3794,8 @@ int show_available_funcs(const char *target, struct nsinfo *nsi, /* Show all (filtered) symbols */ setup_pager(); - for (size_t i = 0; i < dso->symbol_names_len; i++) { - struct symbol *pos = dso->symbol_names[i]; + for (size_t i = 0; i < dso__symbol_names_len(dso); i++) { + struct symbol *pos = dso__symbol_names(dso)[i]; if (strfilter__compare(_filter, pos->name)) printf("%s\n", pos->name); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index c8923375e30d..630e16c54ed5 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -186,8 +186,6 @@ static_var: return ret2; } -#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long)) - static int convert_variable_type(Dwarf_Die *vr_die, struct probe_trace_arg *tvar, const char *cast, bool user_access) @@ -217,7 +215,7 @@ static int convert_variable_type(Dwarf_Die *vr_die, total = dwarf_bytesize(vr_die); if (boffs < 0 || total < 0) return -ENOENT; - ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs, + ret = snprintf(buf, 16, "b%d@%d/%d", bsize, boffs, BYTES_TO_BITS(total)); goto formatted; } diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index 593b660ec75e..1bec945f4838 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -31,6 +31,7 @@ util/counts.c util/print_binary.c util/strlist.c util/trace-event.c +util/trace-event-parse.c ../lib/rbtree.c util/string.c util/symbol_fprintf.c diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 8761f51b5c7c..0aeb97c11c03 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -103,6 +103,16 @@ int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char return EOF; } +const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused) +{ + return NULL; +} + +struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused) +{ + return NULL; +} + int perf_pmus__num_core_pmus(void) { return 1; @@ -181,6 +191,7 @@ int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused) * implementing 'verbose' and 'eprintf'. 
*/ int verbose; +int debug_kmaps; int debug_peo_args; int eprintf(int level, int var, const char *fmt, ...); diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h index 376e86cb4c3c..d927a0d25052 100644 --- a/tools/perf/util/rb_resort.h +++ b/tools/perf/util/rb_resort.h @@ -143,9 +143,4 @@ struct __name##_sorted *__name = __name##_sorted__new DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \ __ilist->rblist.nr_entries) -/* For 'struct machine->threads' */ -#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \ - DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \ - __machine->threads[hash_bucket].nr) - #endif /* _PERF_RESORT_RB_H_ */ diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c index 87e817b3cf7e..e867de8ddaaa 100644 --- a/tools/perf/util/record.c +++ b/tools/perf/util/record.c @@ -237,7 +237,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str) evsel = evlist__last(temp_evlist); - if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) { + if (!evlist || perf_cpu_map__is_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) { struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus(); if (cpus) diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index b072ac5d3bc2..e16257d5ab2c 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -320,10 +320,10 @@ static SV *perl_process_callchain(struct perf_sample *sample, const char *dsoname = "[unknown]"; if (dso) { - if (symbol_conf.show_kernel_path && dso->long_name) - dsoname = dso->long_name; + if (symbol_conf.show_kernel_path && dso__long_name(dso)) + dsoname = dso__long_name(dso); else - dsoname = dso->name; + dsoname = dso__name(dso); } if (!hv_stores(elem, "dso", newSVpv(dsoname,0))) { hv_undef(elem); diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 860e1837ba96..fb00f3ad6815 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -45,6 +45,7 @@ #include "../thread.h" #include "../comm.h" #include "../machine.h" +#include "../mem-info.h" #include "../db-export.h" #include "../thread-stack.h" #include "../trace-event.h" @@ -393,10 +394,10 @@ static const char *get_dsoname(struct map *map) struct dso *dso = map ? 
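
The record.c hunk above moves to the renamed perf_cpu_map__is_any_cpu_or_is_empty(). As I read the helper, an "any CPU" map is the one-element map holding only the dummy CPU -1; a sketch under that assumption, with simplified types:

    struct perf_cpu { int cpu; };

    struct perf_cpu_map {
            int nr;
            struct perf_cpu map[];
    };

    /* True when no specific CPUs were requested: the map is empty or
     * contains only the dummy "any CPU" entry (cpu == -1). */
    static int cpu_map__is_any_cpu_or_empty(const struct perf_cpu_map *m)
    {
            return !m || m->nr == 0 || (m->nr == 1 && m->map[0].cpu == -1);
    }
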
map__dso(map) : NULL; if (dso) { - if (symbol_conf.show_kernel_path && dso->long_name) - dsoname = dso->long_name; + if (symbol_conf.show_kernel_path && dso__long_name(dso)) + dsoname = dso__long_name(dso); else - dsoname = dso->name; + dsoname = dso__name(dso); } return dsoname; @@ -720,15 +721,20 @@ static void set_sample_read_in_dict(PyObject *dict_sample, } static void set_sample_datasrc_in_dict(PyObject *dict, - struct perf_sample *sample) + struct perf_sample *sample) { - struct mem_info mi = { .data_src.val = sample->data_src }; + struct mem_info *mi = mem_info__new(); char decode[100]; + if (!mi) + Py_FatalError("couldn't create mem-info"); + pydict_set_item_string_decref(dict, "datasrc", PyLong_FromUnsignedLongLong(sample->data_src)); - perf_script__meminfo_scnprintf(decode, 100, &mi); + mem_info__data_src(mi)->val = sample->data_src; + perf_script__meminfo_scnprintf(decode, 100, mi); + mem_info__put(mi); pydict_set_item_string_decref(dict, "datasrc_decode", _PyUnicode_FromString(decode)); @@ -799,8 +805,9 @@ static void set_sym_in_dict(PyObject *dict, struct addr_location *al, if (al->map) { struct dso *dso = map__dso(al->map); - pydict_set_item_string_decref(dict, dso_field, _PyUnicode_FromString(dso->name)); - build_id__sprintf(&dso->bid, sbuild_id); + pydict_set_item_string_decref(dict, dso_field, + _PyUnicode_FromString(dso__name(dso))); + build_id__sprintf(dso__bid(dso), sbuild_id); pydict_set_item_string_decref(dict, dso_bid_field, _PyUnicode_FromString(sbuild_id)); pydict_set_item_string_decref(dict, dso_map_start, @@ -858,6 +865,10 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample, pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(evsel__name(evsel))); pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->core.attr, sizeof(evsel->core.attr))); + pydict_set_item_string_decref(dict_sample, "id", + PyLong_FromUnsignedLongLong(sample->id)); + pydict_set_item_string_decref(dict_sample, "stream_id", + PyLong_FromUnsignedLongLong(sample->stream_id)); pydict_set_item_string_decref(dict_sample, "pid", _PyLong_FromLong(sample->pid)); pydict_set_item_string_decref(dict_sample, "tid", @@ -1242,14 +1253,14 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso, char sbuild_id[SBUILD_ID_SIZE]; PyObject *t; - build_id__sprintf(&dso->bid, sbuild_id); + build_id__sprintf(dso__bid(dso), sbuild_id); t = tuple_new(5); - tuple_set_d64(t, 0, dso->db_id); + tuple_set_d64(t, 0, dso__db_id(dso)); tuple_set_d64(t, 1, machine->db_id); - tuple_set_string(t, 2, dso->short_name); - tuple_set_string(t, 3, dso->long_name); + tuple_set_string(t, 2, dso__short_name(dso)); + tuple_set_string(t, 3, dso__long_name(dso)); tuple_set_string(t, 4, sbuild_id); call_object(tables->dso_handler, t, "dso_table"); @@ -1269,7 +1280,7 @@ static int python_export_symbol(struct db_export *dbe, struct symbol *sym, t = tuple_new(6); tuple_set_d64(t, 0, *sym_db_id); - tuple_set_d64(t, 1, dso->db_id); + tuple_set_d64(t, 1, dso__db_id(dso)); tuple_set_d64(t, 2, sym->start); tuple_set_d64(t, 3, sym->end); tuple_set_s32(t, 4, sym->binding); @@ -1306,7 +1317,7 @@ static void python_export_sample_table(struct db_export *dbe, struct tables *tables = container_of(dbe, struct tables, dbe); PyObject *t; - t = tuple_new(25); + t = tuple_new(27); tuple_set_d64(t, 0, es->db_id); tuple_set_d64(t, 1, es->evsel->db_id); @@ -1333,6 +1344,8 @@ static void python_export_sample_table(struct db_export *dbe, tuple_set_d64(t, 22, es->sample->insn_cnt); 
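
The set_sample_datasrc_in_dict() change above swaps an on-stack struct mem_info for a heap-allocated, reference-counted one obtained with mem_info__new() and released with mem_info__put(). The lifecycle, reduced to a self-contained sketch with the same naming scheme:

    #include <stdlib.h>

    struct mem_info {
            int refcnt;
            unsigned long long data_src;
    };

    static struct mem_info *mem_info__new(void)
    {
            struct mem_info *mi = calloc(1, sizeof(*mi));

            if (mi)
                    mi->refcnt = 1;     /* caller owns one reference */
            return mi;
    }

    static void mem_info__put(struct mem_info *mi)
    {
            if (mi && --mi->refcnt == 0)
                    free(mi);
    }
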
tuple_set_d64(t, 23, es->sample->cyc_cnt); tuple_set_s32(t, 24, es->sample->flags); + tuple_set_d64(t, 25, es->sample->id); + tuple_set_d64(t, 26, es->sample->stream_id); call_object(tables->sample_handler, t, "sample_table"); @@ -1693,13 +1706,15 @@ static void python_process_stat(struct perf_stat_config *config, { struct perf_thread_map *threads = counter->core.threads; struct perf_cpu_map *cpus = counter->core.cpus; - int cpu, thread; - for (thread = 0; thread < perf_thread_map__nr(threads); thread++) { - for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) { - process_stat(counter, perf_cpu_map__cpu(cpus, cpu), + for (int thread = 0; thread < perf_thread_map__nr(threads); thread++) { + int idx; + struct perf_cpu cpu; + + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { + process_stat(counter, cpu, perf_thread_map__pid(threads, thread), tstamp, - perf_counts(counter->counts, cpu, thread)); + perf_counts(counter->counts, idx, thread)); } } } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 199d3e8df315..a10343b9dcd4 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -2720,6 +2720,17 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp) return machine__fprintf(&session->machines.host, fp); } +void perf_session__dump_kmaps(struct perf_session *session) +{ + int save_verbose = verbose; + + fflush(stdout); + fprintf(stderr, "Kernel and module maps:\n"); + verbose = 0; /* Suppress verbose to print a summary only */ + maps__fprintf(machine__kernel_maps(&session->machines.host), stderr); + verbose = save_verbose; +} + struct evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type) { @@ -2738,6 +2749,7 @@ int perf_session__cpu_bitmap(struct perf_session *session, int i, err = -1; struct perf_cpu_map *map; int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS); + struct perf_cpu cpu; for (i = 0; i < PERF_TYPE_MAX; ++i) { struct evsel *evsel; @@ -2759,9 +2771,7 @@ int perf_session__cpu_bitmap(struct perf_session *session, return -1; } - for (i = 0; i < perf_cpu_map__nr(map); i++) { - struct perf_cpu cpu = perf_cpu_map__cpu(map, i); - + perf_cpu_map__for_each_cpu(cpu, i, map) { if (cpu.cpu >= nr_cpus) { pr_err("Requested CPU %d too large. 
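
Both the python_process_stat() and perf_session__cpu_bitmap() hunks above replace index-based loops with perf_cpu_map__for_each_cpu(), which binds the index and the CPU value together on each pass; counters stay indexed by idx while cpu.cpu carries the identity. A self-contained analog of the iterator:

    struct perf_cpu { int cpu; };

    struct perf_cpu_map {
            int nr;
            struct perf_cpu map[];
    };

    /* Iterator in the style of perf_cpu_map__for_each_cpu(): binds
     * both the array index and the cpu value each iteration. */
    #define cpu_map__for_each_cpu(c, idx, m)                        \
            for ((idx) = 0;                                         \
                 (idx) < (m)->nr && ((c) = (m)->map[(idx)], 1);     \
                 (idx)++)
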
" "Consider raising MAX_NR_CPUS\n", cpu.cpu); @@ -2906,3 +2916,24 @@ int perf_event__process_id_index(struct perf_session *session, } return 0; } + +int perf_session__dsos_hit_all(struct perf_session *session) +{ + struct rb_node *nd; + int err; + + err = machine__hit_all_dsos(&session->machines.host); + if (err) + return err; + + for (nd = rb_first_cached(&session->machines.guests); nd; + nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + + err = machine__hit_all_dsos(pos); + if (err) + return err; + } + + return 0; +} diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index ee3715e8563b..3b0256e977a6 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -133,6 +133,8 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp, bool skip_empty); +void perf_session__dump_kmaps(struct perf_session *session); + struct evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type); @@ -154,6 +156,8 @@ int perf_session__deliver_synth_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample); +int perf_session__dsos_hit_all(struct perf_session *session); + int perf_event__process_id_index(struct perf_session *session, union perf_event *event); diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 79d5e2955f85..3107f5aa8c9a 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -85,6 +85,7 @@ if '-DHAVE_LIBTRACEEVENT' in cflags: extra_libraries += [ 'traceevent' ] else: ext_sources.remove('util/trace-event.c') + ext_sources.remove('util/trace-event-parse.c') # use full paths with source files ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 30254eb63709..cd39ea972193 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -23,6 +23,7 @@ #include "strlist.h" #include "strbuf.h" #include "mem-events.h" +#include "mem-info.h" #include "annotate.h" #include "annotate-data.h" #include "event.h" @@ -239,11 +240,11 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) return cmp_null(dso_r, dso_l); if (verbose > 0) { - dso_name_l = dso_l->long_name; - dso_name_r = dso_r->long_name; + dso_name_l = dso__long_name(dso_l); + dso_name_r = dso__long_name(dso_r); } else { - dso_name_l = dso_l->short_name; - dso_name_r = dso_r->short_name; + dso_name_l = dso__short_name(dso_l); + dso_name_r = dso__short_name(dso_r); } return strcmp(dso_name_l, dso_name_r); @@ -262,7 +263,7 @@ static int _hist_entry__dso_snprintf(struct map *map, char *bf, const char *dso_name = "[unknown]"; if (dso) - dso_name = verbose > 0 ? dso->long_name : dso->short_name; + dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso); return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); } @@ -364,7 +365,7 @@ static int _hist_entry__sym_snprintf(struct map_symbol *ms, char o = dso ? 
dso__symtab_origin(dso) : '!'; u64 rip = ip; - if (dso && dso->kernel && dso->adjust_symbols) + if (dso && dso__kernel(dso) && dso__adjust_symbols(dso)) rip = map__unmap_ip(map, ip); ret += repsep_snprintf(bf, size, "%-#*llx %c ", @@ -1364,9 +1365,9 @@ sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) uint64_t l = 0, r = 0; if (left->mem_info) - l = left->mem_info->daddr.addr; + l = mem_info__daddr(left->mem_info)->addr; if (right->mem_info) - r = right->mem_info->daddr.addr; + r = mem_info__daddr(right->mem_info)->addr; return (int64_t)(r - l); } @@ -1378,8 +1379,8 @@ static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, struct map_symbol *ms = NULL; if (he->mem_info) { - addr = he->mem_info->daddr.addr; - ms = &he->mem_info->daddr.ms; + addr = mem_info__daddr(he->mem_info)->addr; + ms = &mem_info__daddr(he->mem_info)->ms; } return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); } @@ -1390,9 +1391,9 @@ sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) uint64_t l = 0, r = 0; if (left->mem_info) - l = left->mem_info->iaddr.addr; + l = mem_info__iaddr(left->mem_info)->addr; if (right->mem_info) - r = right->mem_info->iaddr.addr; + r = mem_info__iaddr(right->mem_info)->addr; return (int64_t)(r - l); } @@ -1404,8 +1405,8 @@ static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, struct map_symbol *ms = NULL; if (he->mem_info) { - addr = he->mem_info->iaddr.addr; - ms = &he->mem_info->iaddr.ms; + addr = mem_info__iaddr(he->mem_info)->addr; + ms = &mem_info__iaddr(he->mem_info)->ms; } return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); } @@ -1417,9 +1418,9 @@ sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) struct map *map_r = NULL; if (left->mem_info) - map_l = left->mem_info->daddr.ms.map; + map_l = mem_info__daddr(left->mem_info)->ms.map; if (right->mem_info) - map_r = right->mem_info->daddr.ms.map; + map_r = mem_info__daddr(right->mem_info)->ms.map; return _sort__dso_cmp(map_l, map_r); } @@ -1430,7 +1431,7 @@ static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, struct map *map = NULL; if (he->mem_info) - map = he->mem_info->daddr.ms.map; + map = mem_info__daddr(he->mem_info)->ms.map; return _hist_entry__dso_snprintf(map, bf, size, width); } @@ -1442,12 +1443,12 @@ sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) union perf_mem_data_src data_src_r; if (left->mem_info) - data_src_l = left->mem_info->data_src; + data_src_l = *mem_info__data_src(left->mem_info); else data_src_l.mem_lock = PERF_MEM_LOCK_NA; if (right->mem_info) - data_src_r = right->mem_info->data_src; + data_src_r = *mem_info__data_src(right->mem_info); else data_src_r.mem_lock = PERF_MEM_LOCK_NA; @@ -1470,12 +1471,12 @@ sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) union perf_mem_data_src data_src_r; if (left->mem_info) - data_src_l = left->mem_info->data_src; + data_src_l = *mem_info__data_src(left->mem_info); else data_src_l.mem_dtlb = PERF_MEM_TLB_NA; if (right->mem_info) - data_src_r = right->mem_info->data_src; + data_src_r = *mem_info__data_src(right->mem_info); else data_src_r.mem_dtlb = PERF_MEM_TLB_NA; @@ -1498,12 +1499,12 @@ sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) union perf_mem_data_src data_src_r; if (left->mem_info) - data_src_l = left->mem_info->data_src; + data_src_l = *mem_info__data_src(left->mem_info); else data_src_l.mem_lvl = PERF_MEM_LVL_NA; if (right->mem_info) - data_src_r = 
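
All of the sort__*_cmp() conversions in this file share one shape: copy the data_src out when mem_info exists, otherwise substitute the PERF_MEM_*_NA "not available" encoding so entries lacking memory data still order deterministically. That shape, reduced to a sketch with hypothetical parameters:

    /* has_l/has_r say whether each side carries data; na is the
     * "not available" sentinel substituted when it does not. */
    static long long cmp_or_na(int has_l, long long l,
                               int has_r, long long r, long long na)
    {
            if (!has_l)
                    l = na;
            if (!has_r)
                    r = na;
            return r - l;   /* same descending order as the sorters */
    }
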
right->mem_info->data_src; + data_src_r = *mem_info__data_src(right->mem_info); else data_src_r.mem_lvl = PERF_MEM_LVL_NA; @@ -1526,12 +1527,12 @@ sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) union perf_mem_data_src data_src_r; if (left->mem_info) - data_src_l = left->mem_info->data_src; + data_src_l = *mem_info__data_src(left->mem_info); else data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; if (right->mem_info) - data_src_r = right->mem_info->data_src; + data_src_r = *mem_info__data_src(right->mem_info); else data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; @@ -1562,8 +1563,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) if (left->cpumode > right->cpumode) return -1; if (left->cpumode < right->cpumode) return 1; - l_map = left->mem_info->daddr.ms.map; - r_map = right->mem_info->daddr.ms.map; + l_map = mem_info__daddr(left->mem_info)->ms.map; + r_map = mem_info__daddr(right->mem_info)->ms.map; /* if both are NULL, jump to sort on al_addr instead */ if (!l_map && !r_map) @@ -1586,8 +1587,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) */ if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && - (!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min && - !l_dso->id.ino && !l_dso->id.ino_generation) { + (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min && + !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) { /* userspace anonymous */ if (thread__pid(left->thread) > thread__pid(right->thread)) @@ -1598,8 +1599,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) addr: /* al_addr does all the right addr - start + offset calculations */ - l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl); - r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl); + l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl); + r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl); if (l > r) return -1; if (l < r) return 1; @@ -1616,17 +1617,18 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, char level = he->level; if (he->mem_info) { - struct map *map = he->mem_info->daddr.ms.map; + struct map *map = mem_info__daddr(he->mem_info)->ms.map; struct dso *dso = map ? 
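
sort__dcacheline_cmp() groups data addresses by cache line through cl_address(); with chk_double_cl set the granularity doubles, to catch contention on adjacent lines. An illustrative fold, assuming 64-byte lines (the real helper derives the size from the running system):

    typedef unsigned long long u64;

    static u64 cl_address(u64 addr, int double_cl)
    {
            unsigned int shift = double_cl ? 7 : 6;   /* 128B vs 64B */

            return addr & ~((1ULL << shift) - 1);
    }
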
map__dso(map) : NULL; - addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl); - ms = &he->mem_info->daddr.ms; + addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl); + ms = &mem_info__daddr(he->mem_info)->ms; /* print [s] for shared data mmaps */ if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && map && !(map__prot(map) & PROT_EXEC) && (map__flags(map) & MAP_SHARED) && - (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation)) + (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino || + dso__id(dso)->ino_generation)) level = 's'; else if (!map) level = 'X'; @@ -1804,12 +1806,12 @@ sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right) union perf_mem_data_src data_src_r; if (left->mem_info) - data_src_l = left->mem_info->data_src; + data_src_l = *mem_info__data_src(left->mem_info); else data_src_l.mem_blk = PERF_MEM_BLK_NA; if (right->mem_info) - data_src_r = right->mem_info->data_src; + data_src_r = *mem_info__data_src(right->mem_info); else data_src_r.mem_blk = PERF_MEM_BLK_NA; @@ -1838,9 +1840,9 @@ sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) uint64_t l = 0, r = 0; if (left->mem_info) - l = left->mem_info->daddr.phys_addr; + l = mem_info__daddr(left->mem_info)->phys_addr; if (right->mem_info) - r = right->mem_info->daddr.phys_addr; + r = mem_info__daddr(right->mem_info)->phys_addr; return (int64_t)(r - l); } @@ -1852,7 +1854,7 @@ static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, size_t ret = 0; size_t len = BITS_PER_LONG / 4; - addr = he->mem_info->daddr.phys_addr; + addr = mem_info__daddr(he->mem_info)->phys_addr; ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); @@ -1879,9 +1881,9 @@ sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right) uint64_t l = 0, r = 0; if (left->mem_info) - l = left->mem_info->daddr.data_page_size; + l = mem_info__daddr(left->mem_info)->data_page_size; if (right->mem_info) - r = right->mem_info->daddr.data_page_size; + r = mem_info__daddr(right->mem_info)->data_page_size; return (int64_t)(r - l); } @@ -1892,7 +1894,7 @@ static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf, char str[PAGE_SIZE_NAME_LEN]; return repsep_snprintf(bf, size, "%-*s", width, - get_page_size_name(he->mem_info->daddr.data_page_size, str)); + get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str)); } struct sort_entry sort_mem_data_page_size = { @@ -2441,6 +2443,13 @@ static struct hpp_dimension hpp_sort_dimensions[] = { DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), DIM(PERF_HPP__SAMPLES, "sample"), DIM(PERF_HPP__PERIOD, "period"), + DIM(PERF_HPP__WEIGHT1, "weight1"), + DIM(PERF_HPP__WEIGHT2, "weight2"), + DIM(PERF_HPP__WEIGHT3, "weight3"), + /* aliases for weight_struct */ + DIM(PERF_HPP__WEIGHT2, "ins_lat"), + DIM(PERF_HPP__WEIGHT3, "retire_lat"), + DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"), }; #undef DIM @@ -3372,7 +3381,7 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok, sort_dimension_add_dynamic_header(sd); } - if (sd->entry == &sort_parent) { + if (sd->entry == &sort_parent && parent_pattern) { int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); if (ret) { char err[BUFSIZ]; @@ -3743,26 +3752,29 @@ void sort__setup_elide(FILE *output) } } -int output_field_add(struct perf_hpp_list *list, char *tok) +int output_field_add(struct perf_hpp_list *list, const char *tok) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { - struct 
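
The sort_dimension__add() fix above compiles parent_regex only when a parent_pattern was actually supplied, since handing regcomp(3) a NULL pattern is undefined. The guarded-compile shape in isolation:

    #include <regex.h>
    #include <stdio.h>

    /* Compile a user-supplied pattern only when one was given. */
    static int compile_parent_regex(regex_t *re, const char *pattern)
    {
            int err;

            if (!pattern)
                    return 0;       /* nothing to filter on */
            err = regcomp(re, pattern, REG_EXTENDED);
            if (err) {
                    char buf[BUFSIZ];

                    regerror(err, re, buf, sizeof(buf));
                    fprintf(stderr, "regcomp failed: %s\n", buf);
            }
            return err;
    }
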
sort_dimension *sd = &common_sort_dimensions[i]; + for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { + struct hpp_dimension *hd = &hpp_sort_dimensions[i]; - if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) + if (strncasecmp(tok, hd->name, strlen(tok))) continue; - return __sort_dimension__add_output(list, sd); + if (!strcasecmp(tok, "weight")) + ui__warning("--fields weight shows the average value unlike in the --sort key.\n"); + + return __hpp_dimension__add_output(list, hd); } - for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { - struct hpp_dimension *hd = &hpp_sort_dimensions[i]; + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { + struct sort_dimension *sd = &common_sort_dimensions[i]; - if (strncasecmp(tok, hd->name, strlen(tok))) + if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) continue; - return __hpp_dimension__add_output(list, hd); + return __sort_dimension__add_output(list, sd); } for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 6f6b4189a389..0bd0ee3ae76b 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -3,19 +3,9 @@ #define __PERF_SORT_H #include <regex.h> #include <stdbool.h> -#include <linux/list.h> -#include <linux/rbtree.h> -#include "map_symbol.h" -#include "symbol_conf.h" -#include "callchain.h" -#include "values.h" #include "hist.h" -#include "stat.h" -#include "spark.h" struct option; -struct thread; -struct annotated_data_type; extern regex_t parent_regex; extern const char *sort_order; @@ -39,175 +29,6 @@ extern struct sort_entry sort_type; extern const char default_mem_sort_order[]; extern bool chk_double_cl; -struct res_sample { - u64 time; - int cpu; - int tid; -}; - -struct he_stat { - u64 period; - u64 period_sys; - u64 period_us; - u64 period_guest_sys; - u64 period_guest_us; - u32 nr_events; -}; - -struct namespace_id { - u64 dev; - u64 ino; -}; - -struct hist_entry_diff { - bool computed; - union { - /* PERF_HPP__DELTA */ - double period_ratio_delta; - - /* PERF_HPP__RATIO */ - double period_ratio; - - /* HISTC_WEIGHTED_DIFF */ - s64 wdiff; - - /* PERF_HPP_DIFF__CYCLES */ - s64 cycles; - }; - struct stats stats; - unsigned long svals[NUM_SPARKS]; -}; - -struct hist_entry_ops { - void *(*new)(size_t size); - void (*free)(void *ptr); -}; - -/** - * struct hist_entry - histogram entry - * - * @row_offset - offset from the first callchain expanded to appear on screen - * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding - */ -struct hist_entry { - struct rb_node rb_node_in; - struct rb_node rb_node; - union { - struct list_head node; - struct list_head head; - } pairs; - struct he_stat stat; - struct he_stat *stat_acc; - struct map_symbol ms; - struct thread *thread; - struct comm *comm; - struct namespace_id cgroup_id; - u64 cgroup; - u64 ip; - u64 transaction; - s32 socket; - s32 cpu; - u64 code_page_size; - u64 weight; - u64 ins_lat; - u64 p_stage_cyc; - u8 cpumode; - u8 depth; - int mem_type_off; - struct simd_flags simd_flags; - - /* We are added by hists__add_dummy_entry. */ - bool dummy; - bool leaf; - - char level; - u8 filtered; - - u16 callchain_size; - union { - /* - * Since perf diff only supports the stdio output, TUI - * fields are only accessed from perf report (or perf - * top). So make it a union to reduce memory usage. 
- */ - struct hist_entry_diff diff; - struct /* for TUI */ { - u16 row_offset; - u16 nr_rows; - bool init_have_children; - bool unfolded; - bool has_children; - bool has_no_entry; - }; - }; - char *srcline; - char *srcfile; - struct symbol *parent; - struct branch_info *branch_info; - long time; - struct hists *hists; - struct mem_info *mem_info; - struct block_info *block_info; - struct kvm_info *kvm_info; - void *raw_data; - u32 raw_size; - int num_res; - struct res_sample *res_samples; - void *trace_output; - struct perf_hpp_list *hpp_list; - struct hist_entry *parent_he; - struct hist_entry_ops *ops; - struct annotated_data_type *mem_type; - union { - /* this is for hierarchical entry structure */ - struct { - struct rb_root_cached hroot_in; - struct rb_root_cached hroot_out; - }; /* non-leaf entries */ - struct rb_root sorted_chain; /* leaf entry has callchains */ - }; - struct callchain_root callchain[0]; /* must be last member */ -}; - -static __pure inline bool hist_entry__has_callchains(struct hist_entry *he) -{ - return he->callchain_size != 0; -} - -int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width); - -static inline bool hist_entry__has_pairs(struct hist_entry *he) -{ - return !list_empty(&he->pairs.node); -} - -static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he) -{ - if (hist_entry__has_pairs(he)) - return list_entry(he->pairs.node.next, struct hist_entry, pairs.node); - return NULL; -} - -static inline void hist_entry__add_pair(struct hist_entry *pair, - struct hist_entry *he) -{ - list_add_tail(&pair->pairs.node, &he->pairs.head); -} - -static inline float hist_entry__get_percent_limit(struct hist_entry *he) -{ - u64 period = he->stat.period; - u64 total_period = hists__total_period(he->hists); - - if (unlikely(total_period == 0)) - return 0; - - if (symbol_conf.cumulate_callchain) - period = he->stat_acc->period; - - return period * 100.0 / total_period; -} - enum sort_mode { SORT_MODE__NORMAL, SORT_MODE__BRANCH, @@ -299,15 +120,6 @@ struct sort_entry { u8 se_width_idx; }; -struct block_hist { - struct hists block_hists; - struct perf_hpp_list block_list; - struct perf_hpp_fmt block_fmt; - int block_idx; - bool valid; - struct hist_entry he; -}; - extern struct sort_entry sort_thread; struct evlist; @@ -329,7 +141,7 @@ void reset_dimensions(void); int sort_dimension__add(struct perf_hpp_list *list, const char *tok, struct evlist *evlist, int level); -int output_field_add(struct perf_hpp_list *list, char *tok); +int output_field_add(struct perf_hpp_list *list, const char *tok); int64_t sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right); int64_t diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c index 034b496df297..9d670d8c1c08 100644 --- a/tools/perf/util/srcline.c +++ b/tools/perf/util/srcline.c @@ -27,14 +27,14 @@ bool srcline_full_filename; char *srcline__unknown = (char *)"??:0"; -static const char *dso__name(struct dso *dso) +static const char *srcline_dso_name(struct dso *dso) { const char *dso_name; - if (dso->symsrc_filename) - dso_name = dso->symsrc_filename; + if (dso__symsrc_filename(dso)) + dso_name = dso__symsrc_filename(dso); else - dso_name = dso->long_name; + dso_name = dso__long_name(dso); if (dso_name[0] == '[') return NULL; @@ -399,6 +399,8 @@ static void addr2line_subprocess_cleanup(struct child_process *a2l) kill(a2l->pid, SIGKILL); finish_command(a2l); /* ignore result, we don't care */ a2l->pid = -1; + close(a2l->in); + close(a2l->out); } free(a2l); @@ 
-636,7 +638,7 @@ static int addr2line(const char *dso_name, u64 addr, struct inline_node *node, struct symbol *sym __maybe_unused) { - struct child_process *a2l = dso->a2l; + struct child_process *a2l = dso__a2l(dso); char *record_function = NULL; char *record_filename = NULL; unsigned int record_line_nr = 0; @@ -653,8 +655,9 @@ static int addr2line(const char *dso_name, u64 addr, if (!filename__has_section(dso_name, ".debug_line")) goto out; - dso->a2l = addr2line_subprocess_init(symbol_conf.addr2line_path, dso_name); - a2l = dso->a2l; + dso__set_a2l(dso, + addr2line_subprocess_init(symbol_conf.addr2line_path, dso_name)); + a2l = dso__a2l(dso); } if (a2l == NULL) { @@ -768,7 +771,7 @@ out: free(record_function); free(record_filename); if (io.eof) { - dso->a2l = NULL; + dso__set_a2l(dso, NULL); addr2line_subprocess_cleanup(a2l); } return ret; @@ -776,14 +779,14 @@ out: void dso__free_a2l(struct dso *dso) { - struct child_process *a2l = dso->a2l; + struct child_process *a2l = dso__a2l(dso); if (!a2l) return; addr2line_subprocess_cleanup(a2l); - dso->a2l = NULL; + dso__set_a2l(dso, NULL); } #endif /* HAVE_LIBBFD_SUPPORT */ @@ -821,33 +824,34 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym, char *srcline; const char *dso_name; - if (!dso->has_srcline) + if (!dso__has_srcline(dso)) goto out; - dso_name = dso__name(dso); + dso_name = srcline_dso_name(dso); if (dso_name == NULL) - goto out; + goto out_err; if (!addr2line(dso_name, addr, &file, &line, dso, unwind_inlines, NULL, sym)) - goto out; + goto out_err; srcline = srcline_from_fileline(file, line); free(file); if (!srcline) - goto out; + goto out_err; - dso->a2l_fails = 0; + dso__set_a2l_fails(dso, 0); return srcline; -out: - if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) { - dso->has_srcline = 0; +out_err: + dso__set_a2l_fails(dso, dso__a2l_fails(dso) + 1); + if (dso__a2l_fails(dso) > A2L_FAIL_LIMIT) { + dso__set_has_srcline(dso, false); dso__free_a2l(dso); } - +out: if (!show_addr) return (show_sym && sym) ? strndup(sym->name, sym->namelen) : SRCLINE_UNKNOWN; @@ -856,7 +860,7 @@ out: if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? 
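
The reworked __get_srcline() error path above counts consecutive addr2line failures per dso and, past a limit, marks the dso as having no source-line info and tears down the subprocess, a circuit breaker against paying subprocess cost forever on a binary that cannot be resolved. Reduced form (threshold value illustrative; perf defines its own A2L_FAIL_LIMIT):

    #define A2L_FAIL_LIMIT 123      /* illustrative threshold */

    struct a2l_state {
            unsigned int fails;
            int has_srcline;
    };

    static void note_a2l_failure(struct a2l_state *s)
    {
            if (++s->fails > A2L_FAIL_LIMIT) {
                    s->has_srcline = 0; /* stop trying for this dso */
                    /* ...and free the addr2line subprocess here. */
            }
    }
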
sym->name : "", ip - sym->start) < 0) return SRCLINE_UNKNOWN; - } else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0) + } else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso__short_name(dso), addr) < 0) return SRCLINE_UNKNOWN; return srcline; } @@ -867,22 +871,23 @@ char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line) char *file = NULL; const char *dso_name; - if (!dso->has_srcline) - goto out; + if (!dso__has_srcline(dso)) + return NULL; - dso_name = dso__name(dso); + dso_name = srcline_dso_name(dso); if (dso_name == NULL) - goto out; + goto out_err; if (!addr2line(dso_name, addr, &file, line, dso, true, NULL, NULL)) - goto out; + goto out_err; - dso->a2l_fails = 0; + dso__set_a2l_fails(dso, 0); return file; -out: - if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) { - dso->has_srcline = 0; +out_err: + dso__set_a2l_fails(dso, dso__a2l_fails(dso) + 1); + if (dso__a2l_fails(dso) > A2L_FAIL_LIMIT) { + dso__set_has_srcline(dso, false); dso__free_a2l(dso); } @@ -980,7 +985,7 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr, { const char *dso_name; - dso_name = dso__name(dso); + dso_name = srcline_dso_name(dso); if (dso_name == NULL) return NULL; diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 8c61f8627ebc..91d2f7f65df7 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -201,6 +201,9 @@ static void print_aggr_id_std(struct perf_stat_config *config, snprintf(buf, sizeof(buf), "S%d-D%d-L%d-ID%d", id.socket, id.die, id.cache_lvl, id.cache); break; + case AGGR_CLUSTER: + snprintf(buf, sizeof(buf), "S%d-D%d-CLS%d", id.socket, id.die, id.cluster); + break; case AGGR_DIE: snprintf(buf, sizeof(buf), "S%d-D%d", id.socket, id.die); break; @@ -251,6 +254,10 @@ static void print_aggr_id_csv(struct perf_stat_config *config, fprintf(config->output, "S%d-D%d-L%d-ID%d%s%d%s", id.socket, id.die, id.cache_lvl, id.cache, sep, aggr_nr, sep); break; + case AGGR_CLUSTER: + fprintf(config->output, "S%d-D%d-CLS%d%s%d%s", + id.socket, id.die, id.cluster, sep, aggr_nr, sep); + break; case AGGR_DIE: fprintf(output, "S%d-D%d%s%d%s", id.socket, id.die, sep, aggr_nr, sep); @@ -300,6 +307,10 @@ static void print_aggr_id_json(struct perf_stat_config *config, fprintf(output, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d, ", id.socket, id.die, id.cache_lvl, id.cache, aggr_nr); break; + case AGGR_CLUSTER: + fprintf(output, "\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d, ", + id.socket, id.die, id.cluster, aggr_nr); + break; case AGGR_DIE: fprintf(output, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d, ", id.socket, id.die, aggr_nr); @@ -560,7 +571,7 @@ static void print_metric_only(struct perf_stat_config *config, if (color) mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1; - color_snprintf(str, sizeof(str), color ?: "", fmt, val); + color_snprintf(str, sizeof(str), color ?: "", fmt ?: "", val); fprintf(out, "%*s ", mlen, str); os->first = false; } @@ -1126,11 +1137,16 @@ static void print_no_aggr_metric(struct perf_stat_config *config, u64 ena, run, val; double uval; struct perf_stat_evsel *ps = counter->stats; - int aggr_idx = perf_cpu_map__idx(evsel__cpus(counter), cpu); + int aggr_idx = 0; - if (aggr_idx < 0) + if (!perf_cpu_map__has(evsel__cpus(counter), cpu)) continue; + cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) { + if (config->aggr_map->map[aggr_idx].cpu.cpu == cpu.cpu) + break; + } + os->evsel = counter; os->id = aggr_cpu_id__cpu(cpu, 
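
The stat-display.c additions above teach every output flavour (standard, CSV, JSON) about the new AGGR_CLUSTER mode, naming an aggregate by socket, die and cluster. The standard-form label, as a small sketch mirroring print_aggr_id_std():

    #include <stdio.h>

    struct aggr_id { int socket, die, cluster; };

    /* "S0-D0-CLS2" style label for --per-cluster aggregation. */
    static void cluster_id_str(char *buf, size_t len, struct aggr_id id)
    {
            snprintf(buf, len, "S%d-D%d-CLS%d",
                     id.socket, id.die, id.cluster);
    }
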
/*data=*/NULL); if (first) { @@ -1207,6 +1223,9 @@ static void print_metric_headers(struct perf_stat_config *config, /* Print metrics headers only */ evlist__for_each_entry(evlist, counter) { + if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) + continue; + os.evsel = counter; perf_stat__print_shadow_stats(config, counter, 0, @@ -1248,6 +1267,7 @@ static void print_header_interval_std(struct perf_stat_config *config, case AGGR_NODE: case AGGR_SOCKET: case AGGR_DIE: + case AGGR_CLUSTER: case AGGR_CACHE: case AGGR_CORE: fprintf(output, "#%*s %-*s cpus", @@ -1550,6 +1570,7 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf switch (config->aggr_mode) { case AGGR_CORE: case AGGR_CACHE: + case AGGR_CLUSTER: case AGGR_DIE: case AGGR_SOCKET: case AGGR_NODE: diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index e31426167852..3466aa952442 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -355,11 +355,13 @@ static void print_nsecs(struct perf_stat_config *config, print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0); } -static int prepare_metric(struct evsel **metric_events, - struct metric_ref *metric_refs, +static int prepare_metric(const struct metric_expr *mexp, + const struct evsel *evsel, struct expr_parse_ctx *pctx, int aggr_idx) { + struct evsel * const *metric_events = mexp->metric_events; + struct metric_ref *metric_refs = mexp->metric_refs; int i; for (i = 0; metric_events[i]; i++) { @@ -398,12 +400,33 @@ static int prepare_metric(struct evsel **metric_events, source_count = 1; } else { struct perf_stat_evsel *ps = metric_events[i]->stats; - struct perf_stat_aggr *aggr = &ps->aggr[aggr_idx]; + struct perf_stat_aggr *aggr; + /* + * If there are multiple uncore PMUs and we're not + * reading the leader's stats, determine the stats for + * the appropriate uncore PMU. + */ + if (evsel && evsel->metric_leader && + evsel->pmu != evsel->metric_leader->pmu && + mexp->metric_events[i]->pmu == evsel->metric_leader->pmu) { + struct evsel *pos; + + evlist__for_each_entry(evsel->evlist, pos) { + if (pos->pmu != evsel->pmu) + continue; + if (pos->metric_leader != mexp->metric_events[i]) + continue; + ps = pos->stats; + source_count = 1; + break; + } + } + aggr = &ps->aggr[aggr_idx]; if (!aggr) break; - if (!metric_events[i]->supported) { + if (!metric_events[i]->supported) { /* * Not supported events will have a count of 0, * which can be confusing in a @@ -414,13 +437,9 @@ static int prepare_metric(struct evsel **metric_events, val = NAN; source_count = 0; } else { - /* - * If an event was scaled during stat gathering, - * reverse the scale before computing the - * metric. 
- */ - val = aggr->counts.val * (1.0 / metric_events[i]->scale); - source_count = evsel__source_count(metric_events[i]); + val = aggr->counts.val; + if (!source_count) + source_count = evsel__source_count(metric_events[i]); } } n = strdup(evsel__metric_id(metric_events[i])); @@ -441,18 +460,18 @@ static int prepare_metric(struct evsel **metric_events, } static void generic_metric(struct perf_stat_config *config, - const char *metric_expr, - const char *metric_threshold, - struct evsel **metric_events, - struct metric_ref *metric_refs, - char *name, - const char *metric_name, - const char *metric_unit, - int runtime, + struct metric_expr *mexp, + struct evsel *evsel, int aggr_idx, struct perf_stat_output_ctx *out) { print_metric_t print_metric = out->print_metric; + const char *metric_name = mexp->metric_name; + const char *metric_expr = mexp->metric_expr; + const char *metric_threshold = mexp->metric_threshold; + const char *metric_unit = mexp->metric_unit; + struct evsel * const *metric_events = mexp->metric_events; + int runtime = mexp->runtime; struct expr_parse_ctx *pctx; double ratio, scale, threshold; int i; @@ -467,7 +486,7 @@ static void generic_metric(struct perf_stat_config *config, pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list); pctx->sctx.runtime = runtime; pctx->sctx.system_wide = config->system_wide; - i = prepare_metric(metric_events, metric_refs, pctx, aggr_idx); + i = prepare_metric(mexp, evsel, pctx, aggr_idx); if (i < 0) { expr__ctx_free(pctx); return; @@ -502,18 +521,18 @@ static void generic_metric(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.2f", metric_name ? metric_name : - out->force_header ? name : "", + out->force_header ? evsel->name : "", ratio); } } else { print_metric(config, ctxp, color, /*unit=*/NULL, out->force_header ? - (metric_name ? metric_name : name) : "", 0); + (metric_name ?: evsel->name) : "", 0); } } else { print_metric(config, ctxp, color, /*unit=*/NULL, out->force_header ? - (metric_name ? 
metric_name : name) : "", 0); + (metric_name ?: evsel->name) : "", 0); } expr__ctx_free(pctx); @@ -528,7 +547,7 @@ double test_generic_metric(struct metric_expr *mexp, int aggr_idx) if (!pctx) return NAN; - if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, aggr_idx) < 0) + if (prepare_metric(mexp, /*evsel=*/NULL, pctx, aggr_idx) < 0) goto out; if (expr__parse(&ratio, pctx, mexp->metric_expr)) @@ -630,10 +649,7 @@ void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config, if ((*num)++ > 0) out->new_line(config, ctxp); - generic_metric(config, mexp->metric_expr, mexp->metric_threshold, - mexp->metric_events, mexp->metric_refs, evsel->name, - mexp->metric_name, mexp->metric_unit, mexp->runtime, - aggr_idx, out); + generic_metric(config, mexp, evsel, aggr_idx, out); } return NULL; diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index b0bcf92f0f9c..0bd5467389e4 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -315,7 +315,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals, if (!counter->per_pkg) return 0; - if (perf_cpu_map__has_any_cpu_or_is_empty(cpus)) + if (perf_cpu_map__is_any_cpu_or_is_empty(cpus)) return 0; if (!mask) { diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 4357ba114822..fd7a187551bd 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -48,6 +48,7 @@ enum aggr_mode { AGGR_GLOBAL, AGGR_SOCKET, AGGR_DIE, + AGGR_CLUSTER, AGGR_CACHE, AGGR_CORE, AGGR_THREAD, @@ -86,6 +87,7 @@ struct perf_stat_config { bool metric_no_group; bool metric_no_merge; bool metric_no_threshold; + bool hardware_aware_grouping; bool stop_read_counter; bool iostat_run; char *user_requested_cpu_list; diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index 1892e9b6aa7f..2b04f47f4db0 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c @@ -725,26 +725,24 @@ static void scan_core_topology(int *map, struct topology *t, int nr_cpus) static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus) { - int i; - int ret = 0; - struct perf_cpu_map *m; - struct perf_cpu c; + int idx, ret = 0; + struct perf_cpu_map *map; + struct perf_cpu cpu; - m = perf_cpu_map__new(s); - if (!m) + map = perf_cpu_map__new(s); + if (!map) return -1; - for (i = 0; i < perf_cpu_map__nr(m); i++) { - c = perf_cpu_map__cpu(m, i); - if (c.cpu >= nr_cpus) { + perf_cpu_map__for_each_cpu(cpu, idx, map) { + if (cpu.cpu >= nr_cpus) { ret = -1; break; } - __set_bit(c.cpu, cpumask_bits(b)); + __set_bit(cpu.cpu, cpumask_bits(b)); } - perf_cpu_map__put(m); + perf_cpu_map__put(map); return ret; } diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 4b934ed3bfd1..e398abfd13a0 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -23,6 +23,7 @@ #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/zalloc.h> +#include <linux/string.h> #include <symbol/kallsyms.h> #include <internal/lib.h> @@ -173,7 +174,7 @@ static inline bool elf_sec__is_data(const GElf_Shdr *shdr, static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs) { - return elf_sec__is_text(shdr, secstrs) || + return elf_sec__is_text(shdr, secstrs) || elf_sec__is_data(shdr, secstrs); } @@ -311,8 +312,8 @@ static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name) * DWARF DW_compile_unit has this, but we don't always have access * to it... 
*/ - if (!want_demangle(dso->kernel || kmodule)) - return demangled; + if (!want_demangle(dso__kernel(dso) || kmodule)) + return demangled; demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0); if (demangled == NULL) { @@ -469,7 +470,7 @@ static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt, } if (*plt_entry_size) return true; - pr_debug("Missing PLT entry size for %s\n", dso->long_name); + pr_debug("Missing PLT entry size for %s\n", dso__long_name(dso)); return false; } @@ -653,7 +654,7 @@ static int dso__synthesize_plt_got_symbols(struct dso *dso, Elf *elf, sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf); if (!sym) goto out; - symbols__insert(&dso->symbols, sym); + symbols__insert(dso__symbols(dso), sym); } err = 0; out: @@ -707,7 +708,7 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss) plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt"); if (!plt_sym) goto out_elf_end; - symbols__insert(&dso->symbols, plt_sym); + symbols__insert(dso__symbols(dso), plt_sym); /* Only x86 has .plt.got */ if (machine_is_x86(ehdr.e_machine) && @@ -829,7 +830,7 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss) goto out_elf_end; plt_offset += plt_entry_size; - symbols__insert(&dso->symbols, f); + symbols__insert(dso__symbols(dso), f); ++nr; } @@ -839,7 +840,7 @@ out_elf_end: if (err == 0) return nr; pr_debug("%s: problems reading %s PLT info.\n", - __func__, dso->long_name); + __func__, dso__long_name(dso)); return 0; } @@ -1174,19 +1175,19 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata) { static unsigned int const endian = 1; - dso->needs_swap = DSO_SWAP__NO; + dso__set_needs_swap(dso, DSO_SWAP__NO); switch (eidata) { case ELFDATA2LSB: /* We are big endian, DSO is little endian. */ if (*(unsigned char const *)&endian != 1) - dso->needs_swap = DSO_SWAP__YES; + dso__set_needs_swap(dso, DSO_SWAP__YES); break; case ELFDATA2MSB: /* We are little endian, DSO is big endian. 
*/ if (*(unsigned char const *)&endian != 0) - dso->needs_swap = DSO_SWAP__YES; + dso__set_needs_swap(dso, DSO_SWAP__YES); break; default: @@ -1237,11 +1238,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, if (fd < 0) return -1; - type = dso->symtab_type; + type = dso__symtab_type(dso); } else { fd = open(name, O_RDONLY); if (fd < 0) { - dso->load_errno = errno; + *dso__load_errno(dso) = errno; return -1; } } @@ -1249,37 +1250,37 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); if (elf == NULL) { pr_debug("%s: cannot read %s ELF file.\n", __func__, name); - dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF; + *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF; goto out_close; } if (gelf_getehdr(elf, &ehdr) == NULL) { - dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF; + *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF; pr_debug("%s: cannot get elf header.\n", __func__); goto out_elf_end; } if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) { - dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR; + *dso__load_errno(dso) = DSO_LOAD_ERRNO__INTERNAL_ERROR; goto out_elf_end; } /* Always reject images with a mismatched build-id: */ - if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) { + if (dso__has_build_id(dso) && !symbol_conf.ignore_vmlinux_buildid) { u8 build_id[BUILD_ID_SIZE]; struct build_id bid; int size; size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE); if (size <= 0) { - dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID; + *dso__load_errno(dso) = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID; goto out_elf_end; } build_id__init(&bid, build_id, size); if (!dso__build_id_equal(dso, &bid)) { pr_debug("%s: build id mismatch for %s.\n", __func__, name); - dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID; + *dso__load_errno(dso) = DSO_LOAD_ERRNO__MISMATCHING_BUILDID; goto out_elf_end; } } @@ -1304,14 +1305,14 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, if (ss->opdshdr.sh_type != SHT_PROGBITS) ss->opdsec = NULL; - if (dso->kernel == DSO_SPACE__USER) + if (dso__kernel(dso) == DSO_SPACE__USER) ss->adjust_symbols = true; else ss->adjust_symbols = elf__needs_adjust_symbols(ehdr); ss->name = strdup(name); if (!ss->name) { - dso->load_errno = errno; + *dso__load_errno(dso) = errno; goto out_elf_end; } @@ -1329,6 +1330,58 @@ out_close: return -1; } +static bool is_exe_text(int flags) +{ + return (flags & (SHF_ALLOC | SHF_EXECINSTR)) == (SHF_ALLOC | SHF_EXECINSTR); +} + +/* + * Some executable module sections like .noinstr.text might be laid out with + * .text so they can use the same mapping (memory address to file offset). + * Check if that is the case. Refer to kernel layout_sections(). Return the + * maximum offset. + */ +static u64 max_text_section(Elf *elf, GElf_Ehdr *ehdr) +{ + Elf_Scn *sec = NULL; + GElf_Shdr shdr; + u64 offs = 0; + + /* Doesn't work for some arch */ + if (ehdr->e_machine == EM_PARISC || + ehdr->e_machine == EM_ALPHA) + return 0; + + /* ELF is corrupted/truncated, avoid calling elf_strptr. 
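
dso__swap_init() above decides whether symbol values need byte swapping by probing the host's byte order and comparing it with the ELF's EI_DATA. The probe in isolation, runnable:

    #include <stdio.h>

    /* Look at the first byte of a known integer: 1 on a little-endian
     * host, 0 on a big-endian one; compare with the ELF's EI_DATA to
     * decide whether DSO_SWAP__YES is needed. */
    int main(void)
    {
            static const unsigned int endian = 1;
            int host_is_le = *(const unsigned char *)&endian == 1;

            printf("host is %s endian\n", host_is_le ? "little" : "big");
            return 0;
    }
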
*/ + if (!elf_rawdata(elf_getscn(elf, ehdr->e_shstrndx), NULL)) + return 0; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + char *sec_name; + + if (!gelf_getshdr(sec, &shdr)) + break; + + if (!is_exe_text(shdr.sh_flags)) + continue; + + /* .init and .exit sections are not placed with .text */ + sec_name = elf_strptr(elf, ehdr->e_shstrndx, shdr.sh_name); + if (!sec_name || + strstarts(sec_name, ".init") || + strstarts(sec_name, ".exit")) + break; + + /* Must be next to previous, assumes .text is first */ + if (offs && PERF_ALIGN(offs, shdr.sh_addralign ?: 1) != shdr.sh_offset) + break; + + offs = shdr.sh_offset + shdr.sh_size; + } + + return offs; +} + /** * ref_reloc_sym_not_found - has kernel relocation symbol been found. * @kmap: kernel maps and relocation reference symbol @@ -1366,9 +1419,10 @@ void __weak arch__sym_update(struct symbol *s __maybe_unused, static int dso__process_kernel_symbol(struct dso *dso, struct map *map, GElf_Sym *sym, GElf_Shdr *shdr, struct maps *kmaps, struct kmap *kmap, - struct dso **curr_dsop, struct map **curr_mapp, + struct dso **curr_dsop, const char *section_name, - bool adjust_kernel_syms, bool kmodule, bool *remap_kernel) + bool adjust_kernel_syms, bool kmodule, bool *remap_kernel, + u64 max_text_sh_offset) { struct dso *curr_dso = *curr_dsop; struct map *curr_map; @@ -1378,7 +1432,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, if (adjust_kernel_syms) sym->st_value -= shdr->sh_addr - shdr->sh_offset; - if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0) + if (strcmp(section_name, (dso__short_name(curr_dso) + dso__short_name_len(dso))) == 0) return 0; if (strcmp(section_name, ".text") == 0) { @@ -1387,7 +1441,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, * kallsyms and identity maps. Overwrite it to * map to the kernel dso. */ - if (*remap_kernel && dso->kernel && !kmodule) { + if (*remap_kernel && dso__kernel(dso) && !kmodule) { *remap_kernel = false; map__set_start(map, shdr->sh_addr + ref_reloc(kmap)); map__set_end(map, map__start(map) + shdr->sh_size); @@ -1416,15 +1470,26 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, map__set_pgoff(map, shdr->sh_offset); } - *curr_mapp = map; - *curr_dsop = dso; + dso__put(*curr_dsop); + *curr_dsop = dso__get(dso); return 0; } if (!kmap) return 0; - snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name); + /* + * perf does not record module section addresses except for .text, but + * some sections can use the same mapping as .text. 
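
max_text_section() above accepts a section as sharing .text's mapping only if it is executable, is not an .init/.exit section, and starts exactly where the previous section ended once its alignment is applied. The contiguity test in isolation (alignment of 0 behaves as 1, matching the "?: 1" in the hunk; ELF requires sh_addralign to be a power of two, which the mask relies on):

    typedef unsigned long long u64;

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((u64)(a) - 1))

    /* Does a section at sh_offset continue the text run that ended
     * at prev_end, given the section's alignment? */
    static int continues_text(u64 prev_end, u64 sh_offset, u64 align)
    {
            return ALIGN_UP(prev_end, align ? align : 1) == sh_offset;
    }
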
+ */ + if (kmodule && adjust_kernel_syms && is_exe_text(shdr->sh_flags) && + shdr->sh_offset <= max_text_sh_offset) { + dso__put(*curr_dsop); + *curr_dsop = dso__get(dso); + return 0; + } + + snprintf(dso_name, sizeof(dso_name), "%s%s", dso__short_name(dso), section_name); curr_map = maps__find_by_name(kmaps, dso_name); if (curr_map == NULL) { @@ -1436,17 +1501,17 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, curr_dso = dso__new(dso_name); if (curr_dso == NULL) return -1; - curr_dso->kernel = dso->kernel; - curr_dso->long_name = dso->long_name; - curr_dso->long_name_len = dso->long_name_len; - curr_dso->binary_type = dso->binary_type; - curr_dso->adjust_symbols = dso->adjust_symbols; + dso__set_kernel(curr_dso, dso__kernel(dso)); + RC_CHK_ACCESS(curr_dso)->long_name = dso__long_name(dso); + RC_CHK_ACCESS(curr_dso)->long_name_len = dso__long_name_len(dso); + dso__set_binary_type(curr_dso, dso__binary_type(dso)); + dso__set_adjust_symbols(curr_dso, dso__adjust_symbols(dso)); curr_map = map__new2(start, curr_dso); - dso__put(curr_dso); - if (curr_map == NULL) + if (curr_map == NULL) { + dso__put(curr_dso); return -1; - - if (curr_dso->kernel) + } + if (dso__kernel(curr_dso)) map__kmap(curr_map)->kmaps = kmaps; if (adjust_kernel_syms) { @@ -1456,22 +1521,18 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, } else { map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY); } - curr_dso->symtab_type = dso->symtab_type; + dso__set_symtab_type(curr_dso, dso__symtab_type(dso)); if (maps__insert(kmaps, curr_map)) return -1; - /* - * Add it before we drop the reference to curr_map, i.e. while - * we still are sure to have a reference to this DSO via - * *curr_map->dso. - */ dsos__add(&maps__machine(kmaps)->dsos, curr_dso); - /* kmaps already got it */ - map__put(curr_map); dso__set_loaded(curr_dso); - *curr_mapp = curr_map; + dso__put(*curr_dsop); *curr_dsop = curr_dso; - } else - *curr_dsop = map__dso(curr_map); + } else { + dso__put(*curr_dsop); + *curr_dsop = dso__get(map__dso(curr_map)); + } + map__put(curr_map); return 0; } @@ -1480,13 +1541,11 @@ static int dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, struct symsrc *runtime_ss, int kmodule, int dynsym) { - struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; + struct kmap *kmap = dso__kernel(dso) ? map__kmap(map) : NULL; struct maps *kmaps = kmap ? map__kmaps(map) : NULL; - struct map *curr_map = map; - struct dso *curr_dso = dso; + struct dso *curr_dso = NULL; Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym; uint32_t nr_syms; - int err = -1; uint32_t idx; GElf_Ehdr ehdr; GElf_Shdr shdr; @@ -1497,6 +1556,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, Elf *elf; int nr = 0; bool remap_kernel = false, adjust_kernel_syms = false; + u64 max_text_sh_offset = 0; if (kmap && !kmaps) return -1; @@ -1513,8 +1573,8 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr, ".text", NULL)) { - dso->text_offset = tshdr.sh_addr - tshdr.sh_offset; - dso->text_end = tshdr.sh_offset + tshdr.sh_size; + dso__set_text_offset(dso, tshdr.sh_addr - tshdr.sh_offset); + dso__set_text_end(dso, tshdr.sh_offset + tshdr.sh_size); } if (runtime_ss->opdsec) @@ -1573,17 +1633,22 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, * attempted to prelink vdso to its virtual address. 
*/ if (dso__is_vdso(dso)) - map__set_reloc(map, map__start(map) - dso->text_offset); + map__set_reloc(map, map__start(map) - dso__text_offset(dso)); - dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); + dso__set_adjust_symbols(dso, runtime_ss->adjust_symbols || ref_reloc(kmap)); /* * Initial kernel and module mappings do not map to the dso. * Flag the fixups. */ - if (dso->kernel) { + if (dso__kernel(dso)) { remap_kernel = true; - adjust_kernel_syms = dso->adjust_symbols; + adjust_kernel_syms = dso__adjust_symbols(dso); } + + if (kmodule && adjust_kernel_syms) + max_text_sh_offset = max_text_section(runtime_ss->elf, &runtime_ss->ehdr); + + curr_dso = dso__get(dso); elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name = elf_sym__name(&sym, symstrs); @@ -1671,9 +1736,14 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, (sym.st_value & 1)) --sym.st_value; - if (dso->kernel) { - if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map, - section_name, adjust_kernel_syms, kmodule, &remap_kernel)) + if (dso__kernel(dso)) { + if (dso__process_kernel_symbol(dso, map, &sym, &shdr, + kmaps, kmap, &curr_dso, + section_name, + adjust_kernel_syms, + kmodule, + &remap_kernel, + max_text_sh_offset)) goto out_elf_end; } else if ((used_opd && runtime_ss->adjust_symbols) || (!used_opd && syms_ss->adjust_symbols)) { @@ -1719,16 +1789,17 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, arch__sym_update(f, &sym); - __symbols__insert(&curr_dso->symbols, f, dso->kernel); + __symbols__insert(dso__symbols(curr_dso), f, dso__kernel(dso)); nr++; } + dso__put(curr_dso); /* * For misannotated, zeroed, ASM function sizes. */ if (nr > 0) { - symbols__fixup_end(&dso->symbols, false); - symbols__fixup_duplicate(&dso->symbols); + symbols__fixup_end(dso__symbols(dso), false); + symbols__fixup_duplicate(dso__symbols(dso)); if (kmap) { /* * We need to fixup this here too because we create new @@ -1737,9 +1808,10 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, maps__fixup_end(kmaps); } } - err = nr; + return nr; out_elf_end: - return err; + dso__put(curr_dso); + return -1; } int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, @@ -1748,16 +1820,16 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, int nr = 0; int err = -1; - dso->symtab_type = syms_ss->type; - dso->is_64_bit = syms_ss->is_64_bit; - dso->rel = syms_ss->ehdr.e_type == ET_REL; + dso__set_symtab_type(dso, syms_ss->type); + dso__set_is_64_bit(dso, syms_ss->is_64_bit); + dso__set_rel(dso, syms_ss->ehdr.e_type == ET_REL); /* * Modules may already have symbols from kallsyms, but those symbols * have the wrong values for the dso maps, so remove them. */ if (kmodule && syms_ss->symtab) - symbols__delete(&dso->symbols); + symbols__delete(dso__symbols(dso)); if (!syms_ss->symtab) { /* @@ -1765,7 +1837,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, * to using kallsyms. The vmlinux runtime symbols aren't * of much use. 
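
The reworked dso__process_kernel_symbol() above keeps one invariant: *curr_dsop always owns its reference, so every reassignment pairs a dso__put() of the old value with a dso__get() of the new. A generic sketch of that shape (names hypothetical); note the put-before-get order is only safe because the incoming object is kept alive through another reference, as the dso is here via its map:

    #include <stdlib.h>

    struct obj { int refcnt; };

    static struct obj *obj__get(struct obj *o)
    {
            if (o)
                    o->refcnt++;
            return o;
    }

    static void obj__put(struct obj *o)
    {
            if (o && --o->refcnt == 0)
                    free(o);
    }

    /* Swap the reference owned by *slot for one on o; the caller
     * must guarantee o stays alive across the put. */
    static void slot__reset(struct obj **slot, struct obj *o)
    {
            obj__put(*slot);
            *slot = obj__get(o);
    }
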
*/ - if (dso->kernel) + if (dso__kernel(dso)) return err; } else { err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss, diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index 1da8b713509c..c6f369b5d893 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c @@ -273,7 +273,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, out_close: close(fd); out_errno: - dso->load_errno = errno; + RC_CHK_ACCESS(dso)->load_errno = errno; return -1; } @@ -348,7 +348,7 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, ret = fd__is_64_bit(ss->fd); if (ret >= 0) - dso->is_64_bit = ret; + RC_CHK_ACCESS(dso)->is_64_bit = ret; if (filename__read_build_id(ss->name, &bid) > 0) dso__set_build_id(dso, &bid); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index be212ba157dc..9e5940b5bc59 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -27,6 +27,7 @@ #include "symbol.h" #include "map_symbol.h" #include "mem-events.h" +#include "mem-info.h" #include "symsrc.h" #include "strlist.h" #include "intlist.h" @@ -63,6 +64,16 @@ struct symbol_conf symbol_conf = { .res_sample = 0, }; +struct map_list_node { + struct list_head node; + struct map *map; +}; + +static struct map_list_node *map_list_node__new(void) +{ + return malloc(sizeof(struct map_list_node)); +} + static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__KALLSYMS, DSO_BINARY_TYPE__GUEST_KALLSYMS, @@ -238,14 +249,31 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) * segment is very big. Therefore do not fill this gap and do * not assign it to the kernel dso map (kallsyms). * + * Also BPF code can be allocated separately from text segments + * and modules. So the last entry in a module should not fill + * the gap too. 
+ * * In kallsyms, it determines module symbols using '[' character * like in: * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi] */ if (prev->end == prev->start) { + const char *prev_mod; + const char *curr_mod; + + if (!is_kallsyms) { + prev->end = curr->start; + continue; + } + + prev_mod = strchr(prev->name, '['); + curr_mod = strchr(curr->name, '['); + /* Last kernel/module symbol mapped to end of page */ - if (is_kallsyms && (!strchr(prev->name, '[') != - !strchr(curr->name, '['))) + if (!prev_mod != !curr_mod) + prev->end = roundup(prev->end + 4096, 4096); + /* Last symbol in the previous module */ + else if (prev_mod && strcmp(prev_mod, curr_mod)) prev->end = roundup(prev->end + 4096, 4096); else prev->end = curr->start; @@ -505,52 +533,52 @@ static struct symbol *symbols__find_by_name(struct symbol *symbols[], void dso__reset_find_symbol_cache(struct dso *dso) { - dso->last_find_result.addr = 0; - dso->last_find_result.symbol = NULL; + dso__set_last_find_result_addr(dso, 0); + dso__set_last_find_result_symbol(dso, NULL); } void dso__insert_symbol(struct dso *dso, struct symbol *sym) { - __symbols__insert(&dso->symbols, sym, dso->kernel); + __symbols__insert(dso__symbols(dso), sym, dso__kernel(dso)); /* update the symbol cache if necessary */ - if (dso->last_find_result.addr >= sym->start && - (dso->last_find_result.addr < sym->end || + if (dso__last_find_result_addr(dso) >= sym->start && + (dso__last_find_result_addr(dso) < sym->end || sym->start == sym->end)) { - dso->last_find_result.symbol = sym; + dso__set_last_find_result_symbol(dso, sym); } } void dso__delete_symbol(struct dso *dso, struct symbol *sym) { - rb_erase_cached(&sym->rb_node, &dso->symbols); + rb_erase_cached(&sym->rb_node, dso__symbols(dso)); symbol__delete(sym); dso__reset_find_symbol_cache(dso); } struct symbol *dso__find_symbol(struct dso *dso, u64 addr) { - if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) { - dso->last_find_result.addr = addr; - dso->last_find_result.symbol = symbols__find(&dso->symbols, addr); + if (dso__last_find_result_addr(dso) != addr || dso__last_find_result_symbol(dso) == NULL) { + dso__set_last_find_result_addr(dso, addr); + dso__set_last_find_result_symbol(dso, symbols__find(dso__symbols(dso), addr)); } - return dso->last_find_result.symbol; + return dso__last_find_result_symbol(dso); } struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr) { - return symbols__find(&dso->symbols, addr); + return symbols__find(dso__symbols(dso), addr); } struct symbol *dso__first_symbol(struct dso *dso) { - return symbols__first(&dso->symbols); + return symbols__first(dso__symbols(dso)); } struct symbol *dso__last_symbol(struct dso *dso) { - return symbols__last(&dso->symbols); + return symbols__last(dso__symbols(dso)); } struct symbol *dso__next_symbol(struct symbol *sym) @@ -560,11 +588,11 @@ struct symbol *dso__next_symbol(struct symbol *sym) struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx) { - if (*idx + 1 >= dso->symbol_names_len) + if (*idx + 1 >= dso__symbol_names_len(dso)) return NULL; ++*idx; - return dso->symbol_names[*idx]; + return dso__symbol_names(dso)[*idx]; } /* @@ -572,27 +600,29 @@ struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx) */ struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx) { - struct symbol *s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len, - name, SYMBOL_TAG_INCLUDE__NONE, idx); - if (!s) - s = 
symbols__find_by_name(dso->symbol_names, dso->symbol_names_len, - name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx); + struct symbol *s = symbols__find_by_name(dso__symbol_names(dso), + dso__symbol_names_len(dso), + name, SYMBOL_TAG_INCLUDE__NONE, idx); + if (!s) { + s = symbols__find_by_name(dso__symbol_names(dso), dso__symbol_names_len(dso), + name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx); + } return s; } void dso__sort_by_name(struct dso *dso) { - mutex_lock(&dso->lock); + mutex_lock(dso__lock(dso)); if (!dso__sorted_by_name(dso)) { size_t len; - dso->symbol_names = symbols__sort_by_name(&dso->symbols, &len); - if (dso->symbol_names) { - dso->symbol_names_len = len; + dso__set_symbol_names(dso, symbols__sort_by_name(dso__symbols(dso), &len)); + if (dso__symbol_names(dso)) { + dso__set_symbol_names_len(dso, len); dso__set_sorted_by_name(dso); } } - mutex_unlock(&dso->lock); + mutex_unlock(dso__lock(dso)); } /* @@ -719,7 +749,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name, { struct symbol *sym; struct dso *dso = arg; - struct rb_root_cached *root = &dso->symbols; + struct rb_root_cached *root = dso__symbols(dso); if (!symbol_type__filter(type)) return 0; @@ -757,11 +787,10 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename) static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) { - struct map *curr_map; struct symbol *pos; int count = 0; - struct rb_root_cached old_root = dso->symbols; - struct rb_root_cached *root = &dso->symbols; + struct rb_root_cached *root = dso__symbols(dso); + struct rb_root_cached old_root = *root; struct rb_node *next = rb_first_cached(root); if (!kmaps) @@ -770,6 +799,7 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) *root = RB_ROOT_CACHED; while (next) { + struct map *curr_map; struct dso *curr_map_dso; char *module; @@ -794,12 +824,13 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) pos->end = map__end(curr_map); if (pos->end) pos->end -= map__start(curr_map) - map__pgoff(curr_map); - symbols__insert(&curr_map_dso->symbols, pos); + symbols__insert(dso__symbols(curr_map_dso), pos); ++count; + map__put(curr_map); } /* Symbols have been adjusted */ - dso->adjust_symbols = 1; + dso__set_adjust_symbols(dso, true); return count; } @@ -813,10 +844,10 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, struct map *initial_map) { struct machine *machine; - struct map *curr_map = initial_map; + struct map *curr_map = map__get(initial_map); struct symbol *pos; int count = 0, moved = 0; - struct rb_root_cached *root = &dso->symbols; + struct rb_root_cached *root = dso__symbols(dso); struct rb_node *next = rb_first_cached(root); int kernel_range = 0; bool x86_64; @@ -843,9 +874,9 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, *module++ = '\0'; curr_map_dso = map__dso(curr_map); - if (strcmp(curr_map_dso->short_name, module)) { + if (strcmp(dso__short_name(curr_map_dso), module)) { if (!RC_CHK_EQUAL(curr_map, initial_map) && - dso->kernel == DSO_SPACE__KERNEL_GUEST && + dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST && machine__is_default_guest(machine)) { /* * We assume all symbols of a module are @@ -857,17 +888,18 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, dso__set_loaded(curr_map_dso); } + map__zput(curr_map); curr_map = maps__find_by_name(kmaps, module); if (curr_map == NULL) { pr_debug("%s/proc/{kallsyms,modules} " "inconsistency 
while looking " "for \"%s\" module!\n", machine->root_dir, module); - curr_map = initial_map; + curr_map = map__get(initial_map); goto discard_symbol; } curr_map_dso = map__dso(curr_map); - if (curr_map_dso->loaded && + if (dso__loaded(curr_map_dso) && !machine__is_default_guest(machine)) goto discard_symbol; } @@ -887,7 +919,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, * symbols at this point. */ goto discard_symbol; - } else if (curr_map != initial_map) { + } else if (!RC_CHK_EQUAL(curr_map, initial_map)) { char dso_name[PATH_MAX]; struct dso *ndso; @@ -898,11 +930,12 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, } if (count == 0) { - curr_map = initial_map; + map__zput(curr_map); + curr_map = map__get(initial_map); goto add_symbol; } - if (dso->kernel == DSO_SPACE__KERNEL_GUEST) + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) snprintf(dso_name, sizeof(dso_name), "[guest.kernel].%d", kernel_range++); @@ -912,10 +945,11 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, kernel_range++); ndso = dso__new(dso_name); + map__zput(curr_map); if (ndso == NULL) return -1; - ndso->kernel = dso->kernel; + dso__set_kernel(ndso, dso__kernel(dso)); curr_map = map__new2(pos->start, ndso); if (curr_map == NULL) { @@ -925,6 +959,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY); if (maps__insert(kmaps, curr_map)) { + map__zput(curr_map); dso__put(ndso); return -1; } @@ -935,11 +970,11 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, pos->end -= delta; } add_symbol: - if (curr_map != initial_map) { + if (!RC_CHK_EQUAL(curr_map, initial_map)) { struct dso *curr_map_dso = map__dso(curr_map); rb_erase_cached(&pos->rb_node, root); - symbols__insert(&curr_map_dso->symbols, pos); + symbols__insert(dso__symbols(curr_map_dso), pos); ++moved; } else ++count; @@ -950,12 +985,12 @@ discard_symbol: symbol__delete(pos); } - if (curr_map != initial_map && - dso->kernel == DSO_SPACE__KERNEL_GUEST && + if (!RC_CHK_EQUAL(curr_map, initial_map) && + dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST && machine__is_default_guest(maps__machine(kmaps))) { dso__set_loaded(map__dso(curr_map)); } - + map__put(curr_map); return count + moved; } @@ -1125,7 +1160,7 @@ static int do_validate_kcore_modules_cb(struct map *old_map, void *data) dso = map__dso(old_map); /* Module must be in memory at the same address */ - mi = find_module(dso->short_name, modules); + mi = find_module(dso__short_name(dso), modules); if (!mi || mi->start != map__start(old_map)) return -EINVAL; @@ -1255,7 +1290,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, { struct maps *kmaps = map__kmaps(map); struct kcore_mapfn_data md; - struct map *replacement_map = NULL; + struct map *map_ref, *replacement_map = NULL; struct machine *machine; bool is_64_bit; int err, fd; @@ -1294,7 +1329,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, &is_64_bit); if (err) goto out_err; - dso->is_64_bit = is_64_bit; + dso__set_is_64_bit(dso, is_64_bit); if (list_empty(&md.maps)) { err = -EINVAL; @@ -1333,6 +1368,24 @@ static int dso__load_kcore(struct dso *dso, struct map *map, if (!replacement_map) replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map; + /* + * Update addresses of vmlinux map. Re-insert it to ensure maps are + * correctly ordered. 
Do this before using maps__merge_in() for the + * remaining maps so vmlinux gets split if necessary. + */ + map_ref = map__get(map); + maps__remove(kmaps, map_ref); + + map__set_start(map_ref, map__start(replacement_map)); + map__set_end(map_ref, map__end(replacement_map)); + map__set_pgoff(map_ref, map__pgoff(replacement_map)); + map__set_mapping_type(map_ref, map__mapping_type(replacement_map)); + + err = maps__insert(kmaps, map_ref); + map__put(map_ref); + if (err) + goto out_err; + /* Add new maps */ while (!list_empty(&md.maps)) { struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node); @@ -1340,22 +1393,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map, list_del_init(&new_node->node); - if (RC_CHK_EQUAL(new_map, replacement_map)) { - struct map *map_ref; - - map__set_start(map, map__start(new_map)); - map__set_end(map, map__end(new_map)); - map__set_pgoff(map, map__pgoff(new_map)); - map__set_mapping_type(map, map__mapping_type(new_map)); - /* Ensure maps are correctly ordered */ - map_ref = map__get(map); - maps__remove(kmaps, map_ref); - err = maps__insert(kmaps, map_ref); - map__put(map_ref); - map__put(new_map); - if (err) - goto out_err; - } else { + /* skip if replacement_map, already inserted above */ + if (!RC_CHK_EQUAL(new_map, replacement_map)) { /* * Merge kcore map into existing maps, * and ensure that current maps (eBPF) @@ -1386,10 +1425,10 @@ static int dso__load_kcore(struct dso *dso, struct map *map, * Set the data type and long name so that kcore can be read via * dso__data_read_addr(). */ - if (dso->kernel == DSO_SPACE__KERNEL_GUEST) - dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE; + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) + dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KCORE); else - dso->binary_type = DSO_BINARY_TYPE__KCORE; + dso__set_binary_type(dso, DSO_BINARY_TYPE__KCORE); dso__set_long_name(dso, strdup(kcore_filename), true); close(fd); @@ -1450,13 +1489,13 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename, if (kallsyms__delta(kmap, filename, &delta)) return -1; - symbols__fixup_end(&dso->symbols, true); - symbols__fixup_duplicate(&dso->symbols); + symbols__fixup_end(dso__symbols(dso), true); + symbols__fixup_duplicate(dso__symbols(dso)); - if (dso->kernel == DSO_SPACE__KERNEL_GUEST) - dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) + dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS); else - dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; + dso__set_symtab_type(dso, DSO_BINARY_TYPE__KALLSYMS); if (!no_kcore && !dso__load_kcore(dso, map, filename)) return maps__split_kallsyms_for_kcore(kmap->kmaps, dso); @@ -1512,7 +1551,7 @@ static int dso__load_perf_map(const char *map_path, struct dso *dso) if (sym == NULL) goto out_delete_line; - symbols__insert(&dso->symbols, sym); + symbols__insert(dso__symbols(dso), sym); nr_syms++; } @@ -1638,15 +1677,15 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile) if (!symbol) goto out_free; - symbols__insert(&dso->symbols, symbol); + symbols__insert(dso__symbols(dso), symbol); } #ifdef bfd_get_section #undef bfd_asymbol_section #endif - symbols__fixup_end(&dso->symbols, false); - symbols__fixup_duplicate(&dso->symbols); - dso->adjust_symbols = 1; + symbols__fixup_end(dso__symbols(dso), false); + symbols__fixup_duplicate(dso__symbols(dso)); + dso__set_adjust_symbols(dso, true); err = 0; out_free: @@ -1669,17 +1708,17 @@ static bool dso__is_compatible_symtab_type(struct dso 
*dso, bool kmod, case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: - return !kmod && dso->kernel == DSO_SPACE__USER; + return !kmod && dso__kernel(dso) == DSO_SPACE__USER; case DSO_BINARY_TYPE__KALLSYMS: case DSO_BINARY_TYPE__VMLINUX: case DSO_BINARY_TYPE__KCORE: - return dso->kernel == DSO_SPACE__KERNEL; + return dso__kernel(dso) == DSO_SPACE__KERNEL; case DSO_BINARY_TYPE__GUEST_KALLSYMS: case DSO_BINARY_TYPE__GUEST_VMLINUX: case DSO_BINARY_TYPE__GUEST_KCORE: - return dso->kernel == DSO_SPACE__KERNEL_GUEST; + return dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST; case DSO_BINARY_TYPE__GUEST_KMODULE: case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: @@ -1689,7 +1728,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, * kernel modules know their symtab type - it's set when * creating a module dso in machine__addnew_module_map(). */ - return kmod && dso->symtab_type == type; + return kmod && dso__symtab_type(dso) == type; case DSO_BINARY_TYPE__BUILD_ID_CACHE: case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: @@ -1757,18 +1796,19 @@ int dso__load(struct dso *dso, struct map *map) struct build_id bid; struct nscookie nsc; char newmapname[PATH_MAX]; - const char *map_path = dso->long_name; + const char *map_path = dso__long_name(dso); - mutex_lock(&dso->lock); - perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0; + mutex_lock(dso__lock(dso)); + perfmap = strncmp(dso__name(dso), "/tmp/perf-", 10) == 0; if (perfmap) { - if (dso->nsinfo && (dso__find_perf_map(newmapname, - sizeof(newmapname), &dso->nsinfo) == 0)) { + if (dso__nsinfo(dso) && + (dso__find_perf_map(newmapname, sizeof(newmapname), + dso__nsinfo_ptr(dso)) == 0)) { map_path = newmapname; } } - nsinfo__mountns_enter(dso->nsinfo, &nsc); + nsinfo__mountns_enter(dso__nsinfo(dso), &nsc); /* check again under the dso->lock */ if (dso__loaded(dso)) { @@ -1776,15 +1816,15 @@ int dso__load(struct dso *dso, struct map *map) goto out; } - kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || - dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || - dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE || - dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; + kmod = dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || + dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || + dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE || + dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; - if (dso->kernel && !kmod) { - if (dso->kernel == DSO_SPACE__KERNEL) + if (dso__kernel(dso) && !kmod) { + if (dso__kernel(dso) == DSO_SPACE__KERNEL) ret = dso__load_kernel_sym(dso, map); - else if (dso->kernel == DSO_SPACE__KERNEL_GUEST) + else if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) ret = dso__load_guest_kernel_sym(dso, map); machine = maps__machine(map__kmaps(map)); @@ -1793,12 +1833,13 @@ int dso__load(struct dso *dso, struct map *map) goto out; } - dso->adjust_symbols = 0; + dso__set_adjust_symbols(dso, false); if (perfmap) { ret = dso__load_perf_map(map_path, dso); - dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : - DSO_BINARY_TYPE__NOT_FOUND; + dso__set_symtab_type(dso, ret > 0 + ? DSO_BINARY_TYPE__JAVA_JIT + : DSO_BINARY_TYPE__NOT_FOUND); goto out; } @@ -1813,9 +1854,9 @@ int dso__load(struct dso *dso, struct map *map) * Read the build id if possible. 
This is required for * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work */ - if (!dso->has_build_id && - is_regular_file(dso->long_name)) { - __symbol__join_symfs(name, PATH_MAX, dso->long_name); + if (!dso__has_build_id(dso) && + is_regular_file(dso__long_name(dso))) { + __symbol__join_symfs(name, PATH_MAX, dso__long_name(dso)); if (filename__read_build_id(name, &bid) > 0) dso__set_build_id(dso, &bid); } @@ -1849,7 +1890,7 @@ int dso__load(struct dso *dso, struct map *map) nsinfo__mountns_exit(&nsc); is_reg = is_regular_file(name); - if (!is_reg && errno == ENOENT && dso->nsinfo) { + if (!is_reg && errno == ENOENT && dso__nsinfo(dso)) { char *new_name = dso__filename_with_chroot(dso, name); if (new_name) { is_reg = is_regular_file(new_name); @@ -1866,7 +1907,7 @@ int dso__load(struct dso *dso, struct map *map) sirc = symsrc__init(ss, dso, name, symtab_type); if (nsexit) - nsinfo__mountns_enter(dso->nsinfo, &nsc); + nsinfo__mountns_enter(dso__nsinfo(dso), &nsc); if (bfdrc == 0) { ret = 0; @@ -1879,8 +1920,8 @@ int dso__load(struct dso *dso, struct map *map) if (!syms_ss && symsrc__has_symtab(ss)) { syms_ss = ss; next_slot = true; - if (!dso->symsrc_filename) - dso->symsrc_filename = strdup(name); + if (!dso__symsrc_filename(dso)) + dso__set_symsrc_filename(dso, strdup(name)); } if (!runtime_ss && symsrc__possibly_runtime(ss)) { @@ -1927,16 +1968,20 @@ int dso__load(struct dso *dso, struct map *map) symsrc__destroy(&ss_[ss_pos - 1]); out_free: free(name); - if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) + if (ret < 0 && strstr(dso__name(dso), " (deleted)") != NULL) ret = 0; out: dso__set_loaded(dso); - mutex_unlock(&dso->lock); + mutex_unlock(dso__lock(dso)); nsinfo__mountns_exit(&nsc); return ret; } +/* + * Always takes ownership of vmlinux when vmlinux_allocated == true, even if + * it returns an error. + */ int dso__load_vmlinux(struct dso *dso, struct map *map, const char *vmlinux, bool vmlinux_allocated) { @@ -1950,23 +1995,26 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, else symbol__join_symfs(symfs_vmlinux, vmlinux); - if (dso->kernel == DSO_SPACE__KERNEL_GUEST) + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; else symtab_type = DSO_BINARY_TYPE__VMLINUX; - if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) + if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) { + if (vmlinux_allocated) + free((char *) vmlinux); return -1; + } /* * dso__load_sym() may copy 'dso' which will result in the copies having * an incorrect long name unless we set it here first. */ dso__set_long_name(dso, vmlinux, vmlinux_allocated); - if (dso->kernel == DSO_SPACE__KERNEL_GUEST) - dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX; + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) + dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_VMLINUX); else - dso->binary_type = DSO_BINARY_TYPE__VMLINUX; + dso__set_binary_type(dso, DSO_BINARY_TYPE__VMLINUX); err = dso__load_sym(dso, map, &ss, &ss, 0); symsrc__destroy(&ss); @@ -1999,7 +2047,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map) err = dso__load_vmlinux(dso, map, filename, true); if (err > 0) goto out; - free(filename); } out: return err; @@ -2059,7 +2106,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map) bool is_host = false; char path[PATH_MAX]; - if (!dso->has_build_id) { + if (!dso__has_build_id(dso)) { /* * Last resort, if we don't have a build-id and couldn't find * any vmlinux file, try the running kernel kallsyms table. 
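
Annotation: the hunks above and below change the ownership rule for the vmlinux path string. dso__load_vmlinux() now always consumes `vmlinux` when vmlinux_allocated == true, even on the early symsrc__init() failure, which is why its callers dso__load_vmlinux_path() (above) and dso__load_kernel_sym() (below) drop their free(filename) calls after a failed load. A minimal sketch of the convention follows; load_image() and try_paths() are hypothetical stand-ins for the perf APIs, not real functions from this patch.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for dso__load_vmlinux(): when "allocated" is
 * true, this function owns "path" on every exit, success or failure. */
static int load_image(const char *path, bool allocated)
{
	FILE *f = fopen(path, "rb");
	int ret = f ? 0 : -1;

	if (f)
		fclose(f);
	if (allocated)
		free((char *)path);	/* consumed on both paths */
	return ret;
}

/* Hypothetical caller mirroring dso__load_vmlinux_path(): note there is
 * no free(path) here, ownership moved into load_image(). */
static int try_paths(void)
{
	const char *candidates[] = { "/boot/vmlinux", "./vmlinux" };

	for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		char *path = strdup(candidates[i]);

		if (!path)
			return -1;
		if (load_image(path, /*allocated=*/true) == 0)
			return 0;
	}
	return -1;
}

int main(void)
{
	return try_paths() == 0 ? 0 : 1;
}

Pushing the free into the callee leaves each string with exactly one owner, closing the window where a caller could free a buffer that dso__set_long_name() had already handed to the dso.
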
@@ -2084,7 +2131,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map) goto proc_kallsyms; } - build_id__sprintf(&dso->bid, sbuild_id); + build_id__sprintf(dso__bid(dso), sbuild_id); /* Find kallsyms in build-id cache with kcore */ scnprintf(path, sizeof(path), "%s/%s/%s", @@ -2151,7 +2198,6 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map) err = dso__load_vmlinux(dso, map, filename, true); if (err > 0) return err; - free(filename); } if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { @@ -2177,7 +2223,7 @@ do_kallsyms: free(kallsyms_allocated_filename); if (err > 0 && !dso__is_kcore(dso)) { - dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; + dso__set_binary_type(dso, DSO_BINARY_TYPE__KALLSYMS); dso__set_long_name(dso, DSO__NAME_KALLSYMS, false); map__fixup_start(map); map__fixup_end(map); @@ -2220,7 +2266,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map) if (err > 0) pr_debug("Using %s for symbols\n", kallsyms_filename); if (err > 0 && !dso__is_kcore(dso)) { - dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; + dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS); dso__set_long_name(dso, machine->mmap_name, false); map__fixup_start(map); map__fixup_end(map); @@ -2534,31 +2580,6 @@ int symbol__config_symfs(const struct option *opt __maybe_unused, return 0; } -struct mem_info *mem_info__get(struct mem_info *mi) -{ - if (mi) - refcount_inc(&mi->refcnt); - return mi; -} - -void mem_info__put(struct mem_info *mi) -{ - if (mi && refcount_dec_and_test(&mi->refcnt)) { - addr_map_symbol__exit(&mi->iaddr); - addr_map_symbol__exit(&mi->daddr); - free(mi); - } -} - -struct mem_info *mem_info__new(void) -{ - struct mem_info *mi = zalloc(sizeof(*mi)); - - if (mi) - refcount_set(&mi->refcnt, 1); - return mi; -} - /* * Checks that user supplied symbol kernel files are accessible because * the default mechanism for accessing elf files fails silently. i.e. 
if diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 071837ddce2a..3fb5d146d9b1 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -268,18 +268,6 @@ enum { SDT_NOTE_IDX_REFCTR, }; -struct mem_info *mem_info__new(void); -struct mem_info *mem_info__get(struct mem_info *mi); -void mem_info__put(struct mem_info *mi); - -static inline void __mem_info__zput(struct mem_info **mi) -{ - mem_info__put(*mi); - *mi = NULL; -} - -#define mem_info__zput(mi) __mem_info__zput(&mi) - int symbol__validate_sym_arguments(void); #endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c index 088f4abf230f..53e1af4ed9ac 100644 --- a/tools/perf/util/symbol_fprintf.c +++ b/tools/perf/util/symbol_fprintf.c @@ -64,8 +64,8 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso, { size_t ret = 0; - for (size_t i = 0; i < dso->symbol_names_len; i++) { - struct symbol *pos = dso->symbol_names[i]; + for (size_t i = 0; i < dso__symbol_names_len(dso); i++) { + struct symbol *pos = dso__symbol_names(dso)[i]; ret += fprintf(fp, "%s\n", pos->name); } diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c index 2a0289c14959..5498048f56ea 100644 --- a/tools/perf/util/synthetic-events.c +++ b/tools/perf/util/synthetic-events.c @@ -385,8 +385,8 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event, id.ino_generation = event->ino_generation; dso = dsos__findnew_id(&machine->dsos, event->filename, &id); - if (dso && dso->has_build_id) { - bid = dso->bid; + if (dso && dso__has_build_id(dso)) { + bid = *dso__bid(dso); rc = 0; goto out; } @@ -407,7 +407,7 @@ out: event->__reserved_1 = 0; event->__reserved_2 = 0; - if (dso && !dso->has_build_id) + if (dso && !dso__has_build_id(dso)) dso__set_build_id(dso, &bid); } else { if (event->filename[0] == '/') { @@ -684,7 +684,7 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data) dso = map__dso(map); if (symbol_conf.buildid_mmap2) { - size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64)); + size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64)); event->mmap2.header.type = PERF_RECORD_MMAP2; event->mmap2.header.size = (sizeof(event->mmap2) - (sizeof(event->mmap2.filename) - size)); @@ -694,11 +694,11 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data) event->mmap2.len = map__size(map); event->mmap2.pid = args->machine->pid; - memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1); + memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1); perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false); } else { - size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64)); + size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64)); event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size)); @@ -708,7 +708,7 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data) event->mmap.len = map__size(map); event->mmap.pid = args->machine->pid; - memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1); + memcpy(event->mmap.filename, dso__long_name(dso), dso__long_name_len(dso) + 1); } if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0) @@ -2231,20 +2231,20 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 union perf_event ev; size_t len; - if (!pos->hit) + if 
(!dso__hit(pos)) return 0; memset(&ev, 0, sizeof(ev)); - len = pos->long_name_len + 1; + len = dso__long_name_len(pos) + 1; len = PERF_ALIGN(len, NAME_ALIGN); - ev.build_id.size = min(pos->bid.size, sizeof(pos->bid.data)); - memcpy(&ev.build_id.build_id, pos->bid.data, ev.build_id.size); + ev.build_id.size = min(dso__bid(pos)->size, sizeof(dso__bid(pos)->data)); + memcpy(&ev.build_id.build_id, dso__bid(pos)->data, ev.build_id.size); ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE; ev.build_id.pid = machine->pid; ev.build_id.header.size = sizeof(ev.build_id) + len; - memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); + memcpy(&ev.build_id.filename, dso__long_name(pos), dso__long_name_len(pos)); return process(tool, &ev, NULL, machine); } diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 89c47a5098e2..87c59aa9fe38 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -26,7 +26,7 @@ int thread__init_maps(struct thread *thread, struct machine *machine) if (pid == thread__tid(thread) || pid == -1) { thread__set_maps(thread, maps__new(machine)); } else { - struct thread *leader = __machine__findnew_thread(machine, pid, pid); + struct thread *leader = machine__findnew_thread(machine, pid, pid); if (leader) { thread__set_maps(thread, maps__get(thread__maps(leader))); @@ -39,12 +39,13 @@ int thread__init_maps(struct thread *thread, struct machine *machine) struct thread *thread__new(pid_t pid, pid_t tid) { - char *comm_str; - struct comm *comm; RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread)); struct thread *thread; if (ADD_RC_CHK(thread, _thread) != NULL) { + struct comm *comm; + char comm_str[32]; + thread__set_pid(thread, pid); thread__set_tid(thread, tid); thread__set_ppid(thread, -1); @@ -56,13 +57,8 @@ struct thread *thread__new(pid_t pid, pid_t tid) init_rwsem(thread__namespaces_lock(thread)); init_rwsem(thread__comm_lock(thread)); - comm_str = malloc(32); - if (!comm_str) - goto err_thread; - - snprintf(comm_str, 32, ":%d", tid); + snprintf(comm_str, sizeof(comm_str), ":%d", tid); comm = comm__new(comm_str, 0, false); - free(comm_str); if (!comm) goto err_thread; @@ -76,7 +72,7 @@ struct thread *thread__new(pid_t pid, pid_t tid) return thread; err_thread: - free(thread); + thread__delete(thread); return NULL; } @@ -383,7 +379,7 @@ static int thread__clone_maps(struct thread *thread, struct thread *parent, bool if (thread__pid(thread) == thread__pid(parent)) return thread__prepare_access(thread); - if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) { + if (maps__equal(thread__maps(thread), thread__maps(parent))) { pr_debug("broken map groups on thread %d/%d parent %d/%d\n", thread__pid(thread), thread__tid(thread), thread__pid(parent), thread__tid(parent)); @@ -457,14 +453,14 @@ int thread__memcpy(struct thread *thread, struct machine *machine, dso = map__dso(al.map); - if (!dso || dso->data.status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) { + if (!dso || dso__data(dso)->status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) { addr_location__exit(&al); return -1; } offset = map__map_ip(al.map, ip); if (is64bit) - *is64bit = dso->is_64_bit; + *is64bit = dso__is_64_bit(dso); addr_location__exit(&al); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 0df775b5c110..8b4a3c69bad1 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -3,7 +3,6 @@ #define __PERF_THREAD_H #include <linux/refcount.h> -#include 
<linux/rbtree.h> #include <linux/list.h> #include <stdio.h> #include <unistd.h> @@ -13,7 +12,6 @@ #include <strlist.h> #include <intlist.h> #include "rwsem.h" -#include "event.h" #include "callchain.h" #include <internal/rc_check.h> @@ -30,11 +28,6 @@ struct lbr_stitch { struct callchain_cursor_node *prev_lbr_cursor; }; -struct thread_rb_node { - struct rb_node rb_node; - struct thread *thread; -}; - DECLARE_RC_STRUCT(thread) { /** @maps: mmaps associated with this thread. */ struct maps *maps; diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index e848579e61a8..b5f12390c355 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -109,9 +109,10 @@ static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid) snprintf(path, sizeof(path), "/proc/%d/task", pid); items = scandir(path, &namelist, filter, NULL); - if (items <= 0) - goto out_free_closedir; - + if (items <= 0) { + pr_debug("scandir for %d returned empty, skipping\n", pid); + continue; + } while (threads->nr + items >= max_threads) { max_threads *= 2; grow = true; @@ -152,8 +153,6 @@ out_free_namelist: for (i = 0; i < items; i++) zfree(&namelist[i]); free(namelist); - -out_free_closedir: zfree(&threads); goto out_closedir; } @@ -280,13 +279,13 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str) threads->nr = ntasks; } out: + strlist__delete(slist); if (threads) refcount_set(&threads->refcnt, 1); return threads; out_free_threads: zfree(&threads); - strlist__delete(slist); goto out; } diff --git a/tools/perf/util/threads.c b/tools/perf/util/threads.c new file mode 100644 index 000000000000..ff2b169e0085 --- /dev/null +++ b/tools/perf/util/threads.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "threads.h" +#include "machine.h" +#include "thread.h" + +static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid) +{ + /* Cast it to handle tid == -1 */ + return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE]; +} + +static size_t key_hash(long key, void *ctx __maybe_unused) +{ + /* The table lookup removes low bit entropy, but this is just ignored here. 
*/ + return key; +} + +static bool key_equal(long key1, long key2, void *ctx __maybe_unused) +{ + return key1 == key2; +} + +void threads__init(struct threads *threads) +{ + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + + hashmap__init(&table->shard, key_hash, key_equal, NULL); + init_rwsem(&table->lock); + table->last_match = NULL; + } +} + +void threads__exit(struct threads *threads) +{ + threads__remove_all_threads(threads); + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + + hashmap__clear(&table->shard); + exit_rwsem(&table->lock); + } +} + +size_t threads__nr(struct threads *threads) +{ + size_t nr = 0; + + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + + down_read(&table->lock); + nr += hashmap__size(&table->shard); + up_read(&table->lock); + } + return nr; +} + +/* + * Front-end cache - TID lookups come in blocks, + * so most of the time we dont have to look up + * the full rbtree: + */ +static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table, + pid_t tid) +{ + struct thread *th, *res = NULL; + + th = table->last_match; + if (th != NULL) { + if (thread__tid(th) == tid) + res = thread__get(th); + } + return res; +} + +static void __threads_table_entry__set_last_match(struct threads_table_entry *table, + struct thread *th) +{ + thread__put(table->last_match); + table->last_match = thread__get(th); +} + +static void threads_table_entry__set_last_match(struct threads_table_entry *table, + struct thread *th) +{ + down_write(&table->lock); + __threads_table_entry__set_last_match(table, th); + up_write(&table->lock); +} + +struct thread *threads__find(struct threads *threads, pid_t tid) +{ + struct threads_table_entry *table = threads__table(threads, tid); + struct thread *res; + + down_read(&table->lock); + res = __threads_table_entry__get_last_match(table, tid); + if (!res) { + if (hashmap__find(&table->shard, tid, &res)) + res = thread__get(res); + } + up_read(&table->lock); + if (res) + threads_table_entry__set_last_match(table, res); + return res; +} + +struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created) +{ + struct threads_table_entry *table = threads__table(threads, tid); + struct thread *res = NULL; + + *created = false; + down_write(&table->lock); + res = thread__new(pid, tid); + if (res) { + if (hashmap__add(&table->shard, tid, res)) { + /* Add failed. Assume a race so find other entry. 
*/ + thread__put(res); + res = NULL; + if (hashmap__find(&table->shard, tid, &res)) + res = thread__get(res); + } else { + res = thread__get(res); + *created = true; + } + if (res) + __threads_table_entry__set_last_match(table, res); + } + up_write(&table->lock); + return res; +} + +void threads__remove_all_threads(struct threads *threads) +{ + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + struct hashmap_entry *cur, *tmp; + size_t bkt; + + down_write(&table->lock); + __threads_table_entry__set_last_match(table, NULL); + hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) { + struct thread *old_value; + + hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value); + thread__put(old_value); + } + up_write(&table->lock); + } +} + +void threads__remove(struct threads *threads, struct thread *thread) +{ + struct threads_table_entry *table = threads__table(threads, thread__tid(thread)); + struct thread *old_value; + + down_write(&table->lock); + if (table->last_match && RC_CHK_EQUAL(table->last_match, thread)) + __threads_table_entry__set_last_match(table, NULL); + + hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value); + thread__put(old_value); + up_write(&table->lock); +} + +int threads__for_each_thread(struct threads *threads, + int (*fn)(struct thread *thread, void *data), + void *data) +{ + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + struct hashmap_entry *cur; + size_t bkt; + + down_read(&table->lock); + hashmap__for_each_entry((&table->shard), cur, bkt) { + int rc = fn((struct thread *)cur->pvalue, data); + + if (rc != 0) { + up_read(&table->lock); + return rc; + } + } + up_read(&table->lock); + } + return 0; + +} diff --git a/tools/perf/util/threads.h b/tools/perf/util/threads.h new file mode 100644 index 000000000000..da68d2223f18 --- /dev/null +++ b/tools/perf/util/threads.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PERF_THREADS_H +#define __PERF_THREADS_H + +#include "hashmap.h" +#include "rwsem.h" + +struct thread; + +#define THREADS__TABLE_BITS 3 +#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS) + +struct threads_table_entry { + /* Key is tid, value is struct thread. */ + struct hashmap shard; + struct rw_semaphore lock; + struct thread *last_match; +}; + +struct threads { + struct threads_table_entry table[THREADS__TABLE_SIZE]; +}; + +void threads__init(struct threads *threads); +void threads__exit(struct threads *threads); +size_t threads__nr(struct threads *threads); +struct thread *threads__find(struct threads *threads, pid_t tid); +struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created); +void threads__remove_all_threads(struct threads *threads); +void threads__remove(struct threads *threads, struct thread *thread); +int threads__for_each_thread(struct threads *threads, + int (*fn)(struct thread *thread, void *data), + void *data); + +#endif /* __PERF_THREADS_H */ diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 2d3c2576bab7..f0332bd3a501 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -122,6 +122,119 @@ void event_format__print(struct tep_event *event, return event_format__fprintf(event, cpu, data, size, stdout); } +/* + * prev_state is of size long, which is 32 bits on 32 bit architectures. 
+ * As it needs to have the same bits for both 32 bit and 64 bit architectures + * we can just assume that the flags we care about will all be within + * the 32 bits. + */ +#define MAX_STATE_BITS 32 + +static const char *convert_sym(struct tep_print_flag_sym *sym) +{ + static char save_states[MAX_STATE_BITS + 1]; + + memset(save_states, 0, sizeof(save_states)); + + /* This is the flags for the prev_state_field, now make them into a string */ + for (; sym; sym = sym->next) { + long bitmask = strtoul(sym->value, NULL, 0); + int i; + + for (i = 0; !(bitmask & 1); i++) + bitmask >>= 1; + + if (i >= MAX_STATE_BITS) + continue; + + save_states[i] = sym->str[0]; + } + + return save_states; +} + +static struct tep_print_arg_field * +find_arg_field(struct tep_format_field *prev_state_field, struct tep_print_arg *arg) +{ + struct tep_print_arg_field *field; + + if (!arg) + return NULL; + + if (arg->type == TEP_PRINT_FIELD) + return &arg->field; + + if (arg->type == TEP_PRINT_OP) { + field = find_arg_field(prev_state_field, arg->op.left); + if (field && field->field == prev_state_field) + return field; + field = find_arg_field(prev_state_field, arg->op.right); + if (field && field->field == prev_state_field) + return field; + } + return NULL; +} + +static struct tep_print_flag_sym * +test_flags(struct tep_format_field *prev_state_field, struct tep_print_arg *arg) +{ + struct tep_print_arg_field *field; + + field = find_arg_field(prev_state_field, arg->flags.field); + if (!field) + return NULL; + + return arg->flags.flags; +} + +static struct tep_print_flag_sym * +search_op(struct tep_format_field *prev_state_field, struct tep_print_arg *arg) +{ + struct tep_print_flag_sym *sym = NULL; + + if (!arg) + return NULL; + + if (arg->type == TEP_PRINT_OP) { + sym = search_op(prev_state_field, arg->op.left); + if (sym) + return sym; + + sym = search_op(prev_state_field, arg->op.right); + if (sym) + return sym; + } else if (arg->type == TEP_PRINT_FLAGS) { + sym = test_flags(prev_state_field, arg); + } + + return sym; +} + +const char *parse_task_states(struct tep_format_field *state_field) +{ + struct tep_print_flag_sym *sym; + struct tep_print_arg *arg; + struct tep_event *event; + + event = state_field->event; + + /* + * Look at the event format fields, and search for where + * the prev_state is parsed via the format flags. + */ + for (arg = event->print_fmt.args; arg; arg = arg->next) { + /* + * Currently, the __print_flags() for the prev_state + * is embedded in operations, so they too must be + * searched. 
+ */ + sym = search_op(state_field, arg); + if (sym) + return convert_sym(sym); + } + return NULL; +} + void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size __maybe_unused) { diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index a69ee29419f3..bbf8b26bc8da 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -15,6 +15,7 @@ struct perf_tool; struct thread; struct tep_plugin_list; struct evsel; +struct tep_format_field; struct trace_event { struct tep_handle *pevent; @@ -51,6 +52,8 @@ int parse_event_file(struct tep_handle *pevent, unsigned long long raw_field_value(struct tep_event *event, const char *name, void *data); +const char *parse_task_states(struct tep_format_field *state_field); + void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size); void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size); void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int size); diff --git a/tools/perf/util/tracepoint.c b/tools/perf/util/tracepoint.c index 92dd8b455b90..95377ed5d87b 100644 --- a/tools/perf/util/tracepoint.c +++ b/tools/perf/util/tracepoint.c @@ -4,10 +4,12 @@ #include <errno.h> #include <fcntl.h> #include <stdio.h> +#include <stdlib.h> #include <sys/param.h> #include <unistd.h> #include <api/fs/tracing_path.h> +#include "fncache.h" int tp_event_has_id(const char *dir_path, struct dirent *evt_dir) { @@ -26,39 +28,25 @@ int tp_event_has_id(const char *dir_path, struct dirent *evt_dir) /* * Check whether event is in <debugfs_mount_point>/tracing/events */ -int is_valid_tracepoint(const char *event_string) +bool is_valid_tracepoint(const char *event_string) { - DIR *sys_dir, *evt_dir; - struct dirent *sys_dirent, *evt_dirent; - char evt_path[MAXPATHLEN]; - char *dir_path; - - sys_dir = tracing_events__opendir(); - if (!sys_dir) - return 0; - - for_each_subsystem(sys_dir, sys_dirent) { - dir_path = get_events_file(sys_dirent->d_name); - if (!dir_path) - continue; - evt_dir = opendir(dir_path); - if (!evt_dir) - goto next; - - for_each_event(dir_path, evt_dir, evt_dirent) { - snprintf(evt_path, MAXPATHLEN, "%s:%s", - sys_dirent->d_name, evt_dirent->d_name); - if (!strcmp(evt_path, event_string)) { - closedir(evt_dir); - put_events_file(dir_path); - closedir(sys_dir); - return 1; - } - } - closedir(evt_dir); -next: - put_events_file(dir_path); - } - closedir(sys_dir); - return 0; + char *dst, *path = malloc(strlen(event_string) + 4); /* Space for "/id\0". */ + bool have_file = false; /* Conservatively return false if memory allocation failed. */ + const char *src; + + if (!path) + return false; + + /* Copy event_string replacing the ':' with '/'. */ + for (src = event_string, dst = path; *src; src++, dst++) + *dst = (*src == ':') ? '/' : *src; + /* Add "/id\0". 
*/ + memcpy(dst, "/id", 4); + + dst = get_events_file(path); + if (dst) + have_file = file_available(dst); + free(dst); + free(path); + return have_file; } diff --git a/tools/perf/util/tracepoint.h b/tools/perf/util/tracepoint.h index c4a110fe87d7..65ccb01fc312 100644 --- a/tools/perf/util/tracepoint.h +++ b/tools/perf/util/tracepoint.h @@ -4,6 +4,7 @@ #include <dirent.h> #include <string.h> +#include <stdbool.h> int tp_event_has_id(const char *dir_path, struct dirent *evt_dir); @@ -20,6 +21,6 @@ int tp_event_has_id(const char *dir_path, struct dirent *evt_dir); (strcmp(sys_dirent->d_name, ".")) && \ (strcmp(sys_dirent->d_name, ".."))) -int is_valid_tracepoint(const char *event_string); +bool is_valid_tracepoint(const char *event_string); #endif /* __PERF_TRACEPOINT_H */ diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 6013335a8dae..b38d322734b4 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -263,7 +263,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, struct unwind_info *ui, ui_buf = { .sample = data, .thread = thread, - .machine = RC_CHK_ACCESS(thread__maps(thread))->machine, + .machine = maps__machine((thread__maps(thread))), .cb = cb, .arg = arg, .max_stack = max_stack, diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index dac536e28360..cde267ea3e99 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -329,27 +329,27 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui, }; int ret, fd; - if (dso->data.eh_frame_hdr_offset == 0) { + if (dso__data(dso)->eh_frame_hdr_offset == 0) { fd = dso__data_get_fd(dso, ui->machine); if (fd < 0) return -EINVAL; /* Check the .eh_frame section for unwinding info */ ret = elf_section_address_and_offset(fd, ".eh_frame_hdr", - &dso->data.eh_frame_hdr_addr, - &dso->data.eh_frame_hdr_offset); - dso->data.elf_base_addr = elf_base_address(fd); + &dso__data(dso)->eh_frame_hdr_addr, + &dso__data(dso)->eh_frame_hdr_offset); + dso__data(dso)->elf_base_addr = elf_base_address(fd); dso__data_put_fd(dso); - if (ret || dso->data.eh_frame_hdr_offset == 0) + if (ret || dso__data(dso)->eh_frame_hdr_offset == 0) return -EINVAL; } maps__for_each_map(thread__maps(ui->thread), read_unwind_spec_eh_frame_maps_cb, &args); - args.base_addr -= dso->data.elf_base_addr; + args.base_addr -= dso__data(dso)->elf_base_addr; /* Address of .eh_frame_hdr */ - *segbase = args.base_addr + dso->data.eh_frame_hdr_addr; - ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset, + *segbase = args.base_addr + dso__data(dso)->eh_frame_hdr_addr; + ret = unwind_spec_ehframe(dso, ui->machine, dso__data(dso)->eh_frame_hdr_offset, table_data, fde_count); if (ret) return ret; @@ -460,7 +460,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, return -EINVAL; } - pr_debug("unwind: find_proc_info dso %s\n", dso->name); + pr_debug("unwind: find_proc_info dso %s\n", dso__name(dso)); /* Check the .eh_frame section for unwinding info */ if (!read_unwind_spec_eh_frame(dso, ui, &table_data, &segbase, &fde_count)) { @@ -706,7 +706,7 @@ static int _unwind__prepare_access(struct maps *maps) { void *addr_space = unw_create_addr_space(&accessors, 0); - RC_CHK_ACCESS(maps)->addr_space = addr_space; + maps__set_addr_space(maps, addr_space); if (!addr_space) { pr_err("unwind: Can't create unwind address space.\n"); return -ENOMEM; diff --git 
a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c index 76cd63de80a8..cb8be6acfb6f 100644 --- a/tools/perf/util/unwind-libunwind.c +++ b/tools/perf/util/unwind-libunwind.c @@ -12,11 +12,6 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops; -static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops) -{ - RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops; -} - int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized) { const char *arch; @@ -30,7 +25,7 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized return 0; if (maps__addr_space(maps)) { - pr_debug("unwind: thread map already set, dso=%s\n", dso->name); + pr_debug("unwind: thread map already set, dso=%s\n", dso__name(dso)); if (initialized) *initialized = true; return 0; @@ -60,7 +55,7 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized return 0; } out_register: - unwind__register_ops(maps, ops); + maps__set_unwind_libunwind_ops(maps, ops); err = maps__unwind_libunwind_ops(maps)->prepare_access(maps); if (initialized) diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index c1fd9ba6d697..4f561e5e4162 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -552,3 +552,22 @@ int sched_getcpu(void) return -1; } #endif + +#ifndef HAVE_SCANDIRAT_SUPPORT +int scandirat(int dirfd, const char *dirp, + struct dirent ***namelist, + int (*filter)(const struct dirent *), + int (*compar)(const struct dirent **, const struct dirent **)) +{ + char path[PATH_MAX]; + int err, fd = openat(dirfd, dirp, O_PATH); + + if (fd < 0) + return fd; + + snprintf(path, sizeof(path), "/proc/%d/fd/%d", getpid(), fd); + err = scandir(path, namelist, filter, compar); + close(fd); + return err; +} +#endif diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 7c8915d92dca..9966c21aaf04 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -6,6 +6,7 @@ /* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */ #define _DEFAULT_SOURCE 1 +#include <dirent.h> #include <fcntl.h> #include <stdbool.h> #include <stddef.h> @@ -56,6 +57,13 @@ int perf_tip(char **strp, const char *dirpath); int sched_getcpu(void); #endif +#ifndef HAVE_SCANDIRAT_SUPPORT +int scandirat(int dirfd, const char *dirp, + struct dirent ***namelist, + int (*filter)(const struct dirent *), + int (*compar)(const struct dirent **, const struct dirent **)); +#endif + extern bool perf_singlethreaded; void perf_set_singlethreaded(void); diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h index 8c41f22f42cf..791c1ad606c2 100644 --- a/tools/perf/util/values.h +++ b/tools/perf/util/values.h @@ -2,6 +2,7 @@ #ifndef __PERF_VALUES_H #define __PERF_VALUES_H +#include <stdio.h> #include <linux/types.h> struct perf_read_values { diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c index df8963796187..1b6f8f6db7aa 100644 --- a/tools/perf/util/vdso.c +++ b/tools/perf/util/vdso.c @@ -133,8 +133,6 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s if (dso != NULL) { __dsos__add(&machine->dsos, dso); dso__set_long_name(dso, long_name, false); - /* Put dso here because __dsos_add already got it */ - dso__put(dso); } return dso; @@ -150,7 +148,7 @@ static int machine__thread_dso_type_maps_cb(struct map *map, void *data) struct 
machine__thread_dso_type_maps_cb_args *args = data; struct dso *dso = map__dso(map); - if (!dso || dso->long_name[0] != '/') + if (!dso || dso__long_name(dso)[0] != '/') return 0; args->dso_type = dso__type(dso, args->machine); @@ -252,17 +250,15 @@ static struct dso *__machine__findnew_compat(struct machine *machine, const char *file_name; struct dso *dso; - dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true); + dso = dsos__find(&machine->dsos, vdso_file->dso_name, true); if (dso) - goto out; + return dso; file_name = vdso__get_compat_file(vdso_file); if (!file_name) - goto out; + return NULL; - dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name); -out: - return dso; + return __machine__addnew_vdso(machine, vdso_file->dso_name, file_name); } static int __machine__findnew_vdso_compat(struct machine *machine, @@ -308,21 +304,21 @@ static struct dso *machine__find_vdso(struct machine *machine, dso_type = machine__thread_dso_type(machine, thread); switch (dso_type) { case DSO__TYPE_32BIT: - dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO32, true); + dso = dsos__find(&machine->dsos, DSO__NAME_VDSO32, true); if (!dso) { - dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, - true); + dso = dsos__find(&machine->dsos, DSO__NAME_VDSO, + true); if (dso && dso_type != dso__type(dso, machine)) dso = NULL; } break; case DSO__TYPE_X32BIT: - dso = __dsos__find(&machine->dsos, DSO__NAME_VDSOX32, true); + dso = dsos__find(&machine->dsos, DSO__NAME_VDSOX32, true); break; case DSO__TYPE_64BIT: case DSO__TYPE_UNKNOWN: default: - dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true); + dso = dsos__find(&machine->dsos, DSO__NAME_VDSO, true); break; } @@ -334,42 +330,38 @@ struct dso *machine__findnew_vdso(struct machine *machine, { struct vdso_info *vdso_info; struct dso *dso = NULL; + char *file; - down_write(&machine->dsos.lock); if (!machine->vdso_info) machine->vdso_info = vdso_info__new(); vdso_info = machine->vdso_info; if (!vdso_info) - goto out_unlock; + return NULL; dso = machine__find_vdso(machine, thread); if (dso) - goto out_unlock; + return dso; #if BITS_PER_LONG == 64 if (__machine__findnew_vdso_compat(machine, thread, vdso_info, &dso)) - goto out_unlock; + return dso; #endif - dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true); - if (!dso) { - char *file; + dso = dsos__find(&machine->dsos, DSO__NAME_VDSO, true); + if (dso) + return dso; - file = get_file(&vdso_info->vdso); - if (file) - dso = __machine__addnew_vdso(machine, DSO__NAME_VDSO, file); - } + file = get_file(&vdso_info->vdso); + if (!file) + return NULL; -out_unlock: - dso__get(dso); - up_write(&machine->dsos.lock); - return dso; + return __machine__addnew_vdso(machine, DSO__NAME_VDSO, file); } bool dso__is_vdso(struct dso *dso) { - return !strcmp(dso->short_name, DSO__NAME_VDSO) || - !strcmp(dso->short_name, DSO__NAME_VDSO32) || - !strcmp(dso->short_name, DSO__NAME_VDSOX32); + return !strcmp(dso__short_name(dso), DSO__NAME_VDSO) || + !strcmp(dso__short_name(dso), DSO__NAME_VDSO32) || + !strcmp(dso__short_name(dso), DSO__NAME_VDSOX32); } |
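
A note on the new tools/perf/util/threads.{c,h} added above: the thread table that used to hang off an rb-tree (see the removal of struct thread_rb_node from thread.h) is now THREADS__TABLE_SIZE independent shards, each pairing a hashmap keyed by tid with its own rw_semaphore and a last_match front-end cache. Below is an illustrative, self-contained sketch of the sharding scheme only, not the perf implementation: the per-shard hashmap is reduced to a linked list, and the last_match cache and thread reference counting are omitted.

#include <pthread.h>
#include <stdlib.h>
#include <sys/types.h>

#define TABLE_BITS 3
#define TABLE_SIZE (1 << TABLE_BITS)

/* One bucket entry; perf stores reference-counted struct thread here. */
struct entry {
	pid_t tid;
	void *thread;
	struct entry *next;
};

/* A shard: a private list (a hashmap in perf) plus its own rwlock. */
struct shard {
	struct entry *head;
	pthread_rwlock_t lock;
};

static struct shard table[TABLE_SIZE];

static void table_init(void)
{
	for (int i = 0; i < TABLE_SIZE; i++) {
		table[i].head = NULL;
		pthread_rwlock_init(&table[i].lock, NULL);
	}
}

/* Cast mirrors threads__table(): tid == -1 still picks a valid shard. */
static struct shard *shard_of(pid_t tid)
{
	return &table[(unsigned int)tid % TABLE_SIZE];
}

static void *table_find(pid_t tid)
{
	struct shard *s = shard_of(tid);
	void *res = NULL;

	pthread_rwlock_rdlock(&s->lock);	/* readers run concurrently */
	for (struct entry *e = s->head; e; e = e->next) {
		if (e->tid == tid) {
			res = e->thread;
			break;
		}
	}
	pthread_rwlock_unlock(&s->lock);
	return res;
}

static int table_insert(pid_t tid, void *thread)
{
	struct shard *s = shard_of(tid);
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->tid = tid;
	e->thread = thread;

	pthread_rwlock_wrlock(&s->lock);	/* exclusive within one shard only */
	e->next = s->head;
	s->head = e;
	pthread_rwlock_unlock(&s->lock);
	return 0;
}

int main(void)
{
	int x = 42;

	table_init();
	table_insert(1234, &x);
	return table_find(1234) == &x ? 0 : 1;
}

Because a tid always selects the same shard, lookups for different tids mostly take different locks, and within a shard readers never block one another; the real code layers the per-shard last_match cache on top because, as its comment notes, TID lookups tend to arrive in blocks.
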