Diffstat (limited to 'tools/lib/perf')

 -rw-r--r--  tools/lib/perf/.gitignore                          |   5
 -rw-r--r--  tools/lib/perf/Build                               |   2
 -rw-r--r--  tools/lib/perf/Documentation/Makefile              |   2
 -rw-r--r--  tools/lib/perf/Documentation/examples/sampling.c   |   2
 -rw-r--r--  tools/lib/perf/Documentation/libperf-counting.txt  |  14
 -rw-r--r--  tools/lib/perf/Documentation/libperf-sampling.txt  |  15
 -rw-r--r--  tools/lib/perf/Documentation/libperf.txt           |  29
 -rw-r--r--  tools/lib/perf/Makefile                            | 102
 -rw-r--r--  tools/lib/perf/cpumap.c                            | 471
 -rw-r--r--  tools/lib/perf/evlist.c                            | 263
 -rw-r--r--  tools/lib/perf/evsel.c                             | 386
 -rw-r--r--  tools/lib/perf/include/internal/cpumap.h           |  27
 -rw-r--r--  tools/lib/perf/include/internal/evlist.h           |  25
 -rw-r--r--  tools/lib/perf/include/internal/evsel.h            |  97
 -rw-r--r--  tools/lib/perf/include/internal/lib.h              |   2
 -rw-r--r--  tools/lib/perf/include/internal/mmap.h             |  11
 -rw-r--r--  tools/lib/perf/include/internal/rc_check.h         | 113
 -rw-r--r--  tools/lib/perf/include/internal/tests.h            |  36
 -rw-r--r--  tools/lib/perf/include/internal/xyarray.h          |   9
 -rw-r--r--  tools/lib/perf/include/perf/bpf_perf.h             |  31
 -rw-r--r--  tools/lib/perf/include/perf/cpumap.h               |  92
 -rw-r--r--  tools/lib/perf/include/perf/event.h                | 173
 -rw-r--r--  tools/lib/perf/include/perf/evlist.h               |   2
 -rw-r--r--  tools/lib/perf/include/perf/evsel.h                |  20
 -rw-r--r--  tools/lib/perf/include/perf/threadmap.h            |   8
 -rw-r--r--  tools/lib/perf/lib.c                               |  20
 -rw-r--r--  tools/lib/perf/libperf.map                         |  17
 -rw-r--r--  tools/lib/perf/mmap.c                              | 274
 -rw-r--r--  tools/lib/perf/tests/Build                         |   5
 -rw-r--r--  tools/lib/perf/tests/Makefile                      |  38
 -rw-r--r--  tools/lib/perf/tests/main.c                        |  15
 -rw-r--r--  tools/lib/perf/tests/test-cpumap.c                 |  18
 -rw-r--r--  tools/lib/perf/tests/test-evlist.c                 | 218
 -rw-r--r--  tools/lib/perf/tests/test-evsel.c                  | 244
 -rw-r--r--  tools/lib/perf/tests/test-threadmap.c              |  46
 -rw-r--r--  tools/lib/perf/tests/tests.h                       |  10
 -rw-r--r--  tools/lib/perf/threadmap.c                         |  53
 37 files changed, 2408 insertions(+), 487 deletions(-)
diff --git a/tools/lib/perf/.gitignore b/tools/lib/perf/.gitignore
new file mode 100644
index 000000000000..0f5b4af63f62
--- /dev/null
+++ b/tools/lib/perf/.gitignore
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+libperf.pc
+libperf.so.*
+tests-shared
+tests-static
diff --git a/tools/lib/perf/Build b/tools/lib/perf/Build
index 2ef9a4ec6d99..e8f5b7fb9973 100644
--- a/tools/lib/perf/Build
+++ b/tools/lib/perf/Build
@@ -11,3 +11,5 @@ libperf-y += lib.o
$(OUTPUT)zalloc.o: ../../lib/zalloc.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
+
+tests-y += tests/
diff --git a/tools/lib/perf/Documentation/Makefile b/tools/lib/perf/Documentation/Makefile
index 972754082a85..573ca5b27556 100644
--- a/tools/lib/perf/Documentation/Makefile
+++ b/tools/lib/perf/Documentation/Makefile
@@ -121,7 +121,7 @@ install-man: all
$(INSTALL) -d -m 755 $(DESTDIR)$(man7dir); \
$(INSTALL) -m 644 $(MAN_7) $(DESTDIR)$(man7dir);
-install-html:
+install-html: $(MAN_HTML)
$(call QUIET_INSTALL, html) \
$(INSTALL) -d -m 755 $(DESTDIR)$(htmldir); \
$(INSTALL) -m 644 $(MAN_HTML) $(DESTDIR)$(htmldir); \
diff --git a/tools/lib/perf/Documentation/examples/sampling.c b/tools/lib/perf/Documentation/examples/sampling.c
index 8e1a926a9cfe..bc142f0664b5 100644
--- a/tools/lib/perf/Documentation/examples/sampling.c
+++ b/tools/lib/perf/Documentation/examples/sampling.c
@@ -39,7 +39,7 @@ int main(int argc, char **argv)
libperf_init(libperf_print);
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus) {
fprintf(stderr, "failed to create cpus\n");
return -1;
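
The change above is mechanical, but the old spelling still appears in third-party code: `perf_cpu_map__new(NULL)` and `perf_cpu_map__new_online_cpus()` build the same online-CPU map, the new name just makes the intent explicit. A minimal sketch of the updated call site:

[source,c]
--
#include <perf/cpumap.h>

/* Sketch: explicit constructor instead of the NULL-argument idiom. */
static struct perf_cpu_map *online_cpus(void)
{
	/* Previously: return perf_cpu_map__new(NULL); */
	return perf_cpu_map__new_online_cpus();
}
--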
diff --git a/tools/lib/perf/Documentation/libperf-counting.txt b/tools/lib/perf/Documentation/libperf-counting.txt
index cae9757f49c1..8b75efcd67ce 100644
--- a/tools/lib/perf/Documentation/libperf-counting.txt
+++ b/tools/lib/perf/Documentation/libperf-counting.txt
@@ -7,13 +7,13 @@ libperf-counting - counting interface
DESCRIPTION
-----------
-The counting interface provides API to meassure and get count for specific perf events.
+The counting interface provides API to measure and get count for specific perf events.
The following test tries to explain count on `counting.c` example.
It is by no means complete guide to counting, but shows libperf basic API for counting.
-The `counting.c` comes with libbperf package and can be compiled and run like:
+The `counting.c` comes with libperf package and can be compiled and run like:
[source,bash]
--
@@ -26,7 +26,8 @@ count 176242, enabled 176242, run 176242
It requires root access, because of the `PERF_COUNT_SW_CPU_CLOCK` event,
which is available only for root.
-The `counting.c` example monitors two events on the current process and displays their count, in a nutshel it:
+The `counting.c` example monitors two events on the current process and displays
+their count, in a nutshell it:
* creates events
* adds them to the event list
@@ -152,7 +153,7 @@ Configure event list with the thread map and open events:
--
Both events are created as disabled (note the `disabled = 1` assignment above),
-so we need to enable the whole list explicitely (both events).
+so we need to enable the whole list explicitly (both events).
From this moment events are counting and we can do our workload.
@@ -167,7 +168,8 @@ When we are done we disable the events list.
79 perf_evlist__disable(evlist);
--
-Now we need to get the counts from events, following code iterates throught the events list and read counts:
+Now we need to get the counts from events, following code iterates through the
+events list and read counts:
[source,c]
--
@@ -178,7 +180,7 @@ Now we need to get the counts from events, following code iterates throught the
85 }
--
-And finaly cleanup.
+And finally cleanup.
We close the whole events list (both events) and remove it together with the threads map:
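
For orientation, the counting flow the document walks through condenses to roughly the following. This is a sketch only, with error handling and `libperf_init()` omitted for brevity, not the full `counting.c` listing:

[source,c]
--
#include <linux/perf_event.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/threadmap.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type        = PERF_TYPE_SOFTWARE,
		.config      = PERF_COUNT_SW_TASK_CLOCK,
		.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			       PERF_FORMAT_TOTAL_TIME_RUNNING,
		.disabled    = 1,
	};
	struct perf_counts_values counts = { .val = 0 };
	struct perf_thread_map *threads;
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;

	threads = perf_thread_map__new_dummy();
	perf_thread_map__set_pid(threads, 0, 0);	/* monitor this process */

	evlist = perf_evlist__new();
	evsel  = perf_evsel__new(&attr);
	perf_evlist__add(evlist, evsel);
	perf_evlist__set_maps(evlist, NULL, threads);

	perf_evlist__open(evlist);
	perf_evlist__enable(evlist);
	/* ... workload under measurement ... */
	perf_evlist__disable(evlist);

	perf_evsel__read(evsel, 0, 0, &counts);
	printf("count %llu, enabled %llu, run %llu\n",
	       (unsigned long long)counts.val,
	       (unsigned long long)counts.ena,
	       (unsigned long long)counts.run);

	perf_evlist__close(evlist);
	perf_evlist__delete(evlist);
	perf_thread_map__put(threads);
	return 0;
}
--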
diff --git a/tools/lib/perf/Documentation/libperf-sampling.txt b/tools/lib/perf/Documentation/libperf-sampling.txt
index d71a7b4fcf5f..2378980fab8a 100644
--- a/tools/lib/perf/Documentation/libperf-sampling.txt
+++ b/tools/lib/perf/Documentation/libperf-sampling.txt
@@ -8,13 +8,13 @@ libperf-sampling - sampling interface
DESCRIPTION
-----------
-The sampling interface provides API to meassure and get count for specific perf events.
+The sampling interface provides API to measure and get count for specific perf events.
The following test tries to explain count on `sampling.c` example.
It is by no means complete guide to sampling, but shows libperf basic API for sampling.
-The `sampling.c` comes with libbperf package and can be compiled and run like:
+The `sampling.c` comes with libperf package and can be compiled and run like:
[source,bash]
--
@@ -33,7 +33,8 @@ cpu 0, pid 4465, tid 4470, ip 7f84fe0ebebf, period 176
It requires root access, because it uses hardware cycles event.
-The `sampling.c` example profiles/samples all CPUs with hardware cycles, in a nutshel it:
+The `sampling.c` example profiles/samples all CPUs with hardware cycles, in a
+nutshell it:
- creates events
- adds them to the event list
@@ -90,13 +91,13 @@ Once the setup is complete we start by defining cycles event using the `struct p
36 };
--
-Next step is to prepare cpus map.
+Next step is to prepare CPUs map.
In this case we will monitor all the available CPUs:
[source,c]
--
- 42 cpus = perf_cpu_map__new(NULL);
+ 42 cpus = perf_cpu_map__new_online_cpus();
43 if (!cpus) {
44 fprintf(stderr, "failed to create cpus\n");
45 return -1;
@@ -152,7 +153,7 @@ Once the events list is open, we can create memory maps AKA perf ring buffers:
--
The event is created as disabled (note the `disabled = 1` assignment above),
-so we need to enable the events list explicitely.
+so we need to enable the events list explicitly.
From this moment the cycles event is sampling.
@@ -212,7 +213,7 @@ Each sample needs to get parsed:
106 cpu, pid, tid, ip, period);
--
-And finaly cleanup.
+And finally cleanup.
We close the whole events list (both events) and remove it together with the threads map:
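
Once the event list is opened, enabled and mmapped, draining the ring buffers follows a fixed read_init/read_event/consume/read_done cycle. A minimal sketch of that loop, assuming an already opened and enabled `evlist`:

[source,c]
--
#include <perf/evlist.h>
#include <perf/event.h>
#include <perf/mmap.h>

/* Sketch: drain every ring buffer of an opened, enabled evlist. */
static void drain(struct perf_evlist *evlist)
{
	struct perf_mmap *map;

	perf_evlist__mmap(evlist, 4);		/* 4 data pages per buffer */

	perf_evlist__for_each_mmap(evlist, map, false) {
		union perf_event *event;

		if (perf_mmap__read_init(map) < 0)
			continue;

		while ((event = perf_mmap__read_event(map)) != NULL) {
			/* event->header.type is PERF_RECORD_SAMPLE etc. */
			perf_mmap__consume(map);
		}
		perf_mmap__read_done(map);
	}
	perf_evlist__munmap(evlist);
}
--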
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index 5a6bb512789d..4072bc9b7670 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -29,7 +29,7 @@ SYNOPSIS
void libperf_init(libperf_print_fn_t fn);
--
-*API to handle cpu maps:*
+*API to handle CPU maps:*
[source,c]
--
@@ -37,17 +37,17 @@ SYNOPSIS
struct perf_cpu_map;
- struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+ struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
- struct perf_cpu_map *perf_cpu_map__read(FILE *file);
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
struct perf_cpu_map *other);
void perf_cpu_map__put(struct perf_cpu_map *map);
int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
- bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+ bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
int perf_cpu_map__max(struct perf_cpu_map *map);
+ bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
#define perf_cpu_map__for_each_cpu(cpu, idx, cpus)
--
@@ -61,11 +61,12 @@ SYNOPSIS
struct perf_thread_map;
struct perf_thread_map *perf_thread_map__new_dummy(void);
+ struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);
- void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid);
- char *perf_thread_map__comm(struct perf_thread_map *map, int thread);
+ void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid);
+ char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
int perf_thread_map__nr(struct perf_thread_map *threads);
- pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread);
+ pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
void perf_thread_map__put(struct perf_thread_map *map);
@@ -135,13 +136,16 @@ SYNOPSIS
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel);
- void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
- int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+ void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+ int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
+ void perf_evsel__munmap(struct perf_evsel *evsel);
+ void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread);
+ int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count);
int perf_evsel__enable(struct perf_evsel *evsel);
- int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu);
+ int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
int perf_evsel__disable(struct perf_evsel *evsel);
- int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu);
+ int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
@@ -206,6 +210,7 @@ SYNOPSIS
struct perf_record_time_conv;
struct perf_record_header_feature;
struct perf_record_compressed;
+ struct perf_record_compressed2;
--
DESCRIPTION
@@ -217,7 +222,7 @@ Following objects are key to the libperf interface:
[horizontal]
-struct perf_cpu_map:: Provides a cpu list abstraction.
+struct perf_cpu_map:: Provides a CPU list abstraction.
struct perf_thread_map:: Provides a thread list abstraction.
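
The CPU-map portion of the synopsis is easiest to see end to end. The sketch below follows the `struct perf_cpu` signatures introduced by the cpumap.c changes further down; note the synopsis above still lists the older plain-`int` forms for some calls:

[source,c]
--
#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3,6");
	struct perf_cpu cpu;
	int idx;

	/* CPU values are wrapped in struct perf_cpu after this change. */
	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("idx %d: cpu %d\n", idx, cpu.cpu);

	printf("nr %d max %d has(2) %d\n",
	       perf_cpu_map__nr(cpus),
	       perf_cpu_map__max(cpus).cpu,
	       perf_cpu_map__has(cpus, (struct perf_cpu){ .cpu = 2 }));

	perf_cpu_map__put(cpus);
	return 0;
}
--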
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index 3718d65cffac..7fbb50b74c00 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -39,27 +39,10 @@ libdir = $(prefix)/$(libdir_relative)
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
-# Set compile option CFLAGS
-ifdef EXTRA_CFLAGS
- CFLAGS := $(EXTRA_CFLAGS)
-else
- CFLAGS := -g -Wall
-endif
+TEST_ARGS := $(if $(V),-v)
INCLUDES = \
+-I$(OUTPUT)arch/$(SRCARCH)/include/generated/uapi \
-I$(srctree)/tools/lib/perf/include \
-I$(srctree)/tools/lib/ \
-I$(srctree)/tools/include \
@@ -68,11 +51,12 @@ INCLUDES = \
-I$(srctree)/tools/include/uapi
# Append required CFLAGS
-override CFLAGS += $(EXTRA_WARNINGS)
-override CFLAGS += -Werror -Wall
+override CFLAGS += -g -Werror -Wall
override CFLAGS += -fPIC
override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
+override CFLAGS += $(EXTRA_WARNINGS)
+override CFLAGS += $(EXTRA_CFLAGS)
all:
@@ -116,7 +100,16 @@ $(LIBAPI)-clean:
$(call QUIET_CLEAN, libapi)
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
-$(LIBPERF_IN): FORCE
+uapi-asm := $(OUTPUT)arch/$(SRCARCH)/include/generated/uapi/asm
+ifeq ($(SRCARCH),arm64)
+ syscall-y := $(uapi-asm)/unistd_64.h
+endif
+uapi-asm-generic:
+ $(if $(syscall-y),\
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-headers obj=$(uapi-asm) \
+ generic=include/uapi/asm-generic $(syscall-y),)
+
+$(LIBPERF_IN): uapi-asm-generic FORCE
$(Q)$(MAKE) $(build)=libperf
$(LIBPERF_A): $(LIBPERF_IN)
@@ -136,12 +129,30 @@ all: fixdep
clean: $(LIBAPI)-clean
$(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \
- *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd LIBPERF-CFLAGS $(LIBPERF_PC)
- $(Q)$(MAKE) -C tests clean
+ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \
+ $(TESTS_STATIC) $(TESTS_SHARED) $(syscall-y)
+
+TESTS_IN = tests-in.o
+
+TESTS_STATIC = $(OUTPUT)tests-static
+TESTS_SHARED = $(OUTPUT)tests-shared
+
+$(TESTS_IN): FORCE
+ $(Q)$(MAKE) $(build)=tests
+
+$(TESTS_STATIC): $(TESTS_IN) $(LIBPERF_A) $(LIBAPI)
+ $(QUIET_LINK)$(CC) -o $@ $^
+
+$(TESTS_SHARED): $(TESTS_IN) $(LIBAPI)
+ $(QUIET_LINK)$(CC) -o $@ -L$(or $(OUTPUT),.) $^ -lperf
-tests: libs
- $(Q)$(MAKE) -C tests
- $(Q)$(MAKE) -C tests run
+make-tests: libs $(TESTS_SHARED) $(TESTS_STATIC)
+
+tests: make-tests
+ @echo "running static:"
+ @./$(TESTS_STATIC) $(TEST_ARGS)
+ @echo "running dynamic:"
+ @LD_LIBRARY_PATH=. ./$(TESTS_SHARED) $(TEST_ARGS)
$(LIBPERF_PC):
$(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
@@ -156,10 +167,10 @@ define do_install_mkdir
endef
define do_install
- if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
- fi; \
- $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+ if [ ! -d '$2' ]; then \
+ $(INSTALL) -d -m 755 '$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$2'
endef
install_lib: libs
@@ -167,19 +178,28 @@ install_lib: libs
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIBPERF_ALL) $(DESTDIR)$(libdir_SQ)
-install_headers:
- $(call QUIET_INSTALL, headers) \
- $(call do_install,include/perf/core.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/cpumap.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/event.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/mmap.h,$(prefix)/include/perf,644);
+HDRS := bpf_perf.h core.h cpumap.h threadmap.h evlist.h evsel.h event.h mmap.h
+INTERNAL_HDRS := cpumap.h evlist.h evsel.h lib.h mmap.h rc_check.h threadmap.h xyarray.h
+
+INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/perf
+INSTALL_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(HDRS))
+INSTALL_INTERNAL_HDRS_PFX := $(DESTDIR)$(prefix)/include/internal
+INSTALL_INTERNAL_HDRS := $(addprefix $(INSTALL_INTERNAL_HDRS_PFX)/, $(INTERNAL_HDRS))
+
+$(INSTALL_HDRS): $(INSTALL_HDRS_PFX)/%.h: include/perf/%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_HDRS_PFX)/,644)
+
+$(INSTALL_INTERNAL_HDRS): $(INSTALL_INTERNAL_HDRS_PFX)/%.h: include/internal/%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_INTERNAL_HDRS_PFX)/,644)
+
+install_headers: $(INSTALL_HDRS) $(INSTALL_INTERNAL_HDRS)
+ $(call QUIET_INSTALL, libperf_headers)
install_pkgconfig: $(LIBPERF_PC)
$(call QUIET_INSTALL, $(LIBPERF_PC)) \
- $(call do_install,$(LIBPERF_PC),$(libdir_SQ)/pkgconfig,644)
+ $(call do_install,$(LIBPERF_PC),$(DESTDIR_SQ)$(libdir_SQ)/pkgconfig,644)
install_doc:
$(Q)$(MAKE) -C Documentation install-man install-html install-examples
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index ca0215047c32..b20a5280f2b3 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
@@ -9,16 +10,38 @@
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
+#include "internal.h"
+#include <api/fs/fs.h>
-struct perf_cpu_map *perf_cpu_map__dummy_new(void)
+#define MAX_NR_CPUS 4096
+
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
- struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
+ RC_CHK_ACCESS(map)->nr = nr_cpus;
+}
- if (cpus != NULL) {
- cpus->nr = 1;
- cpus->map[0] = -1;
+struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
+{
+ RC_STRUCT(perf_cpu_map) *cpus;
+ struct perf_cpu_map *result;
+
+ if (nr_cpus == 0)
+ return NULL;
+
+ cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
+ if (ADD_RC_CHK(result, cpus)) {
+ cpus->nr = nr_cpus;
refcount_set(&cpus->refcnt, 1);
}
+ return result;
+}
+
+struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
+{
+ struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);
+
+ if (cpus)
+ RC_CHK_ACCESS(cpus)->map[0].cpu = -1;
return cpus;
}
@@ -26,145 +49,116 @@ struct perf_cpu_map *perf_cpu_map__dummy_new(void)
static void cpu_map__delete(struct perf_cpu_map *map)
{
if (map) {
- WARN_ONCE(refcount_read(&map->refcnt) != 0,
+ WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
"cpu_map refcnt unbalanced\n");
- free(map);
+ RC_CHK_FREE(map);
}
}
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
- if (map)
- refcount_inc(&map->refcnt);
- return map;
+ struct perf_cpu_map *result;
+
+ if (RC_CHK_GET(result, map))
+ refcount_inc(perf_cpu_map__refcnt(map));
+
+ return result;
}
void perf_cpu_map__put(struct perf_cpu_map *map)
{
- if (map && refcount_dec_and_test(&map->refcnt))
- cpu_map__delete(map);
+ if (map) {
+ if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
+ cpu_map__delete(map);
+ else
+ RC_CHK_PUT(map);
+ }
}
-static struct perf_cpu_map *cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
struct perf_cpu_map *cpus;
- int nr_cpus;
+ int nr_cpus, nr_cpus_conf;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (nr_cpus < 0)
return NULL;
- cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
+ nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
+ if (nr_cpus != nr_cpus_conf) {
+ pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
+ nr_cpus, nr_cpus_conf, nr_cpus);
+ }
+
+ cpus = perf_cpu_map__alloc(nr_cpus);
if (cpus != NULL) {
int i;
for (i = 0; i < nr_cpus; ++i)
- cpus->map[i] = i;
-
- cpus->nr = nr_cpus;
- refcount_set(&cpus->refcnt, 1);
+ RC_CHK_ACCESS(cpus)->map[i].cpu = i;
}
return cpus;
}
-static int cmp_int(const void *a, const void *b)
-{
- return *(const int *)a - *(const int*)b;
-}
-
-static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
+static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
- size_t payload_size = nr_cpus * sizeof(int);
- struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
- int i, j;
+ struct perf_cpu_map *cpus = NULL;
+ char *buf = NULL;
+ size_t buf_len;
- if (cpus != NULL) {
- memcpy(cpus->map, tmp_cpus, payload_size);
- qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
- /* Remove dups */
- j = 0;
- for (i = 0; i < nr_cpus; i++) {
- if (i == 0 || cpus->map[i] != cpus->map[i - 1])
- cpus->map[j++] = cpus->map[i];
- }
- cpus->nr = j;
- assert(j <= nr_cpus);
- refcount_set(&cpus->refcnt, 1);
+ if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) {
+ cpus = perf_cpu_map__new(buf);
+ free(buf);
}
-
return cpus;
}
-struct perf_cpu_map *perf_cpu_map__read(FILE *file)
+struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
- struct perf_cpu_map *cpus = NULL;
- int nr_cpus = 0;
- int *tmp_cpus = NULL, *tmp;
- int max_entries = 0;
- int n, cpu, prev;
- char sep;
-
- sep = 0;
- prev = -1;
- for (;;) {
- n = fscanf(file, "%u%c", &cpu, &sep);
- if (n <= 0)
- break;
- if (prev >= 0) {
- int new_max = nr_cpus + cpu - prev - 1;
-
- WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
- "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);
-
- if (new_max >= max_entries) {
- max_entries = new_max + MAX_NR_CPUS / 2;
- tmp = realloc(tmp_cpus, max_entries * sizeof(int));
- if (tmp == NULL)
- goto out_free_tmp;
- tmp_cpus = tmp;
- }
+ struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();
- while (++prev < cpu)
- tmp_cpus[nr_cpus++] = prev;
- }
- if (nr_cpus == max_entries) {
- max_entries += MAX_NR_CPUS;
- tmp = realloc(tmp_cpus, max_entries * sizeof(int));
- if (tmp == NULL)
- goto out_free_tmp;
- tmp_cpus = tmp;
- }
+ if (cpus)
+ return cpus;
- tmp_cpus[nr_cpus++] = cpu;
- if (n == 2 && sep == '-')
- prev = cpu;
- else
- prev = -1;
- if (n == 1 || sep == '\n')
- break;
- }
+ return cpu_map__new_sysconf();
+}
- if (nr_cpus > 0)
- cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else
- cpus = cpu_map__default_new();
-out_free_tmp:
- free(tmp_cpus);
- return cpus;
+
+static int cmp_cpu(const void *a, const void *b)
+{
+ const struct perf_cpu *cpu_a = a, *cpu_b = b;
+
+ return cpu_a->cpu - cpu_b->cpu;
}
-static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
+static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
- struct perf_cpu_map *cpus = NULL;
- FILE *onlnf;
+ return RC_CHK_ACCESS(cpus)->map[idx];
+}
- onlnf = fopen("/sys/devices/system/cpu/online", "r");
- if (!onlnf)
- return cpu_map__default_new();
+static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
+{
+ size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
+ struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
+ int i, j;
- cpus = perf_cpu_map__read(onlnf);
- fclose(onlnf);
+ if (cpus != NULL) {
+ memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
+ qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
+ /* Remove dups */
+ j = 0;
+ for (i = 0; i < nr_cpus; i++) {
+ if (i == 0 ||
+ __perf_cpu_map__cpu(cpus, i).cpu !=
+ __perf_cpu_map__cpu(cpus, i - 1).cpu) {
+ RC_CHK_ACCESS(cpus)->map[j++].cpu =
+ __perf_cpu_map__cpu(cpus, i).cpu;
+ }
+ }
+ perf_cpu_map__set_nr(cpus, j);
+ assert(j <= nr_cpus);
+ }
return cpus;
}
@@ -174,11 +168,11 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
int i, nr_cpus = 0;
- int *tmp_cpus = NULL, *tmp;
+ struct perf_cpu *tmp_cpus = NULL, *tmp;
int max_entries = 0;
if (!cpu_list)
- return cpu_map__read_all_cpu_map();
+ return perf_cpu_map__new_online_cpus();
/*
* must handle the case of empty cpumap to cover
@@ -191,8 +185,8 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
while (isdigit(*cpu_list)) {
p = NULL;
start_cpu = strtoul(cpu_list, &p, 0);
- if (start_cpu >= INT_MAX
- || (*p != '\0' && *p != ',' && *p != '-'))
+ if (start_cpu >= INT16_MAX
+ || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n'))
goto invalid;
if (*p == '-') {
@@ -200,7 +194,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
p = NULL;
end_cpu = strtoul(cpu_list, &p, 0);
- if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
+ if (end_cpu >= INT16_MAX || (*p != '\0' && *p != ',' && *p != '\n'))
goto invalid;
if (end_cpu < start_cpu)
@@ -215,17 +209,17 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
for (; start_cpu <= end_cpu; start_cpu++) {
/* check for duplicates */
for (i = 0; i < nr_cpus; i++)
- if (tmp_cpus[i] == (int)start_cpu)
+ if (tmp_cpus[i].cpu == (int16_t)start_cpu)
goto invalid;
if (nr_cpus == max_entries) {
- max_entries += MAX_NR_CPUS;
- tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ max_entries += max(end_cpu - start_cpu + 1, 16UL);
+ tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto invalid;
tmp_cpus = tmp;
}
- tmp_cpus[nr_cpus++] = (int)start_cpu;
+ tmp_cpus[nr_cpus++].cpu = (int16_t)start_cpu;
}
if (*p)
++p;
@@ -233,113 +227,260 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
cpu_list = p;
}
- if (nr_cpus > 0)
+ if (nr_cpus > 0) {
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else if (*cpu_list != '\0')
- cpus = cpu_map__default_new();
- else
- cpus = perf_cpu_map__dummy_new();
+ } else if (*cpu_list != '\0') {
+ pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
+ cpu_list);
+ cpus = perf_cpu_map__new_online_cpus();
+ } else {
+ cpus = perf_cpu_map__new_any_cpu();
+ }
invalid:
free(tmp_cpus);
out:
return cpus;
}
-int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+struct perf_cpu_map *perf_cpu_map__new_int(int cpu)
{
- if (cpus && idx < cpus->nr)
- return cpus->map[idx];
+ struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);
- return -1;
+ if (cpus)
+ RC_CHK_ACCESS(cpus)->map[0].cpu = cpu;
+
+ return cpus;
+}
+
+static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+{
+ return RC_CHK_ACCESS(cpus)->nr;
+}
+
+struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+{
+ struct perf_cpu result = {
+ .cpu = -1
+ };
+
+ if (cpus && idx < __perf_cpu_map__nr(cpus))
+ return __perf_cpu_map__cpu(cpus, idx);
+
+ return result;
}
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
- return cpus ? cpus->nr : 1;
+ return cpus ? __perf_cpu_map__nr(cpus) : 1;
+}
+
+bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
+{
+ return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
+}
+
+bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map)
+{
+ if (!map)
+ return true;
+
+ return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
}
-bool perf_cpu_map__empty(const struct perf_cpu_map *map)
+bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
{
- return map ? map->map[0] == -1 : true;
+ return map == NULL;
}
-int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
+int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
- int i;
+ int low, high;
+
+ if (!cpus)
+ return -1;
- for (i = 0; i < cpus->nr; ++i) {
- if (cpus->map[i] == cpu)
- return i;
+ low = 0;
+ high = __perf_cpu_map__nr(cpus);
+ while (low < high) {
+ int idx = (low + high) / 2;
+ struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);
+
+ if (cpu_at_idx.cpu == cpu.cpu)
+ return idx;
+
+ if (cpu_at_idx.cpu > cpu.cpu)
+ high = idx;
+ else
+ low = idx + 1;
}
return -1;
}
-int perf_cpu_map__max(struct perf_cpu_map *map)
+bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
+{
+ return perf_cpu_map__idx(cpus, cpu) != -1;
+}
+
+bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
- int i, max = -1;
+ int nr;
+
+ if (lhs == rhs)
+ return true;
+
+ if (!lhs || !rhs)
+ return false;
- for (i = 0; i < map->nr; i++) {
- if (map->map[i] > max)
- max = map->map[i];
+ nr = __perf_cpu_map__nr(lhs);
+ if (nr != __perf_cpu_map__nr(rhs))
+ return false;
+
+ for (int idx = 0; idx < nr; idx++) {
+ if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
+ return false;
}
+ return true;
+}
- return max;
+bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
+{
+ return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
+}
+
+struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
+{
+ struct perf_cpu cpu, result = {
+ .cpu = -1
+ };
+ int idx;
+
+ perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
+ result = cpu;
+ break;
+ }
+ return result;
+}
+
+struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
+{
+ struct perf_cpu result = {
+ .cpu = -1
+ };
+
+ // cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
+ return __perf_cpu_map__nr(map) > 0
+ ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
+ : result;
+}
+
+/** Is 'b' a subset of 'a'. */
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
+{
+ if (a == b || !b)
+ return true;
+ if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
+ return false;
+
+ for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
+ if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
+ return false;
+ if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
+ j++;
+ if (j == __perf_cpu_map__nr(b))
+ return true;
+ }
+ }
+ return false;
}
/*
- * Merge two cpumaps
+ * Merge two cpumaps.
+ *
+ * If 'other' is subset of '*orig', '*orig' keeps itself with no reference count
+ * change (similar to "realloc").
*
- * orig either gets freed and replaced with a new map, or reused
- * with no reference count change (similar to "realloc")
- * other has its reference count increased.
+ * If '*orig' is subset of 'other', '*orig' reuses 'other' with its reference
+ * count increased.
+ *
+ * Otherwise, '*orig' gets freed and replaced with a new map.
*/
-
-struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
- struct perf_cpu_map *other)
+int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
{
- int *tmp_cpus;
+ struct perf_cpu *tmp_cpus;
int tmp_len;
int i, j, k;
struct perf_cpu_map *merged;
- if (!orig && !other)
- return NULL;
- if (!orig) {
- perf_cpu_map__get(other);
- return other;
+ if (perf_cpu_map__is_subset(*orig, other))
+ return 0;
+ if (perf_cpu_map__is_subset(other, *orig)) {
+ perf_cpu_map__put(*orig);
+ *orig = perf_cpu_map__get(other);
+ return 0;
}
- if (!other)
- return orig;
- if (orig->nr == other->nr &&
- !memcmp(orig->map, other->map, orig->nr * sizeof(int)))
- return orig;
-
- tmp_len = orig->nr + other->nr;
- tmp_cpus = malloc(tmp_len * sizeof(int));
+
+ tmp_len = __perf_cpu_map__nr(*orig) + __perf_cpu_map__nr(other);
+ tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
if (!tmp_cpus)
- return NULL;
+ return -ENOMEM;
/* Standard merge algorithm from wikipedia */
i = j = k = 0;
- while (i < orig->nr && j < other->nr) {
- if (orig->map[i] <= other->map[j]) {
- if (orig->map[i] == other->map[j])
+ while (i < __perf_cpu_map__nr(*orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
+ if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
j++;
- tmp_cpus[k++] = orig->map[i++];
+ tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
} else
- tmp_cpus[k++] = other->map[j++];
+ tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
}
- while (i < orig->nr)
- tmp_cpus[k++] = orig->map[i++];
+ while (i < __perf_cpu_map__nr(*orig))
+ tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
- while (j < other->nr)
- tmp_cpus[k++] = other->map[j++];
+ while (j < __perf_cpu_map__nr(other))
+ tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
assert(k <= tmp_len);
merged = cpu_map__trim_new(k, tmp_cpus);
free(tmp_cpus);
- perf_cpu_map__put(orig);
+ perf_cpu_map__put(*orig);
+ *orig = merged;
+ return 0;
+}
+
+struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
+ struct perf_cpu_map *other)
+{
+ struct perf_cpu *tmp_cpus;
+ int tmp_len;
+ int i, j, k;
+ struct perf_cpu_map *merged = NULL;
+
+ if (perf_cpu_map__is_subset(other, orig))
+ return perf_cpu_map__get(orig);
+ if (perf_cpu_map__is_subset(orig, other))
+ return perf_cpu_map__get(other);
+
+ tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
+ tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
+ if (!tmp_cpus)
+ return NULL;
+
+ i = j = k = 0;
+ while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
+ i++;
+ else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
+ j++;
+ else {
+ j++;
+ tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+ }
+ }
+ if (k)
+ merged = cpu_map__trim_new(k, tmp_cpus);
+ free(tmp_cpus);
return merged;
}
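
The new merge and intersect semantics are easiest to check with two small overlapping maps. A sketch, using the pointer-to-pointer `perf_cpu_map__merge()` signature introduced above:

[source,c]
--
#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *a = perf_cpu_map__new("0-3");
	struct perf_cpu_map *b = perf_cpu_map__new("2-5");
	struct perf_cpu_map *both;

	/* Intersection is a new map holding CPUs 2 and 3. */
	both = perf_cpu_map__intersect(a, b);
	printf("intersect nr %d\n", perf_cpu_map__nr(both));

	/* Merge updates 'a' in place to hold CPUs 0-5 and returns 0 on
	 * success, mirroring the "realloc" comment above. */
	if (perf_cpu_map__merge(&a, b) < 0)
		return 1;
	printf("merge nr %d\n", perf_cpu_map__nr(a));

	perf_cpu_map__put(both);
	perf_cpu_map__put(b);
	perf_cpu_map__put(a);
	return 0;
}
--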
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index 6a875a0f01bb..b1f4c8176b32 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -23,52 +23,94 @@
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
+#include "internal.h"
void perf_evlist__init(struct perf_evlist *evlist)
{
- int i;
-
- for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
- INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
evlist->nr_entries = 0;
fdarray__init(&evlist->pollfd, 64);
+ perf_evlist__reset_id_hash(evlist);
}
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
- /*
- * We already have cpus for evsel (via PMU sysfs) so
- * keep it, if there's no target cpu list defined.
- */
- if (!evsel->own_cpus || evlist->has_user_cpus) {
+ if (evsel->system_wide) {
+ /* System wide: set the cpu map of the evsel to all online CPUs. */
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__get(evlist->cpus);
+ evsel->cpus = perf_cpu_map__new_online_cpus();
+ } else if (evlist->has_user_cpus && evsel->is_pmu_core) {
+ /*
+ * User requested CPUs on a core PMU, ensure the requested CPUs
+ * are valid by intersecting with those of the PMU.
+ */
+ perf_cpu_map__put(evsel->cpus);
+ evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
+
+ /*
+ * Empty cpu lists would eventually get opened as "any" so remove
+ * genuinely empty ones before they're opened in the wrong place.
+ */
+ if (perf_cpu_map__is_empty(evsel->cpus)) {
+ struct perf_evsel *next = perf_evlist__next(evlist, evsel);
+
+ perf_evlist__remove(evlist, evsel);
+ /* Keep idx contiguous */
+ if (next)
+ list_for_each_entry_from(next, &evlist->entries, node)
+ next->idx--;
+ }
+ } else if (!evsel->own_cpus || evlist->has_user_cpus ||
+ (!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
+ /*
+ * The PMU didn't specify a default cpu map, this isn't a core
+ * event and the user requested CPUs or the evlist user
+ * requested CPUs have the "any CPU" (aka dummy) CPU value. In
+ * which case use the user requested CPUs rather than the PMU
+ * ones.
+ */
+ perf_cpu_map__put(evsel->cpus);
+ evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
} else if (evsel->cpus != evsel->own_cpus) {
+ /*
+ * No user requested cpu map but the PMU cpu map doesn't match
+ * the evsel's. Reset it back to the PMU cpu map.
+ */
perf_cpu_map__put(evsel->cpus);
evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
}
- perf_thread_map__put(evsel->threads);
- evsel->threads = perf_thread_map__get(evlist->threads);
- evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
+ if (evsel->system_wide) {
+ perf_thread_map__put(evsel->threads);
+ evsel->threads = perf_thread_map__new_dummy();
+ } else {
+ perf_thread_map__put(evsel->threads);
+ evsel->threads = perf_thread_map__get(evlist->threads);
+ }
+
+ perf_cpu_map__merge(&evlist->all_cpus, evsel->cpus);
}
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *n;
- perf_evlist__for_each_evsel(evlist, evsel)
+ evlist->needs_map_propagation = true;
+
+ list_for_each_entry_safe(evsel, n, &evlist->entries, node)
__perf_evlist__propagate_maps(evlist, evsel);
}
void perf_evlist__add(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
+ evsel->idx = evlist->nr_entries;
list_add_tail(&evsel->node, &evlist->entries);
evlist->nr_entries += 1;
- __perf_evlist__propagate_maps(evlist, evsel);
+
+ if (evlist->needs_map_propagation)
+ __perf_evlist__propagate_maps(evlist, evsel);
}
void perf_evlist__remove(struct perf_evlist *evlist,
@@ -122,10 +164,10 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
- perf_cpu_map__put(evlist->cpus);
+ perf_cpu_map__put(evlist->user_requested_cpus);
perf_cpu_map__put(evlist->all_cpus);
perf_thread_map__put(evlist->threads);
- evlist->cpus = NULL;
+ evlist->user_requested_cpus = NULL;
evlist->all_cpus = NULL;
evlist->threads = NULL;
fdarray__exit(&evlist->pollfd);
@@ -154,9 +196,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
* original reference count of 1. If that is not the case it is up to
* the caller to increase the reference count.
*/
- if (cpus != evlist->cpus) {
- perf_cpu_map__put(evlist->cpus);
- evlist->cpus = perf_cpu_map__get(cpus);
+ if (cpus != evlist->user_requested_cpus) {
+ perf_cpu_map__put(evlist->user_requested_cpus);
+ evlist->user_requested_cpus = perf_cpu_map__get(cpus);
}
if (threads != evlist->threads) {
@@ -164,9 +206,6 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
evlist->threads = perf_thread_map__get(threads);
}
- if (!evlist->all_cpus && cpus)
- evlist->all_cpus = perf_cpu_map__get(cpus);
-
perf_evlist__propagate_maps(evlist);
}
@@ -223,10 +262,10 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist)
static void perf_evlist__id_hash(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id)
+ int cpu_map_idx, int thread, u64 id)
{
int hash;
- struct perf_sample_id *sid = SID(evsel, cpu, thread);
+ struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);
sid->id = id;
sid->evsel = evsel;
@@ -234,23 +273,37 @@ static void perf_evlist__id_hash(struct perf_evlist *evlist,
hlist_add_head(&sid->node, &evlist->heads[hash]);
}
+void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
+{
+ int i;
+
+ for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+ INIT_HLIST_HEAD(&evlist->heads[i]);
+}
+
void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id)
+ int cpu_map_idx, int thread, u64 id)
{
- perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+ if (!SID(evsel, cpu_map_idx, thread))
+ return;
+
+ perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
evsel->id[evsel->ids++] = id;
}
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, int fd)
+ int cpu_map_idx, int thread, int fd)
{
u64 read_data[4] = { 0, };
int id_idx = 1; /* The first entry is the counter value */
u64 id;
int ret;
+ if (!SID(evsel, cpu_map_idx, thread))
+ return -1;
+
ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
if (!ret)
goto add;
@@ -279,13 +332,13 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
id = read_data[id_idx];
add:
- perf_evlist__id_add(evlist, evsel, cpu, thread, id);
+ perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
return 0;
}
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
- int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
int nr_threads = perf_thread_map__nr(evlist->threads);
int nfds = 0;
struct perf_evsel *evsel;
@@ -305,9 +358,9 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
}
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
- void *ptr, short revent)
+ void *ptr, short revent, enum fdarray_flags flags)
{
- int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
+ int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);
if (pos >= 0) {
evlist->pollfd.priv[pos].ptr = ptr;
@@ -364,21 +417,13 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
return map;
}
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
- struct perf_evsel *evsel, int idx, int cpu,
- int thread)
+static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
struct perf_sample_id *sid = SID(evsel, cpu, thread);
sid->idx = idx;
- if (evlist->cpus && cpu >= 0)
- sid->cpu = evlist->cpus->map[cpu];
- else
- sid->cpu = -1;
- if (!evsel->system_wide && evlist->threads && thread >= 0)
- sid->tid = perf_thread_map__pid(evlist->threads, thread);
- else
- sid->tid = -1;
+ sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
+ sid->tid = perf_thread_map__pid(evsel->threads, thread);
}
static struct perf_mmap*
@@ -406,7 +451,7 @@ perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
- int output, int cpu)
+ int output, struct perf_cpu cpu)
{
return perf_mmap__mmap(map, mp, output, cpu);
}
@@ -423,14 +468,15 @@ static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
int idx, struct perf_mmap_param *mp, int cpu_idx,
- int thread, int *_output, int *_output_overwrite)
+ int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
- int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+ struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
struct perf_evsel *evsel;
int revent;
perf_evlist__for_each_entry(evlist, evsel) {
bool overwrite = evsel->attr.write_backward;
+ enum fdarray_flags flgs;
struct perf_mmap *map;
int *output, fd, cpu;
@@ -473,12 +519,21 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
*/
refcount_set(&map->refcnt, 2);
+ if (ops->idx)
+ ops->idx(evlist, evsel, mp, idx);
+
+ /* Debug message used by test scripts */
+ pr_debug("idx %d: mmapping fd %d\n", idx, *output);
if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
return -1;
+ *nr_mmaps += 1;
+
if (!idx)
perf_evlist__set_mmap_first(evlist, map, overwrite);
} else {
+ /* Debug message used by test scripts */
+ pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
return -1;
@@ -487,8 +542,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
revent = !overwrite ? POLLIN : 0;
- if (!evsel->system_wide &&
- perf_evlist__add_pollfd(evlist, fd, map, revent) < 0) {
+ flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
+ if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
perf_mmap__put(map);
return -1;
}
@@ -497,8 +552,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
fd) < 0)
return -1;
- perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
- thread);
+ perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
}
}
@@ -509,21 +563,37 @@ static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
- int thread;
int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int cpu, thread, idx = 0;
+ int nr_mmaps = 0;
+
+ pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
+ __func__, nr_cpus, nr_threads);
- for (thread = 0; thread < nr_threads; thread++) {
+ /* per-thread mmaps */
+ for (thread = 0; thread < nr_threads; thread++, idx++) {
int output = -1;
int output_overwrite = -1;
- if (ops->idx)
- ops->idx(evlist, mp, thread, false);
+ if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
+ &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
+
+ /* system-wide mmaps i.e. per-cpu */
+ for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
+ int output = -1;
+ int output_overwrite = -1;
- if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
- &output, &output_overwrite))
+ if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
+ &output_overwrite, &nr_mmaps))
goto out_unmap;
}
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
return 0;
out_unmap:
@@ -536,23 +606,26 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
int nr_threads = perf_thread_map__nr(evlist->threads);
- int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int nr_mmaps = 0;
int cpu, thread;
+ pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);
+
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
int output_overwrite = -1;
- if (ops->idx)
- ops->idx(evlist, mp, cpu, true);
-
for (thread = 0; thread < nr_threads; thread++) {
if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
- thread, &output, &output_overwrite))
+ thread, &output, &output_overwrite, &nr_mmaps))
goto out_unmap;
}
}
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
return 0;
out_unmap:
@@ -564,9 +637,14 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
int nr_mmaps;
- nr_mmaps = perf_cpu_map__nr(evlist->cpus);
- if (perf_cpu_map__empty(evlist->cpus))
- nr_mmaps = perf_thread_map__nr(evlist->threads);
+ /* One for each CPU */
+ nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
+ if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
+ /* Plus one for each thread */
+ nr_mmaps += perf_thread_map__nr(evlist->threads);
+ /* Minus the per-thread CPU (-1) */
+ nr_mmaps -= 1;
+ }
return nr_mmaps;
}
@@ -575,9 +653,8 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
+ const struct perf_cpu_map *cpus = evlist->all_cpus;
struct perf_evsel *evsel;
- const struct perf_cpu_map *cpus = evlist->cpus;
- const struct perf_thread_map *threads = evlist->threads;
if (!ops || !ops->get || !ops->mmap)
return -EINVAL;
@@ -589,14 +666,14 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
perf_evlist__for_each_entry(evlist, evsel) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
evsel->sample_id == NULL &&
- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+ perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
return -ENOMEM;
}
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
- if (perf_cpu_map__empty(cpus))
+ if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
return mmap_per_thread(evlist, ops, mp);
return mmap_per_cpu(evlist, ops, mp);
@@ -642,3 +719,51 @@ perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
+
+void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
+{
+ struct perf_evsel *evsel;
+ int n = 0;
+
+ __perf_evlist__for_each_entry(list, evsel) {
+ evsel->leader = leader;
+ n++;
+ }
+ leader->nr_members = n;
+}
+
+void perf_evlist__set_leader(struct perf_evlist *evlist)
+{
+ if (evlist->nr_entries) {
+ struct perf_evsel *first = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ __perf_evlist__set_leader(&evlist->entries, first);
+ }
+}
+
+int perf_evlist__nr_groups(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int nr_groups = 0;
+
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ /*
+ * evsels by default have a nr_members of 1, and they are their
+ * own leader. If the nr_members is >1 then this is an
+ * indication of a group.
+ */
+ if (evsel->leader == evsel && evsel->nr_members > 1)
+ nr_groups++;
+ }
+ return nr_groups;
+}
+
+void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel)
+{
+ if (!evsel->system_wide) {
+ evsel->system_wide = true;
+ if (evlist->needs_map_propagation)
+ __perf_evlist__propagate_maps(evlist, evsel);
+ }
+}
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index 4dc06289f4c7..c475319e2e41 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -5,21 +5,29 @@
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
+#include <linux/hash.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
+#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <asm/bug.h>
-void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
+ int idx)
{
INIT_LIST_HEAD(&evsel->node);
+ INIT_LIST_HEAD(&evsel->per_stream_periods);
evsel->attr = *attr;
+ evsel->idx = idx;
+ evsel->leader = evsel;
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
@@ -27,7 +35,7 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
if (evsel != NULL)
- perf_evsel__init(evsel, attr);
+ perf_evsel__init(evsel, attr, 0);
return evsel;
}
@@ -37,17 +45,25 @@ void perf_evsel__delete(struct perf_evsel *evsel)
free(evsel);
}
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(_evsel, _cpu_map_idx, _thread) \
+ ((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
+#define MMAP(_evsel, _cpu_map_idx, _thread) \
+ (_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
+ : NULL)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
if (evsel->fd) {
- int cpu, thread;
- for (cpu = 0; cpu < ncpus; cpu++) {
+ int idx, thread;
+
+ for (idx = 0; idx < ncpus; idx++) {
for (thread = 0; thread < nthreads; thread++) {
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, idx, thread);
+
+ if (fd)
+ *fd = -1;
}
}
}
@@ -55,24 +71,58 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
return evsel->fd != NULL ? 0 : -ENOMEM;
}
+static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
+
+ return evsel->mmap != NULL ? 0 : -ENOMEM;
+}
+
static int
sys_perf_event_open(struct perf_event_attr *attr,
- pid_t pid, int cpu, int group_fd,
+ pid_t pid, struct perf_cpu cpu, int group_fd,
unsigned long flags)
{
- return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+ return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
+}
+
+static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
+{
+ struct perf_evsel *leader = evsel->leader;
+ int *fd;
+
+ if (evsel == leader) {
+ *group_fd = -1;
+ return 0;
+ }
+
+ /*
+ * Leader must be already processed/open,
+ * if not it's a bug.
+ */
+ if (!leader->fd)
+ return -ENOTCONN;
+
+ fd = FD(leader, cpu_map_idx, thread);
+ if (fd == NULL || *fd == -1)
+ return -EBADF;
+
+ *group_fd = *fd;
+
+ return 0;
}
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
- int cpu, thread, err = 0;
+ struct perf_cpu cpu;
+ int idx, thread, err = 0;
if (cpus == NULL) {
static struct perf_cpu_map *empty_cpu_map;
if (empty_cpu_map == NULL) {
- empty_cpu_map = perf_cpu_map__dummy_new();
+ empty_cpu_map = perf_cpu_map__new_any_cpu();
if (empty_cpu_map == NULL)
return -ENOMEM;
}
@@ -93,44 +143,60 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
}
if (evsel->fd == NULL &&
- perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+ perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
return -ENOMEM;
- for (cpu = 0; cpu < cpus->nr; cpu++) {
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
for (thread = 0; thread < threads->nr; thread++) {
- int fd;
+ int fd, group_fd, *evsel_fd;
+
+ evsel_fd = FD(evsel, idx, thread);
+ if (evsel_fd == NULL) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = get_group_fd(evsel, idx, thread, &group_fd);
+ if (err < 0)
+ goto out;
fd = sys_perf_event_open(&evsel->attr,
threads->map[thread].pid,
- cpus->map[cpu], -1, 0);
+ cpu, group_fd, 0);
- if (fd < 0)
- return -errno;
+ if (fd < 0) {
+ err = -errno;
+ goto out;
+ }
- FD(evsel, cpu, thread) = fd;
+ *evsel_fd = fd;
}
}
+out:
+ if (err)
+ perf_evsel__close(evsel);
return err;
}
-static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
+static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
- if (FD(evsel, cpu, thread) >= 0)
- close(FD(evsel, cpu, thread));
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd && *fd >= 0) {
+ close(*fd);
+ *fd = -1;
+ }
}
}
void perf_evsel__close_fd(struct perf_evsel *evsel)
{
- int cpu;
-
- for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
- perf_evsel__close_fd_cpu(evsel, cpu);
+ for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
+ perf_evsel__close_fd_cpu(evsel, idx);
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
@@ -148,12 +214,81 @@ void perf_evsel__close(struct perf_evsel *evsel)
perf_evsel__free_fd(evsel);
}
-void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
+void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
if (evsel->fd == NULL)
return;
- perf_evsel__close_fd_cpu(evsel, cpu);
+ perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
+}
+
+void perf_evsel__munmap(struct perf_evsel *evsel)
+{
+ int idx, thread;
+
+ if (evsel->fd == NULL || evsel->mmap == NULL)
+ return;
+
+ for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int *fd = FD(evsel, idx, thread);
+
+ if (fd == NULL || *fd < 0)
+ continue;
+
+ perf_mmap__munmap(MMAP(evsel, idx, thread));
+ }
+ }
+
+ xyarray__delete(evsel->mmap);
+ evsel->mmap = NULL;
+}
+
+int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
+{
+ int ret, idx, thread;
+ struct perf_mmap_param mp = {
+ .prot = PROT_READ | PROT_WRITE,
+ .mask = (pages * page_size) - 1,
+ };
+
+ if (evsel->fd == NULL || evsel->mmap)
+ return -EINVAL;
+
+ if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
+ return -ENOMEM;
+
+ for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int *fd = FD(evsel, idx, thread);
+ struct perf_mmap *map;
+ struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);
+
+ if (fd == NULL || *fd < 0)
+ continue;
+
+ map = MMAP(evsel, idx, thread);
+ perf_mmap__init(map, NULL, false, NULL);
+
+ ret = perf_mmap__mmap(map, &mp, *fd, cpu);
+ if (ret) {
+ perf_evsel__munmap(evsel);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
+{
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
+ return NULL;
+
+ return MMAP(evsel, cpu_map_idx, thread)->base;
}
int perf_evsel__read_size(struct perf_evsel *evsel)
@@ -172,6 +307,9 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
if (read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
+ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
if (read_format & PERF_FORMAT_GROUP) {
nr = evsel->nr_members;
size += sizeof(u64);
@@ -181,31 +319,120 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
return size;
}
-int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+/* This only reads values for the leader */
+static int perf_evsel__read_group(struct perf_evsel *evsel, int cpu_map_idx,
+ int thread, struct perf_counts_values *count)
+{
+ size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu_map_idx, thread);
+ u64 read_format = evsel->attr.read_format;
+ u64 *data;
+ int idx = 1;
+
+ if (fd == NULL || *fd < 0)
+ return -EINVAL;
+
+ data = calloc(1, size);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (readn(*fd, data, size) <= 0) {
+ free(data);
+ return -errno;
+ }
+
+ /*
+ * This reads only the leader event intentionally since we don't have
+ * perf counts values for sibling events.
+ */
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ count->ena = data[idx++];
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ count->run = data[idx++];
+
+ /* value is always available */
+ count->val = data[idx++];
+ if (read_format & PERF_FORMAT_ID)
+ count->id = data[idx++];
+ if (read_format & PERF_FORMAT_LOST)
+ count->lost = data[idx++];
+
+ free(data);
+ return 0;
+}
+
+/*
+ * The perf read format is very flexible. It needs to set the proper
+ * values according to the read format.
+ */
+static void perf_evsel__adjust_values(struct perf_evsel *evsel, u64 *buf,
+ struct perf_counts_values *count)
+{
+ u64 read_format = evsel->attr.read_format;
+ int n = 0;
+
+ count->val = buf[n++];
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ count->ena = buf[n++];
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ count->run = buf[n++];
+
+ if (read_format & PERF_FORMAT_ID)
+ count->id = buf[n++];
+
+ if (read_format & PERF_FORMAT_LOST)
+ count->lost = buf[n++];
+}
+
+int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu_map_idx, thread);
+ u64 read_format = evsel->attr.read_format;
+ struct perf_counts_values buf;
memset(count, 0, sizeof(*count));
- if (FD(evsel, cpu, thread) < 0)
+ if (fd == NULL || *fd < 0)
return -EINVAL;
- if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+ if (read_format & PERF_FORMAT_GROUP)
+ return perf_evsel__read_group(evsel, cpu_map_idx, thread, count);
+
+ if (MMAP(evsel, cpu_map_idx, thread) &&
+ !(read_format & (PERF_FORMAT_ID | PERF_FORMAT_LOST)) &&
+ !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
+ return 0;
+
+ if (readn(*fd, buf.values, size) <= 0)
return -errno;
+ perf_evsel__adjust_values(evsel, buf.values, count);
return 0;
}
+static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg,
+ int cpu_map_idx, int thread)
+{
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd == NULL || *fd < 0)
+ return -1;
+
+ return ioctl(*fd, ioc, arg);
+}
+
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
int ioc, void *arg,
- int cpu)
+ int cpu_map_idx)
{
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread),
- err = ioctl(fd, ioc, arg);
+ int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);
if (err)
return err;
@@ -214,9 +441,24 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
return 0;
}
-int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
+int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
- return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
+ return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
+}
+
+int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
+{
+ struct perf_cpu cpu __maybe_unused;
+ int idx;
+ int err;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
+ err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
int perf_evsel__enable(struct perf_evsel *evsel)
@@ -229,9 +471,9 @@ int perf_evsel__enable(struct perf_evsel *evsel)
return err;
}
-int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
+int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
- return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
+ return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
}
int perf_evsel__disable(struct perf_evsel *evsel)
@@ -248,7 +490,7 @@ int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
int err = 0, i;
- for (i = 0; i < evsel->cpus->nr && !err; i++)
+ for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
err = perf_evsel__run_ioctl(evsel,
PERF_EVENT_IOC_SET_FILTER,
(void *)filter, i);
@@ -275,9 +517,6 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
if (ncpus == 0 || nthreads == 0)
return 0;
- if (evsel->system_wide)
- nthreads = 1;
-
evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
if (evsel->sample_id == NULL)
return -ENOMEM;
@@ -294,8 +533,73 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
void perf_evsel__free_id(struct perf_evsel *evsel)
{
+ struct perf_sample_id_period *pos, *n;
+
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;
zfree(&evsel->id);
evsel->ids = 0;
+
+ perf_evsel_for_each_per_thread_period_safe(evsel, n, pos) {
+ list_del_init(&pos->node);
+ free(pos);
+ }
+}
+
+bool perf_evsel__attr_has_per_thread_sample_period(struct perf_evsel *evsel)
+{
+ return (evsel->attr.sample_type & PERF_SAMPLE_READ) &&
+ (evsel->attr.sample_type & PERF_SAMPLE_TID) &&
+ evsel->attr.inherit;
+}
+
+u64 *perf_sample_id__get_period_storage(struct perf_sample_id *sid, u32 tid, bool per_thread)
+{
+ struct hlist_head *head;
+ struct perf_sample_id_period *res;
+ int hash;
+
+ if (!per_thread)
+ return &sid->period;
+
+ hash = hash_32(tid, PERF_SAMPLE_ID__HLIST_BITS);
+ head = &sid->periods[hash];
+
+ hlist_for_each_entry(res, head, hnode)
+ if (res->tid == tid)
+ return &res->period;
+
+ if (sid->evsel == NULL)
+ return NULL;
+
+ res = zalloc(sizeof(struct perf_sample_id_period));
+ if (res == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&res->node);
+ res->tid = tid;
+
+ list_add_tail(&res->node, &sid->evsel->per_stream_periods);
+ hlist_add_head(&res->hnode, &sid->periods[hash]);
+
+ return &res->period;
+}
+
+void perf_counts_values__scale(struct perf_counts_values *count,
+ bool scale, __s8 *pscaled)
+{
+ s8 scaled = 0;
+
+ if (scale) {
+ if (count->run == 0) {
+ scaled = -1;
+ count->val = 0;
+ } else if (count->run < count->ena) {
+ scaled = 1;
+ count->val = (u64)((double)count->val * count->ena / count->run);
+ }
+ }
+
+ if (pscaled)
+ *pscaled = scaled;
}
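
   The new perf_evsel__mmap()/perf_evsel__read() pairing above enables userspace
   counter reads (rdpmc on x86) without a read() syscall. A minimal counting
   sketch, mirroring the usage in tests/test-evsel.c rather than code from this
   patch, with error handling elided:

     #include <linux/perf_event.h>
     #include <perf/core.h>
     #include <perf/threadmap.h>
     #include <perf/evsel.h>
     #include <stdio.h>

     int main(void)
     {
             struct perf_event_attr attr = {
                     .type   = PERF_TYPE_SOFTWARE,
                     .config = PERF_COUNT_SW_CPU_CLOCK,
             };
             struct perf_thread_map *threads = perf_thread_map__new_dummy();
             struct perf_evsel *evsel = perf_evsel__new(&attr);
             struct perf_counts_values counts = { .val = 0 };

             perf_thread_map__set_pid(threads, 0, 0);  /* 0 == measure this process */
             perf_evsel__open(evsel, /*cpus=*/NULL, threads);
             perf_evsel__mmap(evsel, /*pages=*/0);     /* map just the control page */
             perf_evsel__read(evsel, 0, 0, &counts);   /* may hit the mmap fast path */
             printf("val %llu ena %llu run %llu\n",
                    (unsigned long long)counts.val,
                    (unsigned long long)counts.ena,
                    (unsigned long long)counts.run);
             perf_evsel__munmap(evsel);
             perf_evsel__close(evsel);
             perf_evsel__delete(evsel);
             perf_thread_map__put(threads);
             return 0;
     }

   Per perf_evsel__read() above, the mmap fast path is only taken when
   read_format requests neither PERF_FORMAT_ID nor PERF_FORMAT_LOST.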
diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h
index 840d4032587b..e2be2d17c32b 100644
--- a/tools/lib/perf/include/internal/cpumap.h
+++ b/tools/lib/perf/include/internal/cpumap.h
@@ -3,17 +3,32 @@
#define __LIBPERF_INTERNAL_CPUMAP_H
#include <linux/refcount.h>
+#include <perf/cpumap.h>
+#include <internal/rc_check.h>
-struct perf_cpu_map {
+/**
+ * A sized, reference counted, sorted array of integers representing CPU
+ * numbers. This is commonly used to capture which CPUs a PMU is associated
+ * with. The indices into the cpumap are frequently used instead, as they
+ * avoid the gaps that raw CPU numbers can have. For events associated with
+ * a pid, rather than a CPU, a single dummy map with an entry of -1 is used.
+ */
+DECLARE_RC_STRUCT(perf_cpu_map) {
refcount_t refcnt;
+ /** Length of the map array. */
int nr;
- int map[];
+ /** The CPU values. */
+ struct perf_cpu map[];
};
-#ifndef MAX_NR_CPUS
-#define MAX_NR_CPUS 2048
-#endif
+struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus);
+int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
-int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu);
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus);
+static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map)
+{
+ return &RC_CHK_ACCESS(map)->refcnt;
+}
#endif /* __LIBPERF_INTERNAL_CPUMAP_H */
diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
index 74dc8c3f0b66..f43bdb9b6227 100644
--- a/tools/lib/perf/include/internal/evlist.h
+++ b/tools/lib/perf/include/internal/evlist.h
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <api/fd/array.h>
+#include <internal/cpumap.h>
#include <internal/evsel.h>
#define PERF_EVLIST__HLIST_BITS 8
@@ -17,7 +18,13 @@ struct perf_evlist {
struct list_head entries;
int nr_entries;
bool has_user_cpus;
- struct perf_cpu_map *cpus;
+ bool needs_map_propagation;
+ /**
+ * The cpus passed from the command line or all online CPUs by
+ * default.
+ */
+ struct perf_cpu_map *user_requested_cpus;
+ /** The union of all evsel cpu maps. */
struct perf_cpu_map *all_cpus;
struct perf_thread_map *threads;
int nr_mmaps;
@@ -31,11 +38,12 @@ struct perf_evlist {
};
typedef void
-(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_mmap_param*, int, bool);
+(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_evsel*,
+ struct perf_mmap_param*, int);
typedef struct perf_mmap*
(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
typedef int
-(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int);
+(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, struct perf_cpu);
struct perf_evlist_mmap_ops {
perf_evlist_mmap__cb_idx_t idx;
@@ -45,7 +53,7 @@ struct perf_evlist_mmap_ops {
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
- void *ptr, short revent);
+ void *ptr, short revent, enum fdarray_flags flags);
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_evlist_mmap_ops *ops,
@@ -118,10 +126,15 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist);
void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id);
+ int cpu_map_idx, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, int fd);
+ int cpu_map_idx, int thread, int fd);
+void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
+
+void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader);
+
+void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel);
#endif /* __LIBPERF_INTERNAL_EVLIST_H */
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
index 1ffd083b235e..ea78defa77d0 100644
--- a/tools/lib/perf/include/internal/evsel.h
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -6,11 +6,37 @@
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/types.h>
+#include <internal/cpumap.h>
-struct perf_cpu_map;
struct perf_thread_map;
struct xyarray;
+/**
+ * The per-thread accumulated period storage node.
+ */
+struct perf_sample_id_period {
+ struct list_head node;
+ struct hlist_node hnode;
+ /* Holds total ID period value for PERF_SAMPLE_READ processing. */
+ u64 period;
+	/* The TID that the values belong to */
+ u32 tid;
+};
+
+/**
+ * perf_evsel_for_each_per_thread_period_safe - safely iterate through all the
+ * per_stream_periods
+ * @evsel: perf_evsel instance to iterate
+ * @item: struct perf_sample_id_period iterator
+ * @tmp: struct perf_sample_id_period temp iterator
+ */
+#define perf_evsel_for_each_per_thread_period_safe(evsel, tmp, item) \
+ list_for_each_entry_safe(item, tmp, &(evsel)->per_stream_periods, node)
+
+#define PERF_SAMPLE_ID__HLIST_BITS 4
+#define PERF_SAMPLE_ID__HLIST_SIZE (1 << PERF_SAMPLE_ID__HLIST_BITS)
+
/*
* Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
* more than one entry in the evlist.
@@ -27,30 +53,86 @@ struct perf_sample_id {
* queue number.
*/
int idx;
- int cpu;
+ struct perf_cpu cpu;
pid_t tid;
- /* Holds total ID period value for PERF_SAMPLE_READ processing. */
- u64 period;
+ /* Guest machine pid and VCPU, valid only if machine_pid is non-zero */
+ pid_t machine_pid;
+ struct perf_cpu vcpu;
+
+ /*
+ * Per-thread and global event counts are mutually exclusive:
+ * Whilst it is possible to combine events into a group with differing
+ * values of PERF_SAMPLE_READ, it is not valid to have inconsistent
+ * values for `inherit`. Therefore it is not possible to have a
+ * situation where a per-thread event is sampled as a global event;
+ * all !inherit groups are global, and all groups where the sampling
+ * event is inherit + PERF_SAMPLE_READ will be per-thread. Any event
+ * that is part of such a group that is inherit but not PERF_SAMPLE_READ
+ * will be read as per-thread. If such an event can also trigger a
+ * sample (such as with sample_period > 0) then it will not cause
+ * `read_format` to be included in its PERF_RECORD_SAMPLE, and
+ * therefore will not expose the per-thread group members as global.
+ */
+ union {
+ /*
+ * Holds total ID period value for PERF_SAMPLE_READ processing
+ * (when period is not per-thread).
+ */
+ u64 period;
+ /*
+ * Holds total ID period value for PERF_SAMPLE_READ processing
+ * (when period is per-thread).
+ */
+ struct hlist_head periods[PERF_SAMPLE_ID__HLIST_SIZE];
+ };
};
struct perf_evsel {
struct list_head node;
struct perf_event_attr attr;
+ /** The commonly used cpu map of CPUs the event should be opened upon, etc. */
struct perf_cpu_map *cpus;
+ /**
+ * The cpu map read from the PMU. For core PMUs this is the list of all
+ * CPUs the event can be opened upon. For other PMUs this is the default
+ * cpu map for opening the event on, for example, the first CPU on a
+ * socket for an uncore event.
+ */
struct perf_cpu_map *own_cpus;
struct perf_thread_map *threads;
struct xyarray *fd;
+ struct xyarray *mmap;
struct xyarray *sample_id;
u64 *id;
u32 ids;
+ struct perf_evsel *leader;
+
+ /* For events where the read_format value is per-thread rather than
+ * global, stores the per-thread cumulative period */
+ struct list_head per_stream_periods;
/* parse modifier helper */
int nr_members;
+ /*
+ * system_wide is for events that need to be on every CPU, irrespective
+ * of user requested CPUs or threads. The main example of this is the
+ * dummy event. Map propagation will set cpus for this event to all CPUs,
+ * as software PMU events like the dummy have an empty CPU map.
+ */
bool system_wide;
+ /*
+ * Some events, for example uncore events, require a CPU.
+ * i.e. it cannot be the 'any CPU' value of -1.
+ */
+ bool requires_cpu;
+	/** Is the PMU for the event a core one? Affects the handling of own_cpus. */
+ bool is_pmu_core;
+ int idx;
};
-void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr);
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
+ int idx);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);
void perf_evsel__free_fd(struct perf_evsel *evsel);
@@ -60,4 +142,9 @@ int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__free_id(struct perf_evsel *evsel);
+bool perf_evsel__attr_has_per_thread_sample_period(struct perf_evsel *evsel);
+
+u64 *perf_sample_id__get_period_storage(struct perf_sample_id *sid, u32 tid,
+ bool per_thread);
+
#endif /* __LIBPERF_INTERNAL_EVSEL_H */
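
   A sketch of how a sample parser would use the per-thread period storage
   declared above (accumulate_period and its locals are illustrative, not part
   of this patch):

     #include <errno.h>
     #include <linux/types.h>
     #include <internal/evsel.h>

     static int accumulate_period(struct perf_sample_id *sid, u32 tid, u64 period)
     {
             /* inherit + PERF_SAMPLE_READ attrs accumulate per TID,
              * everything else keeps using sid->period directly. */
             bool per_thread = perf_evsel__attr_has_per_thread_sample_period(sid->evsel);
             u64 *storage = perf_sample_id__get_period_storage(sid, tid, per_thread);

             if (!storage)
                     return -ENOMEM; /* allocating a per-TID node failed */
             *storage += period;
             return 0;
     }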
diff --git a/tools/lib/perf/include/internal/lib.h b/tools/lib/perf/include/internal/lib.h
index 5175d491b2d4..85471a4b900f 100644
--- a/tools/lib/perf/include/internal/lib.h
+++ b/tools/lib/perf/include/internal/lib.h
@@ -9,4 +9,6 @@ extern unsigned int page_size;
ssize_t readn(int fd, void *buf, size_t n);
ssize_t writen(int fd, const void *buf, size_t n);
+ssize_t preadn(int fd, void *buf, size_t n, off_t offs);
+
#endif /* __LIBPERF_INTERNAL_LIB_H */
diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h
index be7556e0a2b2..5f08cab61ece 100644
--- a/tools/lib/perf/include/internal/mmap.h
+++ b/tools/lib/perf/include/internal/mmap.h
@@ -6,11 +6,13 @@
#include <linux/refcount.h>
#include <linux/types.h>
#include <stdbool.h>
+#include <internal/cpumap.h>
/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
struct perf_mmap;
+struct perf_counts_values;
typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
@@ -23,7 +25,7 @@ struct perf_mmap {
void *base;
int mask;
int fd;
- int cpu;
+ struct perf_cpu cpu;
refcount_t refcnt;
u64 prev;
u64 start;
@@ -31,7 +33,8 @@ struct perf_mmap {
bool overwrite;
u64 flush;
libperf_unmap_cb_t unmap_cb;
- char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+ void *event_copy;
+ size_t event_copy_sz;
struct perf_mmap *next;
};
@@ -45,11 +48,13 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map);
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
bool overwrite, libperf_unmap_cb_t unmap_cb);
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
- int fd, int cpu);
+ int fd, struct perf_cpu cpu);
void perf_mmap__munmap(struct perf_mmap *map);
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);
u64 perf_mmap__read_head(struct perf_mmap *map);
+int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count);
+
#endif /* __LIBPERF_INTERNAL_MMAP_H */
diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h
new file mode 100644
index 000000000000..f80ddfc80129
--- /dev/null
+++ b/tools/lib/perf/include/internal/rc_check.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __LIBPERF_INTERNAL_RC_CHECK_H
+#define __LIBPERF_INTERNAL_RC_CHECK_H
+
+#include <stdlib.h>
+#include <linux/zalloc.h>
+
+/*
+ * Enable reference count checking implicitly with leak checking, which is
+ * integrated into address sanitizer.
+ */
+#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+#define REFCNT_CHECKING 1
+#elif defined(__has_feature)
+#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
+#define REFCNT_CHECKING 1
+#endif
+#endif
+
+/*
+ * Shared reference count checking macros.
+ *
+ * Reference count checking is an approach to sanitizing the use of reference
+ * counted structs. It leverages address and leak sanitizers to make sure gets
+ * are paired with a put. Reference count checking adds a malloc-ed layer of
+ * indirection on a get, and frees it on a put. A missed put will be reported as
+ * a memory leak. A double put will be reported as a double free. Accessing
+ * after a put will cause a use-after-free and/or a segfault.
+ */
+
+#ifndef REFCNT_CHECKING
+/* Replaces "struct foo" so that the pointer may be interposed. */
+#define DECLARE_RC_STRUCT(struct_name) \
+ struct struct_name
+
+/* Declare a reference counted struct variable. */
+#define RC_STRUCT(struct_name) struct struct_name
+
+/*
+ * Interpose the indirection. Result will hold the indirection and object is the
+ * reference counted struct.
+ */
+#define ADD_RC_CHK(result, object) (result = object, object)
+
+/* Strip the indirection layer. */
+#define RC_CHK_ACCESS(object) object
+
+/* Frees the object and the indirection layer. */
+#define RC_CHK_FREE(object) free(object)
+
+/* A get operation adding the indirection layer. */
+#define RC_CHK_GET(result, object) ADD_RC_CHK(result, object)
+
+/* A put operation removing the indirection layer. */
+#define RC_CHK_PUT(object) {}
+
+/* Pointer equality when the indirection may or may not be there. */
+#define RC_CHK_EQUAL(object1, object2) (object1 == object2)
+
+#else
+
+/* Replaces "struct foo" so that the pointer may be interposed. */
+#define DECLARE_RC_STRUCT(struct_name) \
+ struct original_##struct_name; \
+ struct struct_name { \
+ struct original_##struct_name *orig; \
+ }; \
+ struct original_##struct_name
+
+/* Declare a reference counted struct variable. */
+#define RC_STRUCT(struct_name) struct original_##struct_name
+
+/*
+ * Interpose the indirection. Result will hold the indirection and object is the
+ * reference counted struct.
+ */
+#define ADD_RC_CHK(result, object) \
+ ( \
+ object ? (result = malloc(sizeof(*result)), \
+ result ? (result->orig = object, result) \
+ : (result = NULL, NULL)) \
+ : (result = NULL, NULL) \
+ )
+
+/* Strip the indirection layer. */
+#define RC_CHK_ACCESS(object) object->orig
+
+/* Frees the object and the indirection layer. */
+#define RC_CHK_FREE(object) \
+ do { \
+ zfree(&object->orig); \
+ free(object); \
+ } while(0)
+
+/* A get operation adding the indirection layer. */
+#define RC_CHK_GET(result, object) ADD_RC_CHK(result, (object ? object->orig : NULL))
+
+/* A put operation removing the indirection layer. */
+#define RC_CHK_PUT(object) \
+ do { \
+ if (object) { \
+ object->orig = NULL; \
+ free(object); \
+ } \
+ } while(0)
+
+/* Pointer equality when the indirection may or may not be there. */
+#define RC_CHK_EQUAL(object1, object2) (object1 == object2 || \
+ (object1 && object2 && object1->orig == object2->orig))
+
+#endif
+
+#endif /* __LIBPERF_INTERNAL_RC_CHECK_H */
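
   As an illustration of the macros above (struct foo is hypothetical, not from
   the patch), a get/put pair for a reference counted struct looks like:

     #include <linux/refcount.h>
     #include <internal/rc_check.h>

     DECLARE_RC_STRUCT(foo) {
             refcount_t refcnt;
             int value;
     };

     static struct foo *foo__get(struct foo *f)
     {
             struct foo *result;

             if (RC_CHK_GET(result, f))
                     refcount_inc(&RC_CHK_ACCESS(f)->refcnt);
             return result;
     }

     static void foo__put(struct foo *f)
     {
             if (f && refcount_dec_and_test(&RC_CHK_ACCESS(f)->refcnt))
                     RC_CHK_FREE(f);  /* frees the object and its indirection */
             else
                     RC_CHK_PUT(f);   /* frees only this get's indirection */
     }

   With sanitizers enabled each get mallocs a distinct wrapper, so a forgotten
   foo__put() surfaces as a leak attributed to the matching get's call site.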
diff --git a/tools/lib/perf/include/internal/tests.h b/tools/lib/perf/include/internal/tests.h
index 2093e8868a67..b130a6663ff8 100644
--- a/tools/lib/perf/include/internal/tests.h
+++ b/tools/lib/perf/include/internal/tests.h
@@ -3,11 +3,34 @@
#define __LIBPERF_INTERNAL_TESTS_H
#include <stdio.h>
+#include <unistd.h>
-int tests_failed;
+extern int tests_failed;
+extern int tests_verbose;
+
+static inline int get_verbose(char **argv, int argc)
+{
+ int c;
+ int verbose = 0;
+
+ while ((c = getopt(argc, argv, "v")) != -1) {
+		switch (c) {
+ case 'v':
+ verbose = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ optind = 1;
+
+ return verbose;
+}
#define __T_START \
do { \
+ tests_verbose = get_verbose(argv, argc); \
fprintf(stdout, "- running %s...", __FILE__); \
fflush(NULL); \
tests_failed = 0; \
@@ -30,4 +53,15 @@ do {
} \
} while (0)
+#define __T_VERBOSE(...) \
+do { \
+ if (tests_verbose) { \
+ if (tests_verbose == 1) { \
+ fputc('\n', stderr); \
+ tests_verbose++; \
+ } \
+ fprintf(stderr, ##__VA_ARGS__); \
+ } \
+} while (0)
+
#endif /* __LIBPERF_INTERNAL_TESTS_H */
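
   A skeleton of how these helpers combine in a test translation unit
   (test_example and its "tests.h" declaration are illustrative):

     #include <internal/tests.h>
     #include "tests.h"

     int test_example(int argc, char **argv)
     {
             __T_START;                           /* parses -v, prints "- running ..." */
             __T("one should equal one", 1 == 1); /* a failed check bumps tests_failed */
             __T_VERBOSE("shown only with -v\n");
             __T_END;
             return tests_failed == 0 ? 0 : -1;
     }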
diff --git a/tools/lib/perf/include/internal/xyarray.h b/tools/lib/perf/include/internal/xyarray.h
index 51e35d6c8ec4..f10af3da7b21 100644
--- a/tools/lib/perf/include/internal/xyarray.h
+++ b/tools/lib/perf/include/internal/xyarray.h
@@ -18,11 +18,18 @@ struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
void xyarray__delete(struct xyarray *xy);
void xyarray__reset(struct xyarray *xy);
-static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
+static inline void *__xyarray__entry(struct xyarray *xy, int x, int y)
{
return &xy->contents[x * xy->row_size + y * xy->entry_size];
}
+static inline void *xyarray__entry(struct xyarray *xy, size_t x, size_t y)
+{
+ if (x >= xy->max_x || y >= xy->max_y)
+ return NULL;
+ return __xyarray__entry(xy, x, y);
+}
+
static inline int xyarray__max_y(struct xyarray *xy)
{
return xy->max_y;
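
   The bounds check matters because FD() and MMAP() in evsel.c are thin wrappers
   around xyarray__entry(): an out-of-range cpu_map_idx or thread now yields
   NULL instead of a wild pointer, which is why the evsel code above tests
   fd == NULL before *fd. A sketch of the caller-side pattern (slot_fd is
   illustrative):

     #include <errno.h>
     #include <internal/xyarray.h>

     /* Fetch the fd stored for slot (x, y), or -EINVAL if unavailable. */
     static int slot_fd(struct xyarray *fds, int x, int y)
     {
             int *fd = xyarray__entry(fds, x, y); /* NULL if x or y out of range */

             if (fd == NULL || *fd < 0)
                     return -EINVAL;
             return *fd;
     }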
diff --git a/tools/lib/perf/include/perf/bpf_perf.h b/tools/lib/perf/include/perf/bpf_perf.h
new file mode 100644
index 000000000000..e7cf6ba7b674
--- /dev/null
+++ b/tools/lib/perf/include/perf/bpf_perf.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __LIBPERF_BPF_PERF_H
+#define __LIBPERF_BPF_PERF_H
+
+#include <linux/types.h> /* for __u32 */
+
+/*
+ * bpf_perf uses a hashmap, the attr_map, to track all the leader programs.
+ * The hashmap is pinned in bpffs. flock() on this file is used to ensure
+ * no concurrent access to the attr_map. The key of attr_map is struct
+ * perf_event_attr, and the value is struct perf_event_attr_map_entry.
+ *
+ * struct perf_event_attr_map_entry contains two __u32 IDs: the bpf_link of
+ * the leader prog and the diff_map. Each perf-stat session holds a reference
+ * to the bpf_link to make sure the leader prog stays attached to the
+ * sched_switch tracepoint.
+ *
+ * Since the hashmap only contains IDs of the bpf_link and diff_map, it
+ * does not hold any references to the leader program. Once all perf-stat
+ * sessions of these events exit, the leader prog, its maps, and the
+ * perf_events will be freed.
+ */
+struct perf_event_attr_map_entry {
+ __u32 link_id;
+ __u32 diff_map_id;
+};
+
+/* default attr_map name */
+#define BPF_PERF_DEFAULT_ATTR_MAP_PATH "perf_attr_map"
+
+#endif /* __LIBPERF_BPF_PERF_H */
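
   A sketch of the lookup protocol the comment above describes, assuming libbpf
   and a bpffs mount at /sys/fs/bpf (the path and attr_map_lookup helper are
   illustrative, not part of this header):

     #include <bpf/bpf.h>          /* bpf_obj_get(), bpf_map_lookup_elem() */
     #include <sys/file.h>         /* flock() */
     #include <unistd.h>
     #include <linux/perf_event.h>
     #include <perf/bpf_perf.h>

     static int attr_map_lookup(struct perf_event_attr *key,
                                struct perf_event_attr_map_entry *entry)
     {
             int fd = bpf_obj_get("/sys/fs/bpf/" BPF_PERF_DEFAULT_ATTR_MAP_PATH);
             int err;

             if (fd < 0)
                     return fd;
             err = flock(fd, LOCK_EX);  /* serialize access to the attr_map */
             if (!err) {
                     err = bpf_map_lookup_elem(fd, key, entry);
                     flock(fd, LOCK_UN);
             }
             close(fd);
             return err;
     }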
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 6a17ad730cbc..58cc5c5fa47c 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -3,26 +3,102 @@
#define __LIBPERF_CPUMAP_H
#include <perf/core.h>
-#include <stdio.h>
#include <stdbool.h>
+#include <stdint.h>
+
+/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
+struct perf_cpu {
+ int16_t cpu;
+};
+
+struct perf_cache {
+ int cache_lvl;
+ int cache;
+};
struct perf_cpu_map;
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+/**
+ * perf_cpu_map__new_any_cpu - a map with a singular "any CPU"/dummy -1 value.
+ */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
+/**
+ * perf_cpu_map__new_online_cpus - a map read from
+ * /sys/devices/system/cpu/online if
+ * available. If reading wasn't possible a map
+ *                                 available. If reading wasn't possible, a map
+ *                                 is created using the number of online
+ *                                 processors, assuming the first 'n'
+ *                                 processors are all online.
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_online_cpus(void);
+/**
+ * perf_cpu_map__new - create a map from the given cpu_list such as "0-7". If no
+ * cpu_list argument is provided then
+ * perf_cpu_map__new_online_cpus is returned.
+ */
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
+/** perf_cpu_map__new_int - create a map containing the single given CPU. */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_int(int cpu);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
- struct perf_cpu_map *other);
+LIBPERF_API int perf_cpu_map__merge(struct perf_cpu_map **orig,
+ struct perf_cpu_map *other);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
+ struct perf_cpu_map *other);
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
-LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+/**
+ * perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index
+ * is invalid.
+ */
+LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+/**
+ * perf_cpu_map__nr - for an empty map returns 1: as perf_cpu_map__cpu returns a
+ *                    cpu of -1 for an invalid index, this makes an empty map
+ *                    look like it contains the "any CPU"/dummy value. Otherwise
+ *                    the result is the number of CPUs in the map, plus one if
+ *                    the "any CPU"/dummy value is present.
+ */
LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
-LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
-LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map);
+/**
+ * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
+ */
+LIBPERF_API bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
+/**
+ * perf_cpu_map__is_any_cpu_or_is_empty - is the map either empty or just the "any CPU"/dummy value.
+ */
+LIBPERF_API bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map);
+/**
+ * perf_cpu_map__is_empty - does the map contain no values, not even the
+ * special "any CPU"/dummy value.
+ */
+LIBPERF_API bool perf_cpu_map__is_empty(const struct perf_cpu_map *map);
+/**
+ * perf_cpu_map__min - the minimum CPU value or -1 if empty or just the "any CPU"/dummy value.
+ */
+LIBPERF_API struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map);
+/**
+ * perf_cpu_map__max - the maximum CPU value or -1 if empty or just the "any CPU"/dummy value.
+ */
+LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
+LIBPERF_API bool perf_cpu_map__equal(const struct perf_cpu_map *lhs,
+ const struct perf_cpu_map *rhs);
+/**
+ * perf_cpu_map__has_any_cpu - does the map contain the "any CPU"/dummy -1 value?
+ */
+LIBPERF_API bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map);
#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
(idx) < perf_cpu_map__nr(cpus); \
(idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
+#define perf_cpu_map__for_each_cpu_skip_any(_cpu, idx, cpus) \
+ for ((idx) = 0, (_cpu) = perf_cpu_map__cpu(cpus, idx); \
+ (idx) < perf_cpu_map__nr(cpus); \
+ (idx)++, (_cpu) = perf_cpu_map__cpu(cpus, idx)) \
+ if ((_cpu).cpu != -1)
+
+#define perf_cpu_map__for_each_idx(idx, cpus) \
+ for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++)
+
#endif /* __LIBPERF_CPUMAP_H */
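
   Since CPU values are now wrapped in struct perf_cpu, iteration yields values
   whose raw number lives in the .cpu member; a minimal sketch:

     #include <perf/core.h>
     #include <perf/cpumap.h>
     #include <stdio.h>

     int main(void)
     {
             struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
             struct perf_cpu cpu;
             int idx;

             if (!cpus)
                     return -1;
             perf_cpu_map__for_each_cpu(cpu, idx, cpus)
                     printf("idx %d -> cpu %d\n", idx, cpu.cpu);
             printf("max cpu: %d\n", perf_cpu_map__max(cpus).cpu);
             perf_cpu_map__put(cpus);
             return 0;
     }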
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 69b44d2cc0f5..09b7c643ddac 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -8,6 +8,8 @@
#include <linux/bpf.h>
#include <sys/types.h> /* pid_t */
+#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
+
struct perf_record_mmap {
struct perf_event_header header;
__u32 pid, tid;
@@ -23,10 +25,20 @@ struct perf_record_mmap2 {
__u64 start;
__u64 len;
__u64 pgoff;
- __u32 maj;
- __u32 min;
- __u64 ino;
- __u64 ino_generation;
+ union {
+ struct {
+ __u32 maj;
+ __u32 min;
+ __u64 ino;
+ __u64 ino_generation;
+ };
+ struct {
+ __u8 build_id_size;
+ __u8 __reserved_1;
+ __u16 __reserved_2;
+ __u8 build_id[20];
+ };
+ };
__u32 prot;
__u32 flags;
char filename[PATH_MAX];
@@ -58,13 +70,21 @@ struct perf_record_lost {
__u64 lost;
};
+#define PERF_RECORD_MISC_LOST_SAMPLES_BPF (1 << 15)
+
struct perf_record_lost_samples {
struct perf_event_header header;
__u64 lost;
};
+#define MAX_ID_HDR_ENTRIES 6
+struct perf_record_lost_samples_and_ids {
+ struct perf_record_lost_samples lost;
+ __u64 sample_ids[MAX_ID_HDR_ENTRIES];
+};
+
/*
- * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
*/
struct perf_record_read {
struct perf_event_header header;
@@ -73,6 +93,7 @@ struct perf_record_read {
__u64 time_enabled;
__u64 time_running;
__u64 id;
+ __u64 lost;
};
struct perf_record_throttle {
@@ -83,7 +104,7 @@ struct perf_record_throttle {
};
#ifndef KSYM_NAME_LEN
-#define KSYM_NAME_LEN 256
+#define KSYM_NAME_LEN 512
#endif
struct perf_record_ksymbol {
@@ -111,6 +132,14 @@ struct perf_record_cgroup {
char path[PATH_MAX];
};
+struct perf_record_text_poke_event {
+ struct perf_event_header header;
+ __u64 addr;
+ __u16 old_len;
+ __u16 new_len;
+ __u8 bytes[];
+};
+
struct perf_record_sample {
struct perf_event_header header;
__u64 array[];
@@ -125,29 +154,91 @@ struct perf_record_switch {
struct perf_record_header_attr {
struct perf_event_header header;
struct perf_event_attr attr;
- __u64 id[];
+ /*
+ * Array of u64 id follows here but we cannot use a flexible array
+	 * because the size of attr in the data can be different from the current
+ * version. Please use perf_record_header_attr_id() below.
+ *
+ * __u64 id[]; // do not use this
+ */
};
+/* Returns the pointer to the id array based on the actual attr size. */
+#define perf_record_header_attr_id(evt) \
+ ((void *)&(evt)->attr.attr + (evt)->attr.attr.size)
+
enum {
PERF_CPU_MAP__CPUS = 0,
PERF_CPU_MAP__MASK = 1,
+ PERF_CPU_MAP__RANGE_CPUS = 2,
};
+/*
+ * Array encoding of a perf_cpu_map where nr is the number of entries in cpu[]
+ * and each entry is a value for a CPU in the map.
+ */
struct cpu_map_entries {
__u16 nr;
__u16 cpu[];
};
-struct perf_record_record_cpu_map {
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 32-bit. */
+struct perf_record_mask_cpu_map32 {
+ /* Number of mask values. */
__u16 nr;
+ /* Constant 4. */
__u16 long_size;
- unsigned long mask[];
+ /* Bitmap data. */
+ __u32 mask[];
+};
+
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 64-bit. */
+struct perf_record_mask_cpu_map64 {
+ /* Number of mask values. */
+ __u16 nr;
+ /* Constant 8. */
+ __u16 long_size;
+ /* Legacy padding. */
+ char __pad[4];
+ /* Bitmap data. */
+ __u64 mask[];
+};
+
+/*
+ * 'struct perf_record_cpu_map_data' is packed as unfortunately an earlier
+ * version had unaligned data and we wish to retain file format compatibility.
+ * -irogers
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+
+/*
+ * An encoding of a CPU map for a range starting at start_cpu through to
+ * end_cpu. If any_cpu is 1, an any CPU (-1) value (aka dummy value) is present.
+ */
+struct perf_record_range_cpu_map {
+ __u8 any_cpu;
+ __u8 __pad;
+ __u16 start_cpu;
+ __u16 end_cpu;
};
struct perf_record_cpu_map_data {
__u16 type;
- char data[];
-};
+ union {
+ /* Used when type == PERF_CPU_MAP__CPUS. */
+ struct cpu_map_entries cpus_data;
+ /* Used when type == PERF_CPU_MAP__MASK and long_size == 4. */
+ struct perf_record_mask_cpu_map32 mask32_data;
+ /* Used when type == PERF_CPU_MAP__MASK and long_size == 8. */
+ struct perf_record_mask_cpu_map64 mask64_data;
+ /* Used when type == PERF_CPU_MAP__RANGE_CPUS. */
+ struct perf_record_range_cpu_map range_cpu_data;
+ };
+} __attribute__((packed));
+
+#pragma GCC diagnostic pop
struct perf_record_cpu_map {
struct perf_event_header header;
@@ -173,7 +264,16 @@ struct perf_record_event_update {
struct perf_event_header header;
__u64 type;
__u64 id;
- char data[];
+ union {
+ /* Used when type == PERF_EVENT_UPDATE__SCALE. */
+ struct perf_record_event_update_scale scale;
+ /* Used when type == PERF_EVENT_UPDATE__UNIT. */
+ char unit[0];
+ /* Used when type == PERF_EVENT_UPDATE__NAME. */
+ char name[0];
+ /* Used when type == PERF_EVENT_UPDATE__CPUS. */
+ struct perf_record_event_update_cpus cpus;
+ };
};
#define MAX_EVENT_NAME 64
@@ -193,10 +293,20 @@ struct perf_record_header_tracing_data {
__u32 size;
};
+#define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15)
+
struct perf_record_header_build_id {
struct perf_event_header header;
pid_t pid;
- __u8 build_id[24];
+ union {
+ __u8 build_id[24];
+ struct {
+ __u8 data[20];
+ __u8 size;
+ __u8 reserved1__;
+ __u16 reserved2__;
+ };
+ };
char filename[];
};
@@ -207,10 +317,15 @@ struct id_index_entry {
__u64 tid;
};
+struct id_index_entry_2 {
+ __u64 machine_pid;
+ __u64 vcpu;
+};
+
struct perf_record_id_index {
struct perf_event_header header;
__u64 nr;
- struct id_index_entry entries[0];
+ struct id_index_entry entries[];
};
struct perf_record_auxtrace_info {
@@ -244,6 +359,8 @@ struct perf_record_auxtrace_error {
__u64 ip;
__u64 time;
char msg[MAX_AUXTRACE_ERROR_MSG];
+ __u32 machine_pid;
+ __u32 vcpu;
};
struct perf_record_aux {
@@ -259,6 +376,11 @@ struct perf_record_itrace_start {
__u32 tid;
};
+struct perf_record_aux_output_hw_id {
+ struct perf_event_header header;
+ __u64 hw_id;
+};
+
struct perf_record_thread_map_entry {
__u64 pid;
char comm[16];
@@ -274,7 +396,8 @@ enum {
PERF_STAT_CONFIG_TERM__AGGR_MODE = 0,
PERF_STAT_CONFIG_TERM__INTERVAL = 1,
PERF_STAT_CONFIG_TERM__SCALE = 2,
- PERF_STAT_CONFIG_TERM__MAX = 3,
+ PERF_STAT_CONFIG_TERM__AGGR_LEVEL = 3,
+ PERF_STAT_CONFIG_TERM__MAX = 4,
};
struct perf_record_stat_config_entry {
@@ -316,6 +439,11 @@ struct perf_record_time_conv {
__u64 time_shift;
__u64 time_mult;
__u64 time_zero;
+ __u64 time_cycles;
+ __u64 time_mask;
+ __u8 cap_user_time_zero;
+ __u8 cap_user_time_short;
+ __u8 reserved[6]; /* For alignment */
};
struct perf_record_header_feature {
@@ -329,6 +457,16 @@ struct perf_record_compressed {
char data[];
};
+/*
+ * `header.size` includes the padding we are going to add while writing the record.
+ * `data_size` only includes the size of `data[]` itself.
+ */
+struct perf_record_compressed2 {
+ struct perf_event_header header;
+ __u64 data_size;
+ char data[];
+};
+
enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_USER_TYPE_START = 64,
PERF_RECORD_HEADER_ATTR = 64,
@@ -349,6 +487,8 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_TIME_CONV = 79,
PERF_RECORD_HEADER_FEATURE = 80,
PERF_RECORD_COMPRESSED = 81,
+ PERF_RECORD_FINISHED_INIT = 82,
+ PERF_RECORD_COMPRESSED2 = 83,
PERF_RECORD_HEADER_MAX
};
@@ -367,6 +507,7 @@ union perf_event {
struct perf_record_sample sample;
struct perf_record_bpf_event bpf;
struct perf_record_ksymbol ksymbol;
+ struct perf_record_text_poke_event text_poke;
struct perf_record_header_attr attr;
struct perf_record_event_update event_update;
struct perf_record_header_event_type event_type;
@@ -378,6 +519,7 @@ union perf_event {
struct perf_record_auxtrace_error auxtrace_error;
struct perf_record_aux aux;
struct perf_record_itrace_start itrace_start;
+ struct perf_record_aux_output_hw_id aux_output_hw_id;
struct perf_record_switch context_switch;
struct perf_record_thread_map thread_map;
struct perf_record_cpu_map cpu_map;
@@ -387,6 +529,7 @@ union perf_event {
struct perf_record_time_conv time_conv;
struct perf_record_header_feature feat;
struct perf_record_compressed pack;
+ struct perf_record_compressed2 pack2;
};
#endif /* __LIBPERF_EVENT_H */
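
   To see why perf_record_header_attr_id() replaces the flexible array: the ids
   begin attr.size bytes into the attr, and attr.size in recorded data may not
   equal sizeof(struct perf_event_attr) in the reading tool. A sketch of walking
   the ids (print_attr_ids is illustrative, and the nr computation assumes
   attr.size is u64-aligned, as perf writes it):

     #include <stdio.h>
     #include <perf/event.h>

     static void print_attr_ids(union perf_event *event)
     {
             __u64 *ids = perf_record_header_attr_id(event);
             size_t i, nr = (event->header.size - sizeof(event->attr.header) -
                             event->attr.attr.size) / sizeof(__u64);

             for (i = 0; i < nr; i++)
                     /* each id maps a PERF_SAMPLE_ID back to this event */
                     printf("id[%zu] = %llu\n", i, (unsigned long long)ids[i]);
     }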
diff --git a/tools/lib/perf/include/perf/evlist.h b/tools/lib/perf/include/perf/evlist.h
index 0a7479dc13bf..e894b770779e 100644
--- a/tools/lib/perf/include/perf/evlist.h
+++ b/tools/lib/perf/include/perf/evlist.h
@@ -46,4 +46,6 @@ LIBPERF_API struct perf_mmap *perf_evlist__next_mmap(struct perf_evlist *evlist,
(pos) != NULL; \
(pos) = perf_evlist__next_mmap((evlist), (pos), overwrite))
+LIBPERF_API void perf_evlist__set_leader(struct perf_evlist *evlist);
+LIBPERF_API int perf_evlist__nr_groups(struct perf_evlist *evlist);
#endif /* __LIBPERF_EVLIST_H */
diff --git a/tools/lib/perf/include/perf/evsel.h b/tools/lib/perf/include/perf/evsel.h
index c82ec39a4ad0..6f92204075c2 100644
--- a/tools/lib/perf/include/perf/evsel.h
+++ b/tools/lib/perf/include/perf/evsel.h
@@ -4,6 +4,8 @@
#include <stdint.h>
#include <perf/core.h>
+#include <stdbool.h>
+#include <linux/types.h>
struct perf_evsel;
struct perf_event_attr;
@@ -16,8 +18,10 @@ struct perf_counts_values {
uint64_t val;
uint64_t ena;
uint64_t run;
+ uint64_t id;
+ uint64_t lost;
};
- uint64_t values[3];
+ uint64_t values[5];
};
};
@@ -26,15 +30,21 @@ LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel);
LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel);
-LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
-LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+LIBPERF_API int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
+LIBPERF_API void perf_evsel__munmap(struct perf_evsel *evsel);
+LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread);
+LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count);
LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel);
-LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu);
+LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+LIBPERF_API int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread);
LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel);
-LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu);
+LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
+LIBPERF_API void perf_counts_values__scale(struct perf_counts_values *count,
+ bool scale, __s8 *pscaled);
#endif /* __LIBPERF_EVSEL_H */
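
   perf_counts_values__scale() compensates for multiplexing: when the event ran
   for only part of its enabled time, val is scaled up by ena/run, and pscaled
   reports -1 (never ran), 1 (scaled) or 0 (ran the whole time). A sketch
   (read_scaled is illustrative):

     #include <linux/types.h>
     #include <perf/evsel.h>

     static uint64_t read_scaled(struct perf_evsel *evsel, int cpu_map_idx, int thread)
     {
             struct perf_counts_values counts = { .val = 0 };
             __s8 scaled;

             if (perf_evsel__read(evsel, cpu_map_idx, thread, &counts))
                     return 0;
             perf_counts_values__scale(&counts, true, &scaled);
             if (scaled == -1)
                     return 0;  /* run == 0: the counter never ran */
             return counts.val; /* ~ val * ena / run when run < ena */
     }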
diff --git a/tools/lib/perf/include/perf/threadmap.h b/tools/lib/perf/include/perf/threadmap.h
index a7c50de8d010..44deb815b817 100644
--- a/tools/lib/perf/include/perf/threadmap.h
+++ b/tools/lib/perf/include/perf/threadmap.h
@@ -8,11 +8,13 @@
struct perf_thread_map;
LIBPERF_API struct perf_thread_map *perf_thread_map__new_dummy(void);
+LIBPERF_API struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);
-LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid);
-LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int thread);
+LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid);
+LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads);
-LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread);
+LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
+LIBPERF_API int perf_thread_map__idx(struct perf_thread_map *map, pid_t pid);
LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);
diff --git a/tools/lib/perf/lib.c b/tools/lib/perf/lib.c
index 18658931fc71..696fb0ea67c6 100644
--- a/tools/lib/perf/lib.c
+++ b/tools/lib/perf/lib.c
@@ -38,6 +38,26 @@ ssize_t readn(int fd, void *buf, size_t n)
return ion(true, fd, buf, n);
}
+ssize_t preadn(int fd, void *buf, size_t n, off_t offs)
+{
+ size_t left = n;
+
+ while (left) {
+ ssize_t ret = pread(fd, buf, left, offs);
+
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
+ return ret;
+
+ left -= ret;
+ buf += ret;
+ offs += ret;
+ }
+
+ return n;
+}
+
/*
* Write exactly 'n' bytes or return an error.
*/
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 7be1af8a546c..fdd8304fe9d0 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -1,15 +1,21 @@
LIBPERF_0.0.1 {
global:
libperf_init;
- perf_cpu_map__dummy_new;
+ perf_cpu_map__new_any_cpu;
+ perf_cpu_map__new_online_cpus;
perf_cpu_map__get;
perf_cpu_map__put;
perf_cpu_map__new;
- perf_cpu_map__read;
perf_cpu_map__nr;
perf_cpu_map__cpu;
- perf_cpu_map__empty;
+ perf_cpu_map__has_any_cpu_or_is_empty;
+ perf_cpu_map__is_any_cpu_or_is_empty;
+ perf_cpu_map__is_empty;
+ perf_cpu_map__has_any_cpu;
+ perf_cpu_map__min;
perf_cpu_map__max;
+ perf_cpu_map__has;
+ perf_thread_map__new_array;
perf_thread_map__new_dummy;
perf_thread_map__set_pid;
perf_thread_map__comm;
@@ -23,6 +29,9 @@ LIBPERF_0.0.1 {
perf_evsel__disable;
perf_evsel__open;
perf_evsel__close;
+ perf_evsel__mmap;
+ perf_evsel__munmap;
+ perf_evsel__mmap_base;
perf_evsel__read;
perf_evsel__cpus;
perf_evsel__threads;
@@ -42,10 +51,12 @@ LIBPERF_0.0.1 {
perf_evlist__munmap;
perf_evlist__filter_pollfd;
perf_evlist__next_mmap;
+ perf_evlist__set_leader;
perf_mmap__consume;
perf_mmap__read_init;
perf_mmap__read_done;
perf_mmap__read_event;
+ perf_counts_values__scale;
local:
*;
};
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index 79d5ed6c38cc..c1a51d925e0e 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -8,14 +8,18 @@
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <perf/event.h>
+#include <perf/evsel.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/stringify.h>
#include "internal.h"
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
bool overwrite, libperf_unmap_cb_t unmap_cb)
{
+ /* Assume fields were zero initialized. */
map->fd = -1;
map->overwrite = overwrite;
map->unmap_cb = unmap_cb;
@@ -30,7 +34,7 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
}
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
- int fd, int cpu)
+ int fd, struct perf_cpu cpu)
{
map->prev = 0;
map->mask = mp->mask;
@@ -48,13 +52,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
void perf_mmap__munmap(struct perf_mmap *map)
{
- if (map && map->base != NULL) {
+ if (!map)
+ return;
+
+ zfree(&map->event_copy);
+ map->event_copy_sz = 0;
+ if (map->base) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
map->fd = -1;
refcount_set(&map->refcnt, 0);
}
- if (map && map->unmap_cb)
+ if (map->unmap_cb)
map->unmap_cb(map);
}
@@ -220,9 +229,17 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
*/
if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
unsigned int offset = *startp;
- unsigned int len = min(sizeof(*event), size), cpy;
+ unsigned int len = size, cpy;
void *dst = map->event_copy;
+ if (size > map->event_copy_sz) {
+ dst = realloc(map->event_copy, size);
+ if (!dst)
+ return NULL;
+ map->event_copy = dst;
+ map->event_copy_sz = size;
+ }
+
do {
cpy = min(map->mask + 1 - (offset & map->mask), len);
memcpy(dst, &data[offset & map->mask], cpy);
@@ -262,7 +279,7 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map)
if (!refcount_read(&map->refcnt))
return NULL;
- /* non-overwirte doesn't pause the ringbuffer */
+ /* non-overwrite doesn't pause the ringbuffer */
if (!map->overwrite)
map->end = perf_mmap__read_head(map);
@@ -273,3 +290,250 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map)
return event;
}
+
+#if defined(__i386__) || defined(__x86_64__)
+static u64 read_perf_counter(unsigned int counter)
+{
+ unsigned int low, high;
+
+ asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
+
+ return low | ((u64)high) << 32;
+}
+
+static u64 read_timestamp(void)
+{
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((u64)high) << 32;
+}
+#elif defined(__aarch64__)
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+static u64 read_pmccntr(void)
+{
+ return read_sysreg(pmccntr_el0);
+}
+
+#define PMEVCNTR_READ(idx) \
+ static u64 read_pmevcntr_##idx(void) { \
+ return read_sysreg(pmevcntr##idx##_el0); \
+ }
+
+PMEVCNTR_READ(0);
+PMEVCNTR_READ(1);
+PMEVCNTR_READ(2);
+PMEVCNTR_READ(3);
+PMEVCNTR_READ(4);
+PMEVCNTR_READ(5);
+PMEVCNTR_READ(6);
+PMEVCNTR_READ(7);
+PMEVCNTR_READ(8);
+PMEVCNTR_READ(9);
+PMEVCNTR_READ(10);
+PMEVCNTR_READ(11);
+PMEVCNTR_READ(12);
+PMEVCNTR_READ(13);
+PMEVCNTR_READ(14);
+PMEVCNTR_READ(15);
+PMEVCNTR_READ(16);
+PMEVCNTR_READ(17);
+PMEVCNTR_READ(18);
+PMEVCNTR_READ(19);
+PMEVCNTR_READ(20);
+PMEVCNTR_READ(21);
+PMEVCNTR_READ(22);
+PMEVCNTR_READ(23);
+PMEVCNTR_READ(24);
+PMEVCNTR_READ(25);
+PMEVCNTR_READ(26);
+PMEVCNTR_READ(27);
+PMEVCNTR_READ(28);
+PMEVCNTR_READ(29);
+PMEVCNTR_READ(30);
+
+/*
+ * Read a value direct from PMEVCNTR<idx>
+ */
+static u64 read_perf_counter(unsigned int counter)
+{
+ static u64 (* const read_f[])(void) = {
+ read_pmevcntr_0,
+ read_pmevcntr_1,
+ read_pmevcntr_2,
+ read_pmevcntr_3,
+ read_pmevcntr_4,
+ read_pmevcntr_5,
+ read_pmevcntr_6,
+ read_pmevcntr_7,
+ read_pmevcntr_8,
+ read_pmevcntr_9,
+ read_pmevcntr_10,
+ read_pmevcntr_11,
+		read_pmevcntr_12,
+		read_pmevcntr_13,
+ read_pmevcntr_14,
+ read_pmevcntr_15,
+ read_pmevcntr_16,
+ read_pmevcntr_17,
+ read_pmevcntr_18,
+ read_pmevcntr_19,
+ read_pmevcntr_20,
+ read_pmevcntr_21,
+ read_pmevcntr_22,
+ read_pmevcntr_23,
+ read_pmevcntr_24,
+ read_pmevcntr_25,
+ read_pmevcntr_26,
+ read_pmevcntr_27,
+ read_pmevcntr_28,
+ read_pmevcntr_29,
+ read_pmevcntr_30,
+ read_pmccntr
+ };
+
+ if (counter < ARRAY_SIZE(read_f))
+ return (read_f[counter])();
+
+ return 0;
+}
+
+static u64 read_timestamp(void) { return read_sysreg(cntvct_el0); }
+
+/* __riscv_xlen contains the width of the native base integer, here 64-bit */
+#elif defined(__riscv) && __riscv_xlen == 64
+
+/* TODO: implement rv32 support */
+
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, %1" \
+ : "=r" (__v) \
+ : "i" (csr) : ); \
+ __v; \
+})
+
+static unsigned long csr_read_num(int csr_num)
+{
+#define switchcase_csr_read(__csr_num, __val) {\
+ case __csr_num: \
+ __val = csr_read(__csr_num); \
+ break; }
+#define switchcase_csr_read_2(__csr_num, __val) {\
+ switchcase_csr_read(__csr_num + 0, __val) \
+ switchcase_csr_read(__csr_num + 1, __val)}
+#define switchcase_csr_read_4(__csr_num, __val) {\
+ switchcase_csr_read_2(__csr_num + 0, __val) \
+ switchcase_csr_read_2(__csr_num + 2, __val)}
+#define switchcase_csr_read_8(__csr_num, __val) {\
+ switchcase_csr_read_4(__csr_num + 0, __val) \
+ switchcase_csr_read_4(__csr_num + 4, __val)}
+#define switchcase_csr_read_16(__csr_num, __val) {\
+ switchcase_csr_read_8(__csr_num + 0, __val) \
+ switchcase_csr_read_8(__csr_num + 8, __val)}
+#define switchcase_csr_read_32(__csr_num, __val) {\
+ switchcase_csr_read_16(__csr_num + 0, __val) \
+ switchcase_csr_read_16(__csr_num + 16, __val)}
+
+ unsigned long ret = 0;
+
+ switch (csr_num) {
+ switchcase_csr_read_32(CSR_CYCLE, ret)
+ default:
+ break;
+ }
+
+ return ret;
+#undef switchcase_csr_read_32
+#undef switchcase_csr_read_16
+#undef switchcase_csr_read_8
+#undef switchcase_csr_read_4
+#undef switchcase_csr_read_2
+#undef switchcase_csr_read
+}
+
+static u64 read_perf_counter(unsigned int counter)
+{
+ return csr_read_num(CSR_CYCLE + counter);
+}
+
+static u64 read_timestamp(void)
+{
+ return csr_read_num(CSR_TIME);
+}
+
+#else
+static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
+static u64 read_timestamp(void) { return 0; }
+#endif
+
+int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count)
+{
+ struct perf_event_mmap_page *pc = map->base;
+ u32 seq, idx, time_mult = 0, time_shift = 0;
+ u64 cnt, cyc = 0, time_offset = 0, time_cycles = 0, time_mask = ~0ULL;
+
+ if (!pc || !pc->cap_user_rdpmc)
+ return -1;
+
+ do {
+ seq = READ_ONCE(pc->lock);
+ barrier();
+
+ count->ena = READ_ONCE(pc->time_enabled);
+ count->run = READ_ONCE(pc->time_running);
+
+ if (pc->cap_user_time && count->ena != count->run) {
+ cyc = read_timestamp();
+ time_mult = READ_ONCE(pc->time_mult);
+ time_shift = READ_ONCE(pc->time_shift);
+ time_offset = READ_ONCE(pc->time_offset);
+
+ if (pc->cap_user_time_short) {
+ time_cycles = READ_ONCE(pc->time_cycles);
+ time_mask = READ_ONCE(pc->time_mask);
+ }
+ }
+
+ idx = READ_ONCE(pc->index);
+ cnt = READ_ONCE(pc->offset);
+ if (pc->cap_user_rdpmc && idx) {
+ s64 evcnt = read_perf_counter(idx - 1);
+ u16 width = READ_ONCE(pc->pmc_width);
+
+ evcnt <<= 64 - width;
+ evcnt >>= 64 - width;
+ cnt += evcnt;
+ } else
+ return -1;
+
+ barrier();
+ } while (READ_ONCE(pc->lock) != seq);
+
+ if (count->ena != count->run) {
+ u64 delta;
+
+		/* Adjust for cap_user_time_short, a nop if not */
+ cyc = time_cycles + ((cyc - time_cycles) & time_mask);
+
+ delta = time_offset + mul_u64_u32_shr(cyc, time_mult, time_shift);
+
+ count->ena += delta;
+ if (idx)
+ count->run += delta;
+ }
+
+ count->val = cnt;
+
+ return 0;
+}
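
   Two pieces of arithmetic in perf_mmap__read_self() are worth spelling out
   (an illustrative restatement, not additional patch code): sign extension of
   a counter narrower than 64 bits, and the conversion of cycles to time using
   the mult/shift parameters the kernel publishes in the mmap control page.

     #include <linux/types.h>

     /* A counter of 'width' bits is sign-extended by a shift pair; the
      * arithmetic right shift replicates the sign bit downwards: */
     static inline __s64 sign_extend(__s64 pmc, __u16 width)
     {
             pmc <<= 64 - width;
             pmc >>= 64 - width;
             return pmc;
     }

     /* The enabled/running times are then advanced to "now" with
      *   delta = time_offset + ((cyc * time_mult) >> time_shift)
      * which is what mul_u64_u32_shr() computes without 64-bit overflow. */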
diff --git a/tools/lib/perf/tests/Build b/tools/lib/perf/tests/Build
new file mode 100644
index 000000000000..56e81378d443
--- /dev/null
+++ b/tools/lib/perf/tests/Build
@@ -0,0 +1,5 @@
+tests-y += main.o
+tests-y += test-evsel.o
+tests-y += test-evlist.o
+tests-y += test-cpumap.o
+tests-y += test-threadmap.o
diff --git a/tools/lib/perf/tests/Makefile b/tools/lib/perf/tests/Makefile
deleted file mode 100644
index 96841775feaf..000000000000
--- a/tools/lib/perf/tests/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-TESTS = test-cpumap test-threadmap test-evlist test-evsel
-
-TESTS_SO := $(addsuffix -so,$(TESTS))
-TESTS_A := $(addsuffix -a,$(TESTS))
-
-# Set compile option CFLAGS
-ifdef EXTRA_CFLAGS
- CFLAGS := $(EXTRA_CFLAGS)
-else
- CFLAGS := -g -Wall
-endif
-
-all:
-
-include $(srctree)/tools/scripts/Makefile.include
-
-INCLUDE = -I$(srctree)/tools/lib/perf/include -I$(srctree)/tools/include -I$(srctree)/tools/lib
-
-$(TESTS_A): FORCE
- $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -o $@ $(subst -a,.c,$@) ../libperf.a $(LIBAPI)
-
-$(TESTS_SO): FORCE
- $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -L.. -o $@ $(subst -so,.c,$@) $(LIBAPI) -lperf
-
-all: $(TESTS_A) $(TESTS_SO)
-
-run:
- @echo "running static:"
- @for i in $(TESTS_A); do ./$$i; done
- @echo "running dynamic:"
- @for i in $(TESTS_SO); do LD_LIBRARY_PATH=../ ./$$i; done
-
-clean:
- $(call QUIET_CLEAN, tests)$(RM) $(TESTS_A) $(TESTS_SO)
-
-.PHONY: all clean FORCE
diff --git a/tools/lib/perf/tests/main.c b/tools/lib/perf/tests/main.c
new file mode 100644
index 000000000000..56423fd4db19
--- /dev/null
+++ b/tools/lib/perf/tests/main.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <internal/tests.h>
+#include "tests.h"
+
+int tests_failed;
+int tests_verbose;
+
+int main(int argc, char **argv)
+{
+ __T("test cpumap", !test_cpumap(argc, argv));
+ __T("test threadmap", !test_threadmap(argc, argv));
+ __T("test evlist", !test_evlist(argc, argv));
+ __T("test evsel", !test_evsel(argc, argv));
+ return 0;
+}
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
index c8d45091e7c2..c998b1dae863 100644
--- a/tools/lib/perf/tests/test-cpumap.c
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <perf/cpumap.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -10,15 +11,17 @@ static int libperf_print(enum libperf_print_level level,
return vfprintf(stderr, fmt, ap);
}
-int main(int argc, char **argv)
+int test_cpumap(int argc, char **argv)
{
struct perf_cpu_map *cpus;
+ struct perf_cpu cpu;
+ int idx;
__T_START;
libperf_init(libperf_print);
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
if (!cpus)
return -1;
@@ -26,6 +29,15 @@ int main(int argc, char **argv)
perf_cpu_map__put(cpus);
perf_cpu_map__put(cpus);
+ cpus = perf_cpu_map__new_online_cpus();
+ if (!cpus)
+ return -1;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ __T("wrong cpu number", cpu.cpu != -1);
+
+ perf_cpu_map__put(cpus);
+
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index 6d8ebe0c2504..10f70cb41ff1 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE // needed for sched.h to get sched_[gs]etaffinity and CPU_(ZERO,SET)
+#include <inttypes.h>
#include <sched.h>
#include <stdio.h>
#include <stdarg.h>
@@ -18,6 +19,11 @@
#include <perf/event.h>
#include <internal/tests.h>
#include <api/fs/fs.h>
+#include "tests.h"
+#include <internal/evsel.h>
+
+#define EVENT_NUM 15
+#define WAIT_COUNT 100000000UL
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -29,7 +35,7 @@ static int test_stat_cpu(void)
{
struct perf_cpu_map *cpus;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -38,15 +44,15 @@ static int test_stat_cpu(void)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_TASK_CLOCK,
};
- int err, cpu, tmp;
+ int err, idx;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -56,18 +62,22 @@ static int test_stat_cpu(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, cpus, NULL);
err = perf_evlist__open(evlist);
- __T("failed to open evsel", err == 0);
+ __T("failed to open evlist", err == 0);
perf_evlist__for_each_evsel(evlist, evsel) {
cpus = perf_evsel__cpus(evsel);
- perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
struct perf_counts_values counts = { .val = 0 };
- perf_evsel__read(evsel, cpu, 0, &counts);
+ perf_evsel__read(evsel, idx, 0, &counts);
__T("failed to read value for evsel", counts.val != 0);
}
}
@@ -84,7 +94,7 @@ static int test_stat_thread(void)
struct perf_counts_values counts = { .val = 0 };
struct perf_thread_map *threads;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -103,7 +113,7 @@ static int test_stat_thread(void)
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -113,10 +123,14 @@ static int test_stat_thread(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, NULL, threads);
err = perf_evlist__open(evlist);
- __T("failed to open evsel", err == 0);
+ __T("failed to open evlist", err == 0);
perf_evlist__for_each_evsel(evlist, evsel) {
perf_evsel__read(evsel, 0, 0, &counts);
@@ -135,7 +149,7 @@ static int test_stat_thread_enable(void)
struct perf_counts_values counts = { .val = 0 };
struct perf_thread_map *threads;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -156,7 +170,7 @@ static int test_stat_thread_enable(void)
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -166,10 +180,14 @@ static int test_stat_thread_enable(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, NULL, threads);
err = perf_evlist__open(evlist);
- __T("failed to open evsel", err == 0);
+ __T("failed to open evlist", err == 0);
perf_evlist__for_each_evsel(evlist, evsel) {
perf_evsel__read(evsel, 0, 0, &counts);
@@ -208,13 +226,13 @@ static int test_mmap_thread(void)
char path[PATH_MAX];
int id, err, pid, go_pipe[2];
union perf_event *event;
- char bf;
int count = 0;
snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
sysfs__mountpoint());
if (filename__read_int(path, &id)) {
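+ /* count the missing tracepoint as a test failure before returning */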
+ tests_failed++;
fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
return -1;
}
@@ -229,6 +247,7 @@ static int test_mmap_thread(void)
pid = fork();
if (!pid) {
int i;
+ char bf;
read(go_pipe[0], &bf, 1);
@@ -242,7 +261,7 @@ static int test_mmap_thread(void)
threads = perf_thread_map__new_dummy();
__T("failed to create threads", threads);
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
__T("failed to create cpus", cpus);
perf_thread_map__set_pid(threads, 0, pid);
@@ -252,6 +271,7 @@ static int test_mmap_thread(void)
evsel = perf_evsel__new(&attr);
__T("failed to create evsel1", evsel);
+ __T("failed to set leader", evsel->leader == evsel);
perf_evlist__add(evlist, evsel);
@@ -266,7 +286,7 @@ static int test_mmap_thread(void)
perf_evlist__enable(evlist);
/* kick the child and wait for it to finish */
- write(go_pipe[1], &bf, 1);
+ write(go_pipe[1], "A", 1);
waitpid(pid, NULL, 0);
/*
@@ -315,7 +335,8 @@ static int test_mmap_cpus(void)
};
cpu_set_t saved_mask;
char path[PATH_MAX];
- int id, err, cpu, tmp;
+ int id, err, tmp;
+ struct perf_cpu cpu;
union perf_event *event;
int count = 0;
@@ -329,7 +350,7 @@ static int test_mmap_cpus(void)
attr.config = id;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evlist = perf_evlist__new();
@@ -337,6 +358,7 @@ static int test_mmap_cpus(void)
evsel = perf_evsel__new(&attr);
__T("failed to create evsel1", evsel);
+ __T("failed to set leader", evsel->leader == evsel);
perf_evlist__add(evlist, evsel);
@@ -357,7 +379,7 @@ static int test_mmap_cpus(void)
cpu_set_t mask;
CPU_ZERO(&mask);
- CPU_SET(cpu, &mask);
+ CPU_SET(cpu.cpu, &mask);
err = sched_setaffinity(0, sizeof(mask), &mask);
__T("sched_setaffinity failed", err == 0);
@@ -396,7 +418,160 @@ static int test_mmap_cpus(void)
return 0;
}
-int main(int argc, char **argv)
+static double display_error(long long average,
+ long long high,
+ long long low,
+ long long expected)
+{
+ double error;
+
+ error = (((double)average - expected) / expected) * 100.0;
+
+ __T_VERBOSE(" Expected: %lld\n", expected);
+ __T_VERBOSE(" High: %lld Low: %lld Average: %lld\n",
+ high, low, average);
+
+ __T_VERBOSE(" Average Error = %.2f%%\n", error);
+
+ return error;
+}
+
+static int test_stat_multiplexing(void)
+{
+ struct perf_counts_values expected_counts = { .val = 0 };
+ struct perf_counts_values counts[EVENT_NUM] = {{ .val = 0 },};
+ struct perf_thread_map *threads;
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING,
+ .disabled = 1,
+ };
+ int err, i, nonzero = 0;
+ unsigned long count;
+ long long max = 0, min = 0, avg = 0;
+ double error = 0.0;
+ s8 scaled = 0;
+
+ /* read for non-multiplexing event count */
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ err = perf_evsel__enable(evsel);
+ __T("failed to enable evsel", err == 0);
+
+ /* wait loop */
+ count = WAIT_COUNT;
+ while (count--)
+ ;
+
+ perf_evsel__read(evsel, 0, 0, &expected_counts);
+ __T("failed to read value for evsel", expected_counts.val != 0);
+ __T("failed to read non-multiplexing event count",
+ expected_counts.ena == expected_counts.run);
+
+ err = perf_evsel__disable(evsel);
+ __T("failed to enable evsel", err == 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+
+ /* read for multiplexing event count */
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ for (i = 0; i < EVENT_NUM; i++) {
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ perf_evlist__add(evlist, evsel);
+ }
+ perf_evlist__set_maps(evlist, NULL, threads);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ perf_evlist__enable(evlist);
+
+ /* wait loop */
+ count = WAIT_COUNT;
+ while (count--)
+ ;
+
+ i = 0;
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ perf_evsel__read(evsel, 0, 0, &counts[i]);
+ __T("failed to read value for evsel", counts[i].val != 0);
+ i++;
+ }
+
+ perf_evlist__disable(evlist);
+
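+ /* track min/max of the (scaled) counts and accumulate the average */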
+ min = counts[0].val;
+ for (i = 0; i < EVENT_NUM; i++) {
+ __T_VERBOSE("Event %2d -- Raw count = %" PRIu64 ", run = %" PRIu64 ", enable = %" PRIu64 "\n",
+ i, counts[i].val, counts[i].run, counts[i].ena);
+
+ perf_counts_values__scale(&counts[i], true, &scaled);
+ if (scaled == 1) {
+ __T_VERBOSE("\t Scaled count = %" PRIu64 " (%.2lf%%, %" PRIu64 "/%" PRIu64 ")\n",
+ counts[i].val,
+ (double)counts[i].run / (double)counts[i].ena * 100.0,
+ counts[i].run, counts[i].ena);
+ } else if (scaled == -1) {
+ __T_VERBOSE("\t Not Running\n");
+ } else {
+ __T_VERBOSE("\t Not Scaling\n");
+ }
+
+ if (counts[i].val > max)
+ max = counts[i].val;
+
+ if (counts[i].val < min)
+ min = counts[i].val;
+
+ avg += counts[i].val;
+
+ if (counts[i].val != 0)
+ nonzero++;
+ }
+
+ if (nonzero != 0)
+ avg = avg / nonzero;
+ else
+ avg = 0;
+
+ error = display_error(avg, max, min, expected_counts.val);
+
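+ /* the averaged multiplexed count must land within 1% of the non-multiplexed baseline */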
+ __T("Error out of range!", ((error <= 1.0) && (error >= -1.0)));
+
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+
+ perf_thread_map__put(threads);
+
+ return 0;
+}
+
+int test_evlist(int argc, char **argv)
{
__T_START;
@@ -407,7 +582,8 @@ int main(int argc, char **argv)
test_stat_thread_enable();
test_mmap_thread();
test_mmap_cpus();
+ test_stat_multiplexing();
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index 135722ac965b..545ec3150546 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -1,11 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdarg.h>
#include <stdio.h>
+#include <string.h>
#include <linux/perf_event.h>
+#include <linux/kernel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/evsel.h>
+#include <internal/evsel.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -21,9 +25,9 @@ static int test_stat_cpu(void)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
- int err, cpu, tmp;
+ int err, idx;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evsel = perf_evsel__new(&attr);
@@ -32,10 +36,10 @@ static int test_stat_cpu(void)
err = perf_evsel__open(evsel, cpus, NULL);
__T("failed to open evsel", err == 0);
- perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
struct perf_counts_values counts = { .val = 0 };
- perf_evsel__read(evsel, cpu, 0, &counts);
+ perf_evsel__read(evsel, idx, 0, &counts);
__T("failed to read value for evsel", counts.val != 0);
}
@@ -120,7 +124,232 @@ static int test_stat_thread_enable(void)
return 0;
}
-int main(int argc, char **argv)
+static int test_stat_user_read(int event)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evsel *evsel;
+ struct perf_event_mmap_page *pc;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = event,
+#ifdef __aarch64__
+ .config1 = 0x2, /* Request user access */
+#endif
+ };
+ int err, i;
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ err = perf_evsel__mmap(evsel, 0);
+ __T("failed to mmap evsel", err == 0);
+
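+ /* the mmapped perf page advertises rdpmc support, the counter index and its width */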
+ pc = perf_evsel__mmap_base(evsel, 0, 0);
+ __T("failed to get mmapped address", pc);
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+ __T("userspace counter access not supported", pc->cap_user_rdpmc);
+ __T("userspace counter access not enabled", pc->index);
+ __T("userspace counter width not set", pc->pmc_width >= 32);
+#endif
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+
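+ /* each busy loop doubles in length, so each measured delta must exceed the previous one */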
+ for (i = 0; i < 5; i++) {
+ volatile int count = 0x10000 << i;
+ __u64 start, end, last = 0;
+
+ __T_VERBOSE("\tloop = %u, ", count);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ start = counts.val;
+
+ while (count--) ;
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ end = counts.val;
+
+ __T("invalid counter data", (end - start) > last);
+ last = end - start;
+ __T_VERBOSE("count = %llu\n", end - start);
+ }
+
+ perf_evsel__munmap(evsel);
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test_stat_read_format_single(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+ struct perf_evsel *evsel;
+ struct perf_counts_values counts;
+ volatile int count = 0x100000;
+ int err;
+
+ evsel = perf_evsel__new(attr);
+ __T("failed to create evsel", evsel);
+
+ /* skip old kernels that don't support the format */
+ err = perf_evsel__open(evsel, NULL, threads);
+ if (err < 0)
+ return 0;
+
+ while (count--) ;
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(evsel, 0, 0, &counts);
+
+ __T("failed to read value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read LOST", counts.lost == 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+ return 0;
+}
+
+static int test_stat_read_format_group(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+ struct perf_evsel *leader, *member;
+ struct perf_counts_values counts;
+ volatile int count = 0x100000;
+ int err;
+
+ attr->read_format |= PERF_FORMAT_GROUP;
+ leader = perf_evsel__new(attr);
+ __T("failed to create leader", leader);
+
+ attr->read_format &= ~PERF_FORMAT_GROUP;
+ member = perf_evsel__new(attr);
+ __T("failed to create member", member);
+
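+ /* build a two-event group by hand through the internal evsel fields */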
+ member->leader = leader;
+ leader->nr_members = 2;
+
+ /* skip old kernels that don't support the format */
+ err = perf_evsel__open(leader, NULL, threads);
+ if (err < 0)
+ return 0;
+ err = perf_evsel__open(member, NULL, threads);
+ if (err < 0)
+ return 0;
+
+ while (count--) ;
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(leader, 0, 0, &counts);
+
+ __T("failed to read leader value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read leader TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read leader TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read leader ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read leader LOST", counts.lost == 0);
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(member, 0, 0, &counts);
+
+ __T("failed to read member value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read member TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read member TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read member ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read member LOST", counts.lost == 0);
+
+ perf_evsel__close(member);
+ perf_evsel__close(leader);
+ perf_evsel__delete(member);
+ perf_evsel__delete(leader);
+ return 0;
+}
+
+static int test_stat_read_format(void)
+{
+ struct perf_thread_map *threads;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ };
+ int err, i;
+
+#define FMT(_fmt) PERF_FORMAT_ ## _fmt
+#define FMT_TIME (FMT(TOTAL_TIME_ENABLED) | FMT(TOTAL_TIME_RUNNING))
+
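+ /* all eight combinations of the TIME, ID and LOST bits (TIME = ENABLED|RUNNING) */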
+ uint64_t test_formats[] = {
+ 0,
+ FMT_TIME,
+ FMT(ID),
+ FMT(LOST),
+ FMT_TIME | FMT(ID),
+ FMT_TIME | FMT(LOST),
+ FMT_TIME | FMT(ID) | FMT(LOST),
+ FMT(ID) | FMT(LOST),
+ };
+
+#undef FMT
+#undef FMT_TIME
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+ attr.read_format = test_formats[i];
+ __T_VERBOSE("testing single read with read_format: %lx\n",
+ (unsigned long)test_formats[i]);
+
+ err = test_stat_read_format_single(&attr, threads);
+ __T("failed to read single format", err == 0);
+ }
+
+ perf_thread_map__put(threads);
+
+ threads = perf_thread_map__new_array(2, NULL);
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+ perf_thread_map__set_pid(threads, 1, 0);
+
+ for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+ attr.read_format = test_formats[i];
+ __T_VERBOSE("testing group read with read_format: %lx\n",
+ (unsigned long)test_formats[i]);
+
+ err = test_stat_read_format_group(&attr, threads);
+ __T("failed to read group format", err == 0);
+ }
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+int test_evsel(int argc, char **argv)
{
__T_START;
@@ -129,7 +358,10 @@ int main(int argc, char **argv)
test_stat_cpu();
test_stat_thread();
test_stat_thread_enable();
+ test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+ test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+ test_stat_read_format();
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-threadmap.c b/tools/lib/perf/tests/test-threadmap.c
index 7dc4d6fbedde..f728ad7002bb 100644
--- a/tools/lib/perf/tests/test-threadmap.c
+++ b/tools/lib/perf/tests/test-threadmap.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <perf/threadmap.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -10,9 +11,43 @@ static int libperf_print(enum libperf_print_level level,
return vfprintf(stderr, fmt, ap);
}
-int main(int argc, char **argv)
+static int test_threadmap_array(int nr, pid_t *array)
{
struct perf_thread_map *threads;
+ int i;
+
+ threads = perf_thread_map__new_array(nr, array);
+ __T("Failed to allocate new thread map", threads);
+
+ __T("Unexpected number of threads", perf_thread_map__nr(threads) == nr);
+
+ for (i = 0; i < nr; i++) {
+ __T("Unexpected initial value of thread",
+ perf_thread_map__pid(threads, i) == (array ? array[i] : -1));
+ }
+
+ for (i = 1; i < nr; i++)
+ perf_thread_map__set_pid(threads, i, i * 100);
+
+ __T("Unexpected value of thread 0",
+ perf_thread_map__pid(threads, 0) == (array ? array[0] : -1));
+
+ for (i = 1; i < nr; i++) {
+ __T("Unexpected thread value",
+ perf_thread_map__pid(threads, i) == i * 100);
+ }
+
+ perf_thread_map__put(threads);
+
+ return 0;
+}
+
+#define THREADS_NR 10
+int test_threadmap(int argc, char **argv)
+{
+ struct perf_thread_map *threads;
+ pid_t thr_array[THREADS_NR];
+ int i;
__T_START;
@@ -26,6 +61,13 @@ int main(int argc, char **argv)
perf_thread_map__put(threads);
perf_thread_map__put(threads);
+ test_threadmap_array(THREADS_NR, NULL);
+
+ for (i = 0; i < THREADS_NR; i++)
+ thr_array[i] = i + 100;
+
+ test_threadmap_array(THREADS_NR, thr_array);
+
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/tests.h b/tools/lib/perf/tests/tests.h
new file mode 100644
index 000000000000..604838f21b2b
--- /dev/null
+++ b/tools/lib/perf/tests/tests.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef TESTS_H
+#define TESTS_H
+
+int test_cpumap(int argc, char **argv);
+int test_threadmap(int argc, char **argv);
+int test_evlist(int argc, char **argv);
+int test_evsel(int argc, char **argv);
+
+#endif /* TESTS_H */
diff --git a/tools/lib/perf/threadmap.c b/tools/lib/perf/threadmap.c
index e92c368b0a6c..db431b036f57 100644
--- a/tools/lib/perf/threadmap.c
+++ b/tools/lib/perf/threadmap.c
@@ -32,28 +32,38 @@ struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, in
#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
-void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid)
+void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid)
{
- map->map[thread].pid = pid;
+ map->map[idx].pid = pid;
}
-char *perf_thread_map__comm(struct perf_thread_map *map, int thread)
+char *perf_thread_map__comm(struct perf_thread_map *map, int idx)
{
- return map->map[thread].comm;
+ return map->map[idx].comm;
}
-struct perf_thread_map *perf_thread_map__new_dummy(void)
+struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array)
{
- struct perf_thread_map *threads = thread_map__alloc(1);
+ struct perf_thread_map *threads = thread_map__alloc(nr_threads);
+ int i;
+
+ if (!threads)
+ return NULL;
+
+ for (i = 0; i < nr_threads; i++)
+ perf_thread_map__set_pid(threads, i, array ? array[i] : -1);
+
+ threads->nr = nr_threads;
+ refcount_set(&threads->refcnt, 1);
- if (threads != NULL) {
- perf_thread_map__set_pid(threads, 0, -1);
- threads->nr = 1;
- refcount_set(&threads->refcnt, 1);
- }
return threads;
}
+struct perf_thread_map *perf_thread_map__new_dummy(void)
+{
+ return perf_thread_map__new_array(1, NULL);
+}
+
static void perf_thread_map__delete(struct perf_thread_map *threads)
{
if (threads) {
@@ -85,7 +95,24 @@ int perf_thread_map__nr(struct perf_thread_map *threads)
return threads ? threads->nr : 1;
}
-pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread)
+pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx)
+{
+ if (!map) {
+ assert(idx == 0);
+ return -1;
+ }
+
+ return map->map[idx].pid;
+}
+
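+/* look up pid in the map; return its index, or -1 if absent (a NULL map matches pid -1 at index 0) */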
+int perf_thread_map__idx(struct perf_thread_map *threads, pid_t pid)
{
- return map->map[thread].pid;
+ if (!threads)
+ return pid == -1 ? 0 : -1;
+
+ for (int i = 0; i < threads->nr; ++i) {
+ if (threads->map[i].pid == pid)
+ return i;
+ }
+ return -1;
}