Diffstat (limited to 'tools/perf/util/synthetic-events.c')
-rw-r--r-- | tools/perf/util/synthetic-events.c | 1074
1 file changed, 792 insertions, 282 deletions
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c index c423298fe62d..cccd293b5312 100644 --- a/tools/perf/util/synthetic-events.c +++ b/tools/perf/util/synthetic-events.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only +#include "util/cgroup.h" +#include "util/data.h" #include "util/debug.h" #include "util/dso.h" #include "util/event.h" @@ -23,7 +25,6 @@ #include <linux/perf_event.h> #include <asm/bug.h> #include <perf/evsel.h> -#include <internal/cpumap.h> #include <perf/cpumap.h> #include <internal/lib.h> // page_size #include <internal/threadmap.h> @@ -36,6 +37,7 @@ #include <string.h> #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ #include <api/fs/fs.h> +#include <api/io.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> @@ -67,24 +69,26 @@ int perf_tool__process_synth_event(struct perf_tool *tool, * Assumes that the first 4095 bytes of /proc/pid/stat contains * the comm, tgid and ppid. */ -static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, - pid_t *tgid, pid_t *ppid) +static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len, + pid_t *tgid, pid_t *ppid, bool *kernel) { - char filename[PATH_MAX]; char bf[4096]; int fd; size_t size = 0; ssize_t n; - char *name, *tgids, *ppids; + char *name, *tgids, *ppids, *vmpeak, *threads; *tgid = -1; *ppid = -1; - snprintf(filename, sizeof(filename), "/proc/%d/status", pid); + if (pid) + snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid); + else + snprintf(bf, sizeof(bf), "/proc/%d/status", tid); - fd = open(filename, O_RDONLY); + fd = open(bf, O_RDONLY); if (fd < 0) { - pr_debug("couldn't open %s\n", filename); + pr_debug("couldn't open %s\n", bf); return -1; } @@ -92,14 +96,20 @@ static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, close(fd); if (n <= 0) { pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", - pid); + tid); return -1; } bf[n] = '\0'; name = strstr(bf, "Name:"); - tgids = strstr(bf, "Tgid:"); - ppids = strstr(bf, "PPid:"); + tgids = strstr(name ?: bf, "Tgid:"); + ppids = strstr(tgids ?: bf, "PPid:"); + vmpeak = strstr(ppids ?: bf, "VmPeak:"); + + if (vmpeak) + threads = NULL; + else + threads = strstr(ppids ?: bf, "Threads:"); if (name) { char *nl; @@ -115,29 +125,34 @@ static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, memcpy(comm, name, size); comm[size] = '\0'; } else { - pr_debug("Name: string not found for pid %d\n", pid); + pr_debug("Name: string not found for pid %d\n", tid); } if (tgids) { tgids += 5; /* strlen("Tgid:") */ *tgid = atoi(tgids); } else { - pr_debug("Tgid: string not found for pid %d\n", pid); + pr_debug("Tgid: string not found for pid %d\n", tid); } if (ppids) { ppids += 5; /* strlen("PPid:") */ *ppid = atoi(ppids); } else { - pr_debug("PPid: string not found for pid %d\n", pid); + pr_debug("PPid: string not found for pid %d\n", tid); } + if (!vmpeak && threads) + *kernel = true; + else + *kernel = false; + return 0; } -static int perf_event__prepare_comm(union perf_event *event, pid_t pid, +static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid, struct machine *machine, - pid_t *tgid, pid_t *ppid) + pid_t *tgid, pid_t *ppid, bool *kernel) { size_t size; @@ -146,9 +161,9 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid, memset(&event->comm, 0, sizeof(event->comm)); if (machine__is_host(machine)) { - if (perf_event__get_comm_ids(pid, event->comm.comm, + if 
(perf_event__get_comm_ids(pid, tid, event->comm.comm, sizeof(event->comm.comm), - tgid, ppid) != 0) { + tgid, ppid, kernel) != 0) { return -1; } } else { @@ -167,7 +182,7 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid, event->comm.header.size = (sizeof(event->comm) - (sizeof(event->comm.comm) - size) + machine->id_hdr_size); - event->comm.tid = pid; + event->comm.tid = tid; return 0; } @@ -178,8 +193,10 @@ pid_t perf_event__synthesize_comm(struct perf_tool *tool, struct machine *machine) { pid_t tgid, ppid; + bool kernel_thread; - if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) + if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid, + &kernel_thread) != 0) return -1; if (perf_tool__process_synth_event(tool, event, machine, process) != 0) @@ -273,6 +290,134 @@ static int perf_event__synthesize_fork(struct perf_tool *tool, return 0; } +static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end, + u32 *prot, u32 *flags, __u64 *offset, + u32 *maj, u32 *min, + __u64 *inode, + ssize_t pathname_size, char *pathname) +{ + __u64 temp; + int ch; + char *start_pathname = pathname; + + if (io__get_hex(io, start) != '-') + return false; + if (io__get_hex(io, end) != ' ') + return false; + + /* map protection and flags bits */ + *prot = 0; + ch = io__get_char(io); + if (ch == 'r') + *prot |= PROT_READ; + else if (ch != '-') + return false; + ch = io__get_char(io); + if (ch == 'w') + *prot |= PROT_WRITE; + else if (ch != '-') + return false; + ch = io__get_char(io); + if (ch == 'x') + *prot |= PROT_EXEC; + else if (ch != '-') + return false; + ch = io__get_char(io); + if (ch == 's') + *flags = MAP_SHARED; + else if (ch == 'p') + *flags = MAP_PRIVATE; + else + return false; + if (io__get_char(io) != ' ') + return false; + + if (io__get_hex(io, offset) != ' ') + return false; + + if (io__get_hex(io, &temp) != ':') + return false; + *maj = temp; + if (io__get_hex(io, &temp) != ' ') + return false; + *min = temp; + + ch = io__get_dec(io, inode); + if (ch != ' ') { + *pathname = '\0'; + return ch == '\n'; + } + do { + ch = io__get_char(io); + } while (ch == ' '); + while (true) { + if (ch < 0) + return false; + if (ch == '\0' || ch == '\n' || + (pathname + 1 - start_pathname) >= pathname_size) { + *pathname = '\0'; + return true; + } + *pathname++ = ch; + ch = io__get_char(io); + } +} + +static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event, + struct machine *machine, + bool is_kernel) +{ + struct build_id bid; + struct nsinfo *nsi; + struct nscookie nc; + struct dso *dso = NULL; + struct dso_id id; + int rc; + + if (is_kernel) { + rc = sysfs__read_build_id("/sys/kernel/notes", &bid); + goto out; + } + + id.maj = event->maj; + id.min = event->min; + id.ino = event->ino; + id.ino_generation = event->ino_generation; + + dso = dsos__findnew_id(&machine->dsos, event->filename, &id); + if (dso && dso->has_build_id) { + bid = dso->bid; + rc = 0; + goto out; + } + + nsi = nsinfo__new(event->pid); + nsinfo__mountns_enter(nsi, &nc); + + rc = filename__read_build_id(event->filename, &bid) > 0 ? 
0 : -1; + + nsinfo__mountns_exit(&nc); + nsinfo__put(nsi); + +out: + if (rc == 0) { + memcpy(event->build_id, bid.data, sizeof(bid.data)); + event->build_id_size = (u8) bid.size; + event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID; + event->__reserved_1 = 0; + event->__reserved_2 = 0; + + if (dso && !dso->has_build_id) + dso__set_build_id(dso, &bid); + } else { + if (event->filename[0] == '/') { + pr_debug2("Failed to read build ID for %s\n", + event->filename); + } + } + dso__put(dso); +} + int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, @@ -280,9 +425,9 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, struct machine *machine, bool mmap_data) { - char filename[PATH_MAX]; - FILE *fp; unsigned long long t; + char bf[BUFSIZ]; + struct io io; bool truncation = false; unsigned long long timeout = proc_map_timeout * 1000000ULL; int rc = 0; @@ -292,59 +437,53 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, if (machine__is_default_guest(machine)) return 0; - snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps", - machine->root_dir, pid, pid); + snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps", + machine->root_dir, pid, pid); - fp = fopen(filename, "r"); - if (fp == NULL) { + io.fd = open(bf, O_RDONLY, 0); + if (io.fd < 0) { /* * We raced with a task exiting - just return: */ - pr_debug("couldn't open %s\n", filename); + pr_debug("couldn't open %s\n", bf); return -1; } + io__init(&io, io.fd, bf, sizeof(bf)); event->header.type = PERF_RECORD_MMAP2; t = rdclock(); - while (1) { - char bf[BUFSIZ]; - char prot[5]; - char execname[PATH_MAX]; - char anonstr[] = "//anon"; - unsigned int ino; - size_t size; - ssize_t n; + while (!io.eof) { + static const char anonstr[] = "//anon"; + size_t size, aligned_size; - if (fgets(bf, sizeof(bf), fp) == NULL) - break; + /* ensure null termination since stack will be reused. */ + event->mmap2.filename[0] = '\0'; + + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ + if (!read_proc_maps_line(&io, + &event->mmap2.start, + &event->mmap2.len, + &event->mmap2.prot, + &event->mmap2.flags, + &event->mmap2.pgoff, + &event->mmap2.maj, + &event->mmap2.min, + &event->mmap2.ino, + sizeof(event->mmap2.filename), + event->mmap2.filename)) + continue; if ((rdclock() - t) > timeout) { - pr_warning("Reading %s time out. " + pr_warning("Reading %s/proc/%d/task/%d/maps time out. " "You may want to increase " "the time limit by --proc-map-timeout\n", - filename); + machine->root_dir, pid, pid); truncation = true; goto out; } - /* ensure null termination since stack will be reused. */ - strcpy(execname, ""); - - /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ - n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n", - &event->mmap2.start, &event->mmap2.len, prot, - &event->mmap2.pgoff, &event->mmap2.maj, - &event->mmap2.min, - &ino, execname); - - /* - * Anon maps don't have the execname. 
- */ - if (n < 7) - continue; - - event->mmap2.ino = (u64)ino; + event->mmap2.ino_generation = 0; /* * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c @@ -354,23 +493,8 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, else event->header.misc = PERF_RECORD_MISC_GUEST_USER; - /* map protection and flags bits */ - event->mmap2.prot = 0; - event->mmap2.flags = 0; - if (prot[0] == 'r') - event->mmap2.prot |= PROT_READ; - if (prot[1] == 'w') - event->mmap2.prot |= PROT_WRITE; - if (prot[2] == 'x') - event->mmap2.prot |= PROT_EXEC; - - if (prot[3] == 's') - event->mmap2.flags |= MAP_SHARED; - else - event->mmap2.flags |= MAP_PRIVATE; - - if (prot[2] != 'x') { - if (!mmap_data || prot[0] != 'r') + if ((event->mmap2.prot & PROT_EXEC) == 0) { + if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0) continue; event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; @@ -380,26 +504,30 @@ out: if (truncation) event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; - if (!strcmp(execname, "")) - strcpy(execname, anonstr); + if (!strcmp(event->mmap2.filename, "")) + strcpy(event->mmap2.filename, anonstr); if (hugetlbfs_mnt_len && - !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) { - strcpy(execname, anonstr); + !strncmp(event->mmap2.filename, hugetlbfs_mnt, + hugetlbfs_mnt_len)) { + strcpy(event->mmap2.filename, anonstr); event->mmap2.flags |= MAP_HUGETLB; } - size = strlen(execname) + 1; - memcpy(event->mmap2.filename, execname, size); - size = PERF_ALIGN(size, sizeof(u64)); + size = strlen(event->mmap2.filename) + 1; + aligned_size = PERF_ALIGN(size, sizeof(u64)); event->mmap2.len -= event->mmap.start; event->mmap2.header.size = (sizeof(event->mmap2) - - (sizeof(event->mmap2.filename) - size)); - memset(event->mmap2.filename + size, 0, machine->id_hdr_size); + (sizeof(event->mmap2.filename) - aligned_size)); + memset(event->mmap2.filename + size, 0, machine->id_hdr_size + + (aligned_size - size)); event->mmap2.header.size += machine->id_hdr_size; event->mmap2.pid = tgid; event->mmap2.tid = pid; + if (symbol_conf.buildid_mmap2) + perf_record_mmap2__read_build_id(&event->mmap2, machine, false); + if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { rc = -1; break; @@ -409,26 +537,151 @@ out: break; } - fclose(fp); + close(io.fd); return rc; } +#ifdef HAVE_FILE_HANDLE +static int perf_event__synthesize_cgroup(struct perf_tool *tool, + union perf_event *event, + char *path, size_t mount_len, + perf_event__handler_t process, + struct machine *machine) +{ + size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path); + size_t path_len = strlen(path) - mount_len + 1; + struct { + struct file_handle fh; + uint64_t cgroup_id; + } handle; + int mount_id; + + while (path_len % sizeof(u64)) + path[mount_len + path_len++] = '\0'; + + memset(&event->cgroup, 0, event_size); + + event->cgroup.header.type = PERF_RECORD_CGROUP; + event->cgroup.header.size = event_size + path_len + machine->id_hdr_size; + + handle.fh.handle_bytes = sizeof(handle.cgroup_id); + if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) { + pr_debug("stat failed: %s\n", path); + return -1; + } + + event->cgroup.id = handle.cgroup_id; + strncpy(event->cgroup.path, path + mount_len, path_len); + memset(event->cgroup.path + path_len, 0, machine->id_hdr_size); + + if (perf_tool__process_synth_event(tool, event, machine, process) < 0) { + pr_debug("process synth event failed\n"); + return -1; + } + + return 0; +} + +static int perf_event__walk_cgroup_tree(struct 
perf_tool *tool, + union perf_event *event, + char *path, size_t mount_len, + perf_event__handler_t process, + struct machine *machine) +{ + size_t pos = strlen(path); + DIR *d; + struct dirent *dent; + int ret = 0; + + if (perf_event__synthesize_cgroup(tool, event, path, mount_len, + process, machine) < 0) + return -1; + + d = opendir(path); + if (d == NULL) { + pr_debug("failed to open directory: %s\n", path); + return -1; + } + + while ((dent = readdir(d)) != NULL) { + if (dent->d_type != DT_DIR) + continue; + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; + + /* any sane path should be less than PATH_MAX */ + if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX) + continue; + + if (path[pos - 1] != '/') + strcat(path, "/"); + strcat(path, dent->d_name); + + ret = perf_event__walk_cgroup_tree(tool, event, path, + mount_len, process, machine); + if (ret < 0) + break; + + path[pos] = '\0'; + } + + closedir(d); + return ret; +} + +int perf_event__synthesize_cgroups(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) +{ + union perf_event event; + char cgrp_root[PATH_MAX]; + size_t mount_len; /* length of mount point in the path */ + + if (!tool || !tool->cgroup_events) + return 0; + + if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) { + pr_debug("cannot find cgroup mount point\n"); + return -1; + } + + mount_len = strlen(cgrp_root); + /* make sure the path starts with a slash (after mount point) */ + strcat(cgrp_root, "/"); + + if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len, + process, machine) < 0) + return -1; + + return 0; +} +#else +int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused, + perf_event__handler_t process __maybe_unused, + struct machine *machine __maybe_unused) +{ + return -1; +} +#endif + int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine) { int rc = 0; struct map *pos; struct maps *maps = machine__kernel_maps(machine); - union perf_event *event = zalloc((sizeof(event->mmap) + - machine->id_hdr_size)); + union perf_event *event; + size_t size = symbol_conf.buildid_mmap2 ? 
+ sizeof(event->mmap2) : sizeof(event->mmap); + + event = zalloc(size + machine->id_hdr_size); if (event == NULL) { pr_debug("Not enough memory synthesizing mmap event " "for kernel modules\n"); return -1; } - event->header.type = PERF_RECORD_MMAP; - /* * kernel uses 0 for user space maps, see kernel/perf_event.c * __perf_event_mmap @@ -439,23 +692,39 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; maps__for_each_entry(maps, pos) { - size_t size; - if (!__map__is_kmodule(pos)) continue; - size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); - event->mmap.header.type = PERF_RECORD_MMAP; - event->mmap.header.size = (sizeof(event->mmap) - - (sizeof(event->mmap.filename) - size)); - memset(event->mmap.filename + size, 0, machine->id_hdr_size); - event->mmap.header.size += machine->id_hdr_size; - event->mmap.start = pos->start; - event->mmap.len = pos->end - pos->start; - event->mmap.pid = machine->pid; + if (symbol_conf.buildid_mmap2) { + size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); + event->mmap2.header.type = PERF_RECORD_MMAP2; + event->mmap2.header.size = (sizeof(event->mmap2) - + (sizeof(event->mmap2.filename) - size)); + memset(event->mmap2.filename + size, 0, machine->id_hdr_size); + event->mmap2.header.size += machine->id_hdr_size; + event->mmap2.start = pos->start; + event->mmap2.len = pos->end - pos->start; + event->mmap2.pid = machine->pid; + + memcpy(event->mmap2.filename, pos->dso->long_name, + pos->dso->long_name_len + 1); + + perf_record_mmap2__read_build_id(&event->mmap2, machine, false); + } else { + size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); + event->mmap.header.type = PERF_RECORD_MMAP; + event->mmap.header.size = (sizeof(event->mmap) - + (sizeof(event->mmap.filename) - size)); + memset(event->mmap.filename + size, 0, machine->id_hdr_size); + event->mmap.header.size += machine->id_hdr_size; + event->mmap.start = pos->start; + event->mmap.len = pos->end - pos->start; + event->mmap.pid = machine->pid; + + memcpy(event->mmap.filename, pos->dso->long_name, + pos->dso->long_name_len + 1); + } - memcpy(event->mmap.filename, pos->dso->long_name, - pos->dso->long_name_len + 1); if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { rc = -1; break; @@ -466,18 +735,24 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t return rc; } +static int filter_task(const struct dirent *dirent) +{ + return isdigit(dirent->d_name[0]); +} + static int __event__synthesize_thread(union perf_event *comm_event, union perf_event *mmap_event, union perf_event *fork_event, union perf_event *namespaces_event, pid_t pid, int full, perf_event__handler_t process, - struct perf_tool *tool, struct machine *machine, bool mmap_data) + struct perf_tool *tool, struct machine *machine, + bool needs_mmap, bool mmap_data) { char filename[PATH_MAX]; - DIR *tasks; - struct dirent *dirent; + struct dirent **dirent; pid_t tgid, ppid; int rc = 0; + int i, n; /* special case: only send one comm event using passed in pid */ if (!full) { @@ -495,7 +770,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, * send mmap only for thread group leader * see thread__init_maps() */ - if (pid == tgid && + if (pid == tgid && needs_mmap && perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, process, machine, mmap_data)) return -1; @@ -509,25 +784,25 @@ static int __event__synthesize_thread(union perf_event *comm_event, 
snprintf(filename, sizeof(filename), "%s/proc/%d/task", machine->root_dir, pid); - tasks = opendir(filename); - if (tasks == NULL) { - pr_debug("couldn't open %s\n", filename); - return 0; - } + n = scandir(filename, &dirent, filter_task, NULL); + if (n < 0) + return n; - while ((dirent = readdir(tasks)) != NULL) { + for (i = 0; i < n; i++) { char *end; pid_t _pid; + bool kernel_thread = false; - _pid = strtol(dirent->d_name, &end, 10); + _pid = strtol(dirent[i]->d_name, &end, 10); if (*end) continue; - rc = -1; - if (perf_event__prepare_comm(comm_event, _pid, machine, - &tgid, &ppid) != 0) - break; + /* some threads may exit just after scan, ignore it */ + if (perf_event__prepare_comm(comm_event, pid, _pid, machine, + &tgid, &ppid, &kernel_thread) != 0) + continue; + rc = -1; if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, ppid, process, machine) < 0) break; @@ -543,7 +818,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, break; rc = 0; - if (_pid == pid) { + if (_pid == pid && !kernel_thread && needs_mmap) { /* process the parent's maps too */ rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, process, machine, mmap_data); @@ -552,7 +827,10 @@ static int __event__synthesize_thread(union perf_event *comm_event, } } - closedir(tasks); + for (i = 0; i < n; i++) + zfree(&dirent[i]); + free(dirent); + return rc; } @@ -560,7 +838,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, - bool mmap_data) + bool needs_mmap, bool mmap_data) { union perf_event *comm_event, *mmap_event, *fork_event; union perf_event *namespaces_event; @@ -590,7 +868,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, fork_event, namespaces_event, perf_thread_map__pid(threads, thread), 0, process, tool, machine, - mmap_data)) { + needs_mmap, mmap_data)) { err = -1; break; } @@ -616,7 +894,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, fork_event, namespaces_event, comm_event->comm.pid, 0, process, tool, machine, - mmap_data)) { + needs_mmap, mmap_data)) { err = -1; break; } @@ -636,6 +914,7 @@ out: static int __perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, + bool needs_mmap, bool mmap_data, struct dirent **dirent, int start, @@ -680,7 +959,7 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool, */ __event__synthesize_thread(comm_event, mmap_event, fork_event, namespaces_event, pid, 1, process, - tool, machine, mmap_data); + tool, machine, needs_mmap, mmap_data); } err = 0; @@ -699,6 +978,7 @@ struct synthesize_threads_arg { struct perf_tool *tool; perf_event__handler_t process; struct machine *machine; + bool needs_mmap; bool mmap_data; struct dirent **dirent; int num; @@ -710,7 +990,8 @@ static void *synthesize_threads_worker(void *arg) struct synthesize_threads_arg *args = arg; __perf_event__synthesize_threads(args->tool, args->process, - args->machine, args->mmap_data, + args->machine, + args->needs_mmap, args->mmap_data, args->dirent, args->start, args->num); return NULL; @@ -719,7 +1000,7 @@ static void *synthesize_threads_worker(void *arg) int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, - bool mmap_data, + bool needs_mmap, bool mmap_data, unsigned int nr_threads_synthesize) { struct synthesize_threads_arg *args = NULL; @@ -737,7 +1018,7 @@ int 
perf_event__synthesize_threads(struct perf_tool *tool, return 0; snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); - n = scandir(proc_path, &dirent, 0, alphasort); + n = scandir(proc_path, &dirent, filter_task, NULL); if (n < 0) return err; @@ -748,7 +1029,8 @@ int perf_event__synthesize_threads(struct perf_tool *tool, if (thread_nr <= 1) { err = __perf_event__synthesize_threads(tool, process, - machine, mmap_data, + machine, + needs_mmap, mmap_data, dirent, base, n); goto free_dirent; } @@ -769,6 +1051,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool, args[i].tool = tool; args[i].process = process; args[i].machine = machine; + args[i].needs_mmap = needs_mmap; args[i].mmap_data = mmap_data; args[i].dirent = dirent; } @@ -814,11 +1097,12 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine) { - size_t size; + union perf_event *event; + size_t size = symbol_conf.buildid_mmap2 ? + sizeof(event->mmap2) : sizeof(event->mmap); struct map *map = machine__kernel_map(machine); struct kmap *kmap; int err; - union perf_event *event; if (map == NULL) return -1; @@ -832,7 +1116,7 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool, * available use this, and after it is use this as a fallback for older * kernels. */ - event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); + event = zalloc(size + machine->id_hdr_size); if (event == NULL) { pr_debug("Not enough memory synthesizing mmap event " "for kernel modules\n"); @@ -849,16 +1133,31 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool, event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; } - size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), - "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; - size = PERF_ALIGN(size, sizeof(u64)); - event->mmap.header.type = PERF_RECORD_MMAP; - event->mmap.header.size = (sizeof(event->mmap) - - (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); - event->mmap.pgoff = kmap->ref_reloc_sym->addr; - event->mmap.start = map->start; - event->mmap.len = map->end - event->mmap.start; - event->mmap.pid = machine->pid; + if (symbol_conf.buildid_mmap2) { + size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename), + "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; + size = PERF_ALIGN(size, sizeof(u64)); + event->mmap2.header.type = PERF_RECORD_MMAP2; + event->mmap2.header.size = (sizeof(event->mmap2) - + (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size); + event->mmap2.pgoff = kmap->ref_reloc_sym->addr; + event->mmap2.start = map->start; + event->mmap2.len = map->end - event->mmap.start; + event->mmap2.pid = machine->pid; + + perf_record_mmap2__read_build_id(&event->mmap2, machine, true); + } else { + size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), + "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; + size = PERF_ALIGN(size, sizeof(u64)); + event->mmap.header.type = PERF_RECORD_MMAP; + event->mmap.header.size = (sizeof(event->mmap) - + (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); + event->mmap.pgoff = kmap->ref_reloc_sym->addr; + event->mmap.start = map->start; + event->mmap.len = map->end - event->mmap.start; + event->mmap.pid = machine->pid; + } err = perf_tool__process_synth_event(tool, event, machine, process); free(event); @@ -915,122 +1214,122 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool, return err; } -static void 
synthesize_cpus(struct cpu_map_entries *cpus, - struct perf_cpu_map *map) -{ - int i; - - cpus->nr = map->nr; - - for (i = 0; i < map->nr; i++) - cpus->cpu[i] = map->map[i]; -} - -static void synthesize_mask(struct perf_record_record_cpu_map *mask, - struct perf_cpu_map *map, int max) -{ - int i; - - mask->nr = BITS_TO_LONGS(max); - mask->long_size = sizeof(long); - - for (i = 0; i < map->nr; i++) - set_bit(map->map[i], mask->mask); -} +struct synthesize_cpu_map_data { + const struct perf_cpu_map *map; + int nr; + int min_cpu; + int max_cpu; + int has_any_cpu; + int type; + size_t size; + struct perf_record_cpu_map_data *data; +}; -static size_t cpus_size(struct perf_cpu_map *map) +static void synthesize_cpus(struct synthesize_cpu_map_data *data) { - return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16); + data->data->type = PERF_CPU_MAP__CPUS; + data->data->cpus_data.nr = data->nr; + for (int i = 0; i < data->nr; i++) + data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu; } -static size_t mask_size(struct perf_cpu_map *map, int *max) +static void synthesize_mask(struct synthesize_cpu_map_data *data) { - int i; + int idx; + struct perf_cpu cpu; - *max = 0; + /* Due to padding, the 4bytes per entry mask variant is always smaller. */ + data->data->type = PERF_CPU_MAP__MASK; + data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu); + data->data->mask32_data.long_size = 4; - for (i = 0; i < map->nr; i++) { - /* bit possition of the cpu is + 1 */ - int bit = map->map[i] + 1; + perf_cpu_map__for_each_cpu(cpu, idx, data->map) { + int bit_word = cpu.cpu / 32; + u32 bit_mask = 1U << (cpu.cpu & 31); - if (bit > *max) - *max = bit; + data->data->mask32_data.mask[bit_word] |= bit_mask; } +} - return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long); +static void synthesize_range_cpus(struct synthesize_cpu_map_data *data) +{ + data->data->type = PERF_CPU_MAP__RANGE_CPUS; + data->data->range_cpu_data.any_cpu = data->has_any_cpu; + data->data->range_cpu_data.start_cpu = data->min_cpu; + data->data->range_cpu_data.end_cpu = data->max_cpu; } -void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max) +static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data, + size_t header_size) { size_t size_cpus, size_mask; - bool is_dummy = perf_cpu_map__empty(map); - - /* - * Both array and mask data have variable size based - * on the number of cpus and their actual values. - * The size of the 'struct perf_record_cpu_map_data' is: - * - * array = size of 'struct cpu_map_entries' + - * number of cpus * sizeof(u64) - * - * mask = size of 'struct perf_record_record_cpu_map' + - * maximum cpu bit converted to size of longs - * - * and finaly + the size of 'struct perf_record_cpu_map_data'. - */ - size_cpus = cpus_size(map); - size_mask = mask_size(map, max); - - if (is_dummy || (size_cpus < size_mask)) { - *size += size_cpus; - *type = PERF_CPU_MAP__CPUS; - } else { - *size += size_mask; - *type = PERF_CPU_MAP__MASK; - } - *size += sizeof(struct perf_record_cpu_map_data); - *size = PERF_ALIGN(*size, sizeof(u64)); - return zalloc(*size); + syn_data->nr = perf_cpu_map__nr(syn_data->map); + syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 
1 : 0; + + syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu; + syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu; + if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) { + /* A consecutive range of CPUs can be encoded using a range. */ + assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64)); + syn_data->type = PERF_CPU_MAP__RANGE_CPUS; + syn_data->size = header_size + sizeof(u64); + return zalloc(syn_data->size); + } + + size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16); + /* Due to padding, the 4bytes per entry mask variant is always smaller. */ + size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) + + BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32); + if (syn_data->has_any_cpu || size_cpus < size_mask) { + /* Follow the CPU map encoding. */ + syn_data->type = PERF_CPU_MAP__CPUS; + syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64)); + return zalloc(syn_data->size); + } + /* Encode using a bitmask. */ + syn_data->type = PERF_CPU_MAP__MASK; + syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64)); + return zalloc(syn_data->size); } -void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map, - u16 type, int max) +static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data) { - data->type = type; - - switch (type) { + switch (data->type) { case PERF_CPU_MAP__CPUS: - synthesize_cpus((struct cpu_map_entries *) data->data, map); + synthesize_cpus(data); break; case PERF_CPU_MAP__MASK: - synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max); + synthesize_mask(data); + break; + case PERF_CPU_MAP__RANGE_CPUS: + synthesize_range_cpus(data); + break; default: break; - }; + } } -static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map) +static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map) { - size_t size = sizeof(struct perf_record_cpu_map); + struct synthesize_cpu_map_data syn_data = { .map = map }; struct perf_record_cpu_map *event; - int max; - u16 type; - event = cpu_map_data__alloc(map, &size, &type, &max); + + event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header)); if (!event) return NULL; + syn_data.data = &event->data; event->header.type = PERF_RECORD_CPU_MAP; - event->header.size = size; - event->data.type = type; - - cpu_map_data__synthesize(&event->data, map, type, max); + event->header.size = syn_data.size; + cpu_map_data__synthesize(&syn_data); return event; } + int perf_event__synthesize_cpu_map(struct perf_tool *tool, - struct perf_cpu_map *map, + const struct perf_cpu_map *map, perf_event__handler_t process, struct machine *machine) { @@ -1086,7 +1385,7 @@ int perf_event__synthesize_stat_config(struct perf_tool *tool, } int perf_event__synthesize_stat(struct perf_tool *tool, - u32 cpu, u32 thread, u64 id, + struct perf_cpu cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine) @@ -1098,7 +1397,7 @@ int perf_event__synthesize_stat(struct perf_tool *tool, event.header.misc = 0; event.id = id; - event.cpu = cpu; + event.cpu = cpu.cpu; event.thread = thread; event.val = count->val; event.ena = count->ena; @@ -1163,11 +1462,12 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, result += sizeof(u64); /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ if (read_format & PERF_FORMAT_GROUP) 
{ - sz = sample->read.group.nr * - sizeof(struct sample_read_value); - result += sz; + sz = sample_read_value_size(read_format); + result += sz * sample->read.group.nr; } else { result += sizeof(u64); + if (read_format & PERF_FORMAT_LOST) + result += sizeof(u64); } } @@ -1183,7 +1483,8 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, if (type & PERF_SAMPLE_BRANCH_STACK) { sz = sample->branch_stack->nr * sizeof(struct branch_entry); - sz += sizeof(u64); + /* nr, hw_idx */ + sz += 2 * sizeof(u64); result += sz; } @@ -1206,7 +1507,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, } } - if (type & PERF_SAMPLE_WEIGHT) + if (type & PERF_SAMPLE_WEIGHT_TYPE) result += sizeof(u64); if (type & PERF_SAMPLE_DATA_SRC) @@ -1228,6 +1529,15 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, if (type & PERF_SAMPLE_PHYS_ADDR) result += sizeof(u64); + if (type & PERF_SAMPLE_CGROUP) + result += sizeof(u64); + + if (type & PERF_SAMPLE_DATA_PAGE_SIZE) + result += sizeof(u64); + + if (type & PERF_SAMPLE_CODE_PAGE_SIZE) + result += sizeof(u64); + if (type & PERF_SAMPLE_AUX) { result += sizeof(u64); result += sample->aux_sample.size; @@ -1236,6 +1546,26 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, return result; } +void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data, + __u64 *array, u64 type __maybe_unused) +{ + *array = data->weight; +} + +static __u64 *copy_read_group_values(__u64 *array, __u64 read_format, + const struct perf_sample *sample) +{ + size_t sz = sample_read_value_size(read_format); + struct sample_read_value *v = sample->read.group.values; + + sample_read_group__for_each(v, sample->read.group.nr, read_format) { + /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ + memcpy(array, v, sz); + array = (void *)array + sz; + } + return array; +} + int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, const struct perf_sample *sample) { @@ -1317,13 +1647,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ if (read_format & PERF_FORMAT_GROUP) { - sz = sample->read.group.nr * - sizeof(struct sample_read_value); - memcpy(array, sample->read.group.values, sz); - array = (void *)array + sz; + array = copy_read_group_values(array, read_format, + sample); } else { *array = sample->read.one.id; array++; + + if (read_format & PERF_FORMAT_LOST) { + *array = sample->read.one.lost; + array++; + } } } @@ -1344,7 +1677,8 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo if (type & PERF_SAMPLE_BRANCH_STACK) { sz = sample->branch_stack->nr * sizeof(struct branch_entry); - sz += sizeof(u64); + /* nr, hw_idx */ + sz += 2 * sizeof(u64); memcpy(array, sample->branch_stack, sz); array = (void *)array + sz; } @@ -1370,8 +1704,8 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo } } - if (type & PERF_SAMPLE_WEIGHT) { - *array = sample->weight; + if (type & PERF_SAMPLE_WEIGHT_TYPE) { + arch_perf_synthesize_sample_weight(sample, array, type); array++; } @@ -1401,6 +1735,21 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo array++; } + if (type & PERF_SAMPLE_CGROUP) { + *array = sample->cgroup; + array++; + } + + if (type & PERF_SAMPLE_DATA_PAGE_SIZE) { + *array = sample->data_page_size; + array++; + } + + if (type & PERF_SAMPLE_CODE_PAGE_SIZE) { + 
*array = sample->code_page_size; + array++; + } + if (type & PERF_SAMPLE_AUX) { sz = sample->aux_sample.size; *array++ = sz; @@ -1411,65 +1760,136 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo return 0; } -int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, - struct evlist *evlist, struct machine *machine) +int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample) +{ + __u64 *start = array; + + /* + * used for cross-endian analysis. See git commit 65014ab3 + * for why this goofiness is needed. + */ + union u64_swap u; + + if (type & PERF_SAMPLE_TID) { + u.val32[0] = sample->pid; + u.val32[1] = sample->tid; + *array = u.val64; + array++; + } + + if (type & PERF_SAMPLE_TIME) { + *array = sample->time; + array++; + } + + if (type & PERF_SAMPLE_ID) { + *array = sample->id; + array++; + } + + if (type & PERF_SAMPLE_STREAM_ID) { + *array = sample->stream_id; + array++; + } + + if (type & PERF_SAMPLE_CPU) { + u.val32[0] = sample->cpu; + u.val32[1] = 0; + *array = u.val64; + array++; + } + + if (type & PERF_SAMPLE_IDENTIFIER) { + *array = sample->id; + array++; + } + + return (void *)array - (void *)start; +} + +int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, + struct evlist *evlist, struct machine *machine, size_t from) { union perf_event *ev; struct evsel *evsel; - size_t nr = 0, i = 0, sz, max_nr, n; + size_t nr = 0, i = 0, sz, max_nr, n, pos; + size_t e1_sz = sizeof(struct id_index_entry); + size_t e2_sz = sizeof(struct id_index_entry_2); + size_t etot_sz = e1_sz + e2_sz; + bool e2_needed = false; int err; - pr_debug2("Synthesizing id index\n"); - - max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / - sizeof(struct id_index_entry); + max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz; - evlist__for_each_entry(evlist, evsel) + pos = 0; + evlist__for_each_entry(evlist, evsel) { + if (pos++ < from) + continue; nr += evsel->core.ids; + } + + if (!nr) + return 0; + + pr_debug2("Synthesizing id index\n"); n = nr > max_nr ? max_nr : nr; - sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry); + sz = sizeof(struct perf_record_id_index) + n * etot_sz; ev = zalloc(sz); if (!ev) return -ENOMEM; + sz = sizeof(struct perf_record_id_index) + n * e1_sz; + ev->id_index.header.type = PERF_RECORD_ID_INDEX; - ev->id_index.header.size = sz; ev->id_index.nr = n; + pos = 0; evlist__for_each_entry(evlist, evsel) { u32 j; - for (j = 0; j < evsel->core.ids; j++) { + if (pos++ < from) + continue; + for (j = 0; j < evsel->core.ids; j++, i++) { struct id_index_entry *e; + struct id_index_entry_2 *e2; struct perf_sample_id *sid; if (i >= n) { + ev->id_index.header.size = sz + (e2_needed ? 
n * e2_sz : 0); err = process(tool, ev, NULL, machine); if (err) goto out_err; nr -= n; i = 0; + e2_needed = false; } - e = &ev->id_index.entries[i++]; + e = &ev->id_index.entries[i]; e->id = evsel->core.id[j]; - sid = perf_evlist__id2sid(evlist, e->id); + sid = evlist__id2sid(evlist, e->id); if (!sid) { free(ev); return -ENOENT; } e->idx = sid->idx; - e->cpu = sid->cpu; + e->cpu = sid->cpu.cpu; e->tid = sid->tid; + + if (sid->machine_pid) + e2_needed = true; + + e2 = (void *)ev + sz; + e2[i].machine_pid = sid->machine_pid; + e2[i].vcpu = sid->vcpu.cpu; } } - sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry); - ev->id_index.header.size = sz; + sz = sizeof(struct perf_record_id_index) + nr * e1_sz; + ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0); ev->id_index.nr = nr; err = process(tool, ev, NULL, machine); @@ -1479,28 +1899,54 @@ out_err: return err; } +int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, + struct evlist *evlist, struct machine *machine) +{ + return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0); +} + int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, struct target *target, struct perf_thread_map *threads, - perf_event__handler_t process, bool data_mmap, - unsigned int nr_threads_synthesize) + perf_event__handler_t process, bool needs_mmap, + bool data_mmap, unsigned int nr_threads_synthesize) { + /* + * When perf runs in non-root PID namespace, and the namespace's proc FS + * is not mounted, nsinfo__is_in_root_namespace() returns false. + * In this case, the proc FS is coming for the parent namespace, thus + * perf tool will wrongly gather process info from its parent PID + * namespace. + * + * To avoid the confusion that the perf tool runs in a child PID + * namespace but it synthesizes thread info from its parent PID + * namespace, returns failure with warning. + */ + if (!nsinfo__is_in_root_namespace()) { + pr_err("Perf runs in non-root PID namespace but it tries to "); + pr_err("gather process info from its parent PID namespace.\n"); + pr_err("Please mount the proc file system properly, e.g. 
"); + pr_err("add the option '--mount-proc' for unshare command.\n"); + return -EPERM; + } + if (target__has_task(target)) - return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); + return perf_event__synthesize_thread_map(tool, threads, process, machine, + needs_mmap, data_mmap); else if (target__has_cpu(target)) - return perf_event__synthesize_threads(tool, process, - machine, data_mmap, + return perf_event__synthesize_threads(tool, process, machine, + needs_mmap, data_mmap, nr_threads_synthesize); /* command specified */ return 0; } int machine__synthesize_threads(struct machine *machine, struct target *target, - struct perf_thread_map *threads, bool data_mmap, - unsigned int nr_threads_synthesize) + struct perf_thread_map *threads, bool needs_mmap, + bool data_mmap, unsigned int nr_threads_synthesize) { return __machine__synthesize_threads(machine, NULL, target, threads, - perf_event__process, data_mmap, - nr_threads_synthesize); + perf_event__process, needs_mmap, + data_mmap, nr_threads_synthesize); } static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id) @@ -1531,7 +1977,7 @@ int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evse if (ev == NULL) return -ENOMEM; - strlcpy(ev->data, evsel->unit, size + 1); + strlcpy(ev->unit, evsel->unit, size + 1); err = process(tool, (union perf_event *)ev, NULL, NULL); free(ev); return err; @@ -1548,8 +1994,7 @@ int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evs if (ev == NULL) return -ENOMEM; - ev_data = (struct perf_record_event_update_scale *)ev->data; - ev_data->scale = evsel->scale; + ev->scale.scale = evsel->scale; err = process(tool, (union perf_event *)ev, NULL, NULL); free(ev); return err; @@ -1566,7 +2011,7 @@ int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evse if (ev == NULL) return -ENOMEM; - strlcpy(ev->data, evsel->name, len + 1); + strlcpy(ev->name, evsel->name, len + 1); err = process(tool, (union perf_event *)ev, NULL, NULL); free(ev); return err; @@ -1575,25 +2020,20 @@ int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evse int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process) { - size_t size = sizeof(struct perf_record_event_update); + struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus }; struct perf_record_event_update *ev; - int max, err; - u16 type; - - if (!evsel->core.own_cpus) - return 0; + int err; - ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max); + ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64)); if (!ev) return -ENOMEM; + syn_data.data = &ev->cpus.cpus; ev->header.type = PERF_RECORD_EVENT_UPDATE; - ev->header.size = (u16)size; + ev->header.size = (u16)syn_data.size; ev->type = PERF_EVENT_UPDATE__CPUS; ev->id = evsel->core.id[0]; - - cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data, - evsel->core.own_cpus, type, max); + cpu_map_data__synthesize(&syn_data); err = process(tool, (union perf_event *)ev, NULL, NULL); free(ev); @@ -1777,7 +2217,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 len = pos->long_name_len + 1; len = PERF_ALIGN(len, NAME_ALIGN); - memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); + memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data)); ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 
ev.build_id.header.misc = misc; ev.build_id.pid = machine->pid; @@ -1807,7 +2247,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p return err; } - err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL); + err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL); if (err < 0) { pr_err("Couldn't synthesize thread map.\n"); return err; @@ -1822,14 +2262,6 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p return 0; } -int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused, - struct perf_tool *tool __maybe_unused, - perf_event__handler_t process __maybe_unused, - struct machine *machine __maybe_unused) -{ - return 0; -} - extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE]; int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session, @@ -1894,3 +2326,81 @@ int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session free(ff.buf); return ret; } + +int perf_event__synthesize_for_pipe(struct perf_tool *tool, + struct perf_session *session, + struct perf_data *data, + perf_event__handler_t process) +{ + int err; + int ret = 0; + struct evlist *evlist = session->evlist; + + /* + * We need to synthesize events first, because some + * features works on top of them (on report side). + */ + err = perf_event__synthesize_attrs(tool, evlist, process); + if (err < 0) { + pr_err("Couldn't synthesize attrs.\n"); + return err; + } + ret += err; + + err = perf_event__synthesize_features(tool, session, evlist, process); + if (err < 0) { + pr_err("Couldn't synthesize features.\n"); + return err; + } + ret += err; + + if (have_tracepoints(&evlist->core.entries)) { + int fd = perf_data__fd(data); + + /* + * FIXME err <= 0 here actually means that + * there were no tracepoints so its not really + * an error, just that we don't need to + * synthesize anything. We really have to + * return this more properly and also + * propagate errors that now are calling die() + */ + err = perf_event__synthesize_tracing_data(tool, fd, evlist, + process); + if (err <= 0) { + pr_err("Couldn't record tracing data.\n"); + return err; + } + ret += err; + } + + return ret; +} + +int parse_synth_opt(char *synth) +{ + char *p, *q; + int ret = 0; + + if (synth == NULL) + return -1; + + for (q = synth; (p = strsep(&q, ",")); p = q) { + if (!strcasecmp(p, "no") || !strcasecmp(p, "none")) + return 0; + + if (!strcasecmp(p, "all")) + return PERF_SYNTH_ALL; + + if (!strcasecmp(p, "task")) + ret |= PERF_SYNTH_TASK; + else if (!strcasecmp(p, "mmap")) + ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP; + else if (!strcasecmp(p, "cgroup")) + ret |= PERF_SYNTH_CGROUP; + else + return -1; + } + + return ret; +} |
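
A note on the kernel-thread detection added to perf_event__get_comm_ids() above: a user task reports a VmPeak: line in /proc/<pid>/status, while a kernel thread has no address-space usage to report but still shows a Threads: line. A minimal standalone sketch of that heuristic (the helper name is_kernel_thread and the single 4 KiB read are illustrative, not part of the patch):

	#include <fcntl.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* True if /proc/<tid>/status lacks "VmPeak:" but has "Threads:". */
	static bool is_kernel_thread(pid_t tid)
	{
		char bf[4096];
		ssize_t n;
		int fd;

		snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
		fd = open(bf, O_RDONLY);
		if (fd < 0)
			return false;
		n = read(fd, bf, sizeof(bf) - 1);
		close(fd);
		if (n <= 0)
			return false;
		bf[n] = '\0';

		/* Kernel threads have no address space, hence no VmPeak: line. */
		return !strstr(bf, "VmPeak:") && strstr(bf, "Threads:");
	}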
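
The new perf_record_mmap2__read_build_id() lets a PERF_RECORD_MMAP2 record carry the DSO's build ID inline instead of the dev/inode triple, discriminated by a misc flag. On the consumer side the dispatch looks roughly like the sketch below; use_build_id() and lookup_dso_by_inode() are hypothetical stand-ins for the reader's own handling code:

	/* event is a union perf_event holding a PERF_RECORD_MMAP2 record. */
	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* the first build_id_size bytes of build_id are valid */
		use_build_id(event->mmap2.build_id, event->mmap2.build_id_size);
	} else {
		/* classic identification via device/inode numbers */
		lookup_dso_by_inode(event->mmap2.maj, event->mmap2.min,
				    event->mmap2.ino, event->mmap2.ino_generation);
	}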
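
perf_event__synthesize_cgroup() obtains the 64-bit cgroup id from a file handle rather than from stat(): the kernel encodes the id in an 8-byte handle returned by name_to_handle_at(). A self-contained sketch of the trick (the path is illustrative; the patch walks the perf_event cgroup mount):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Same shape as the patch's anonymous struct: a file_handle
		 * header immediately followed by 8 bytes of payload. */
		struct {
			struct file_handle fh;
			uint64_t cgroup_id;
		} handle = { .fh.handle_bytes = sizeof(uint64_t) };
		int mount_id;

		if (name_to_handle_at(AT_FDCWD, "/sys/fs/cgroup", &handle.fh,
				      &mount_id, 0) < 0) {
			perror("name_to_handle_at");
			return 1;
		}
		printf("cgroup id: %llu\n", (unsigned long long)handle.cgroup_id);
		return 0;
	}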
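
Replacing opendir()/readdir() with scandir() plus filter_task() returns only numeric (pid/tid) entries and yields the count up front; note the patch also passes NULL instead of alphasort, since synthesis does not need the entries sorted. Usage in isolation looks like this sketch:

	#include <ctype.h>
	#include <dirent.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Keep only entries whose name starts with a digit, as in the patch. */
	static int filter_task(const struct dirent *dirent)
	{
		return isdigit(dirent->d_name[0]);
	}

	int main(void)
	{
		struct dirent **dirent;
		int i, n;

		n = scandir("/proc", &dirent, filter_task, NULL);
		if (n < 0)
			return 1;

		for (i = 0; i < n; i++) {
			printf("%s\n", dirent[i]->d_name);
			free(dirent[i]);	/* scandir allocates each entry */
		}
		free(dirent);
		return 0;
	}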
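
The reworked cpu_map_data__alloc() chooses among three encodings: a fixed 8-byte range record for gap-free CPU sets, a u16-per-CPU list, or a bitmask of 32-bit words, whichever is smallest. The decision rule, with header overhead omitted for clarity (a simplified sketch, not the exact size math of the patch):

	#include <stddef.h>
	#include <stdint.h>

	enum cpu_map_type { CPU_LIST, CPU_MASK, CPU_RANGE };

	/* nr CPUs spanning [min_cpu, max_cpu]; mirrors cpu_map_data__alloc(). */
	static enum cpu_map_type pick_encoding(int nr, int min_cpu, int max_cpu)
	{
		size_t size_cpus = nr * sizeof(uint16_t);
		size_t size_mask = (max_cpu / 32 + 1) * sizeof(uint32_t);

		/* A consecutive range always fits in a single u64 payload. */
		if (max_cpu - min_cpu + 1 == nr)
			return CPU_RANGE;

		return size_cpus < size_mask ? CPU_LIST : CPU_MASK;
	}

As the patch's comment notes, with padding the 4-byte mask words are never larger than the old long-sized ones, which is why mask32_data fixes long_size at 4.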
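
perf_event__sample_event_size() now multiplies the group count by sample_read_value_size() because PERF_FORMAT_LOST appends one more u64 per value. The helper lives elsewhere in the perf tree; its assumed shape, consistent with the sizing above:

	#include <stddef.h>
	#include <stdint.h>
	#include <linux/perf_event.h>	/* PERF_FORMAT_LOST, recent headers */

	/* Per-value layout of a PERF_FORMAT_GROUP read; the id field is
	 * always present since PERF_FORMAT_ID is forced for PERF_SAMPLE_READ. */
	struct sample_read_value {
		uint64_t value;
		uint64_t id;
		uint64_t lost;	/* only serialized with PERF_FORMAT_LOST */
	};

	static inline size_t sample_read_value_size(uint64_t read_format)
	{
		return (read_format & PERF_FORMAT_LOST) ?
			sizeof(struct sample_read_value) :
			offsetof(struct sample_read_value, lost);
	}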
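
The __weak arch_perf_synthesize_sample_weight() writes a plain u64; an architecture that supports PERF_SAMPLE_WEIGHT_STRUCT can provide a strong definition that packs the sub-fields. A hedged sketch of such an override, following the x86 convention of the low 32 bits for weight and the next bits for instruction latency (treat the exact packing and the include paths as assumptions):

	#include <linux/perf_event.h>	/* PERF_SAMPLE_WEIGHT_STRUCT */
	#include "util/event.h"		/* struct perf_sample (perf-internal) */

	void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
						__u64 *array, u64 type)
	{
		*array = data->weight;

		/* WEIGHT_STRUCT splits the u64 into sub-fields. */
		if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
			*array &= 0xffffffffULL;
			*array |= ((__u64)data->ins_lat << 32);
		}
	}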
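
Finally, a usage note on parse_synth_opt(): flags accumulate across the comma-separated list, "mmap" implies "task", and "no"/"none"/"all" short-circuit. One subtlety worth a sketch: the argument must be writable, because strsep() splits the string in place, so passing a string literal is undefined behavior (the header path below is assumed):

	#include <assert.h>
	#include "util/synthetic-events.h"	/* parse_synth_opt(), PERF_SYNTH_* */

	int main(void)
	{
		char opt[] = "task,mmap";	/* mutable copy: strsep() writes NULs */
		char none[] = "none";

		assert(parse_synth_opt(opt) == (PERF_SYNTH_TASK | PERF_SYNTH_MMAP));
		assert(parse_synth_opt(none) == 0);
		return 0;
	}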