Diffstat (limited to 'tools/perf/builtin-record.c')
-rw-r--r-- | tools/perf/builtin-record.c | 1554
1 file changed, 1436 insertions, 118 deletions
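The diff below adds parallel trace streaming to perf record. A sketch of the command-line surface it introduces, inferred from the option-parsing hunks further down (the user spec syntax follows record__init_thread_user_masks: per-thread maps/affinity CPU masks separated by '/', specs separated by ':', with masks in the form accepted by perf_cpu_map__new; the workloads shown are illustrative only):

    # Parallel data streaming: one reader thread per CPU (default spec),
    # per core, per package, per NUMA node, or explicit user-defined masks.
    perf record --threads -a sleep 1
    perf record --threads=core -a sleep 1
    perf record --threads=package -a sleep 1
    perf record --threads=numa -a sleep 1
    perf record --threads=0-3/0-3:4-7/4-7 -a sleep 1

    # Other options added by this diff:
    perf record --off-cpu ...            # off-cpu analysis (needs BUILD_BPF_SKEL=1)
    perf record --debuginfod[=urls] ...  # debuginfod retrieval (or DEBUGINFOD_URLS)
    perf record -D 10-20,30-40 ...       # time ranges in which to enable events
    perf record --sample-identifier ...  # record the sample identifier

As the error paths in the diff note, parallel streaming mode is rejected or ignored when combined with pipe output, AUX area tracing, --aio, --affinity, --per-thread, --switch-output, --switch-output-event, and --timestamp-filename.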
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 0338b813585a..e128b855ddde 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -10,6 +10,7 @@ #include "util/build-id.h" #include <subcmd/parse-options.h> +#include <internal/xyarray.h> #include "util/parse-events.h" #include "util/config.h" @@ -21,6 +22,7 @@ #include "util/evsel.h" #include "util/debug.h" #include "util/mmap.h" +#include "util/mutex.h" #include "util/target.h" #include "util/session.h" #include "util/tool.h" @@ -49,8 +51,10 @@ #include "util/clockid.h" #include "util/pmu-hybrid.h" #include "util/evlist-hybrid.h" +#include "util/off_cpu.h" #include "asm/bug.h" #include "perf.h" +#include "cputopo.h" #include <errno.h> #include <inttypes.h> @@ -58,6 +62,9 @@ #include <poll.h> #include <pthread.h> #include <unistd.h> +#ifndef HAVE_GETTID +#include <syscall.h> +#endif #include <sched.h> #include <signal.h> #ifdef HAVE_EVENTFD_SUPPORT @@ -87,6 +94,62 @@ struct switch_output { int cur_file; }; +struct thread_mask { + struct mmap_cpu_mask maps; + struct mmap_cpu_mask affinity; +}; + +struct record_thread { + pid_t tid; + struct thread_mask *mask; + struct { + int msg[2]; + int ack[2]; + } pipes; + struct fdarray pollfd; + int ctlfd_pos; + int nr_mmaps; + struct mmap **maps; + struct mmap **overwrite_maps; + struct record *rec; + unsigned long long samples; + unsigned long waking; + u64 bytes_written; + u64 bytes_transferred; + u64 bytes_compressed; +}; + +static __thread struct record_thread *thread; + +enum thread_msg { + THREAD_MSG__UNDEFINED = 0, + THREAD_MSG__READY, + THREAD_MSG__MAX, +}; + +static const char *thread_msg_tags[THREAD_MSG__MAX] = { + "UNDEFINED", "READY" +}; + +enum thread_spec { + THREAD_SPEC__UNDEFINED = 0, + THREAD_SPEC__CPU, + THREAD_SPEC__CORE, + THREAD_SPEC__PACKAGE, + THREAD_SPEC__NUMA, + THREAD_SPEC__USER, + THREAD_SPEC__MAX, +}; + +static const char *thread_spec_tags[THREAD_SPEC__MAX] = { + "undefined", "cpu", "core", "package", "numa", "user" +}; + +struct pollfd_index_map { + int evlist_pollfd_index; + int thread_pollfd_index; +}; + struct record { struct perf_tool tool; struct record_opts opts; @@ -107,10 +170,17 @@ struct record { bool buildid_mmap; bool timestamp_filename; bool timestamp_boundary; + bool off_cpu; struct switch_output switch_output; unsigned long long samples; - struct mmap_cpu_mask affinity_mask; unsigned long output_max_size; /* = 0: unlimited */ + struct perf_debuginfod debuginfod; + int nr_threads; + struct thread_mask *thread_masks; + struct record_thread *thread_data; + struct pollfd_index_map *index_map; + size_t index_map_sz; + size_t index_map_cnt; }; static volatile int done; @@ -123,6 +193,18 @@ static const char *affinity_tags[PERF_AFFINITY_MAX] = { "SYS", "NODE", "CPU" }; +#ifndef HAVE_GETTID +static inline pid_t gettid(void) +{ + return (pid_t)syscall(__NR_gettid); +} +#endif + +static int record__threads_enabled(struct record *rec) +{ + return rec->opts.threads_spec; +} + static bool switch_output_signal(struct record *rec) { return rec->switch_output.signal && @@ -142,10 +224,22 @@ static bool switch_output_time(struct record *rec) trigger_is_ready(&switch_output_trigger); } +static u64 record__bytes_written(struct record *rec) +{ + int t; + u64 bytes_written = rec->bytes_written; + struct record_thread *thread_data = rec->thread_data; + + for (t = 0; t < rec->nr_threads; t++) + bytes_written += thread_data[t].bytes_written; + + return bytes_written; +} + static bool record__output_max_size_exceeded(struct record 
*rec) { return rec->output_max_size && - (rec->bytes_written >= rec->output_max_size); + (record__bytes_written(rec) >= rec->output_max_size); } static int record__write(struct record *rec, struct mmap *map __maybe_unused, @@ -153,17 +247,23 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused, { struct perf_data_file *file = &rec->session->data->file; + if (map && map->file) + file = map->file; + if (perf_data_file__write(file, bf, size) < 0) { pr_err("failed to write perf data, error: %m\n"); return -1; } - rec->bytes_written += size; + if (map && map->file) + thread->bytes_written += size; + else + rec->bytes_written += size; if (record__output_max_size_exceeded(rec) && !done) { fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB)," " stopping session ]\n", - rec->bytes_written >> 10); + record__bytes_written(rec) >> 10); done = 1; } @@ -175,8 +275,8 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused, static int record__aio_enabled(struct record *rec); static int record__comp_enabled(struct record *rec); -static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size, - void *src, size_t src_size); +static size_t zstd_compress(struct perf_session *session, struct mmap *map, + void *dst, size_t dst_size, void *src, size_t src_size); #ifdef HAVE_AIO_SUPPORT static int record__aio_write(struct aiocb *cblock, int trace_fd, @@ -310,7 +410,7 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size */ if (record__comp_enabled(aio->rec)) { - size = zstd_compress(aio->rec->session, aio->data + aio->size, + size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size, mmap__mmap_len(map) - aio->size, buf, size); } else { @@ -518,17 +618,18 @@ static int process_synthesized_event(struct perf_tool *tool, return record__write(rec, NULL, event, event->header.size); } +static struct mutex synth_lock; + static int process_locked_synthesized_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { - static pthread_mutex_t synth_lock = PTHREAD_MUTEX_INITIALIZER; int ret; - pthread_mutex_lock(&synth_lock); + mutex_lock(&synth_lock); ret = process_synthesized_event(tool, event, sample, machine); - pthread_mutex_unlock(&synth_lock); + mutex_unlock(&synth_lock); return ret; } @@ -537,18 +638,18 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size) struct record *rec = to; if (record__comp_enabled(rec)) { - size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size); + size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size); bf = map->data; } - rec->samples++; + thread->samples++; return record__write(rec, map, bf, size); } static volatile int signr = -1; static volatile int child_finished; #ifdef HAVE_EVENTFD_SUPPORT -static int done_fd = -1; +static volatile int done_fd = -1; #endif static void sig_handler(int sig) @@ -560,19 +661,24 @@ static void sig_handler(int sig) done = 1; #ifdef HAVE_EVENTFD_SUPPORT -{ - u64 tmp = 1; - /* - * It is possible for this signal handler to run after done is checked - * in the main loop, but before the perf counter fds are polled. If this - * happens, the poll() will continue to wait even though done is set, - * and will only break out if either another signal is received, or the - * counters are ready for read. 
To ensure the poll() doesn't sleep when - * done is set, use an eventfd (done_fd) to wake up the poll(). - */ - if (write(done_fd, &tmp, sizeof(tmp)) < 0) - pr_err("failed to signal wakeup fd, error: %m\n"); -} + if (done_fd >= 0) { + u64 tmp = 1; + int orig_errno = errno; + + /* + * It is possible for this signal handler to run after done is + * checked in the main loop, but before the perf counter fds are + * polled. If this happens, the poll() will continue to wait + * even though done is set, and will only break out if either + * another signal is received, or the counters are ready for + * read. To ensure the poll() doesn't sleep when done is set, + * use an eventfd (done_fd) to wake up the poll(). + */ + if (write(done_fd, &tmp, sizeof(tmp)) < 0) + pr_err("failed to signal wakeup fd, error: %m\n"); + + errno = orig_errno; + } #endif // HAVE_EVENTFD_SUPPORT } @@ -717,6 +823,12 @@ static int record__auxtrace_init(struct record *rec) { int err; + if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts) + && record__threads_enabled(rec)) { + pr_err("AUX area tracing options are not available in parallel streaming mode.\n"); + return -EINVAL; + } + if (!rec->itr) { rec->itr = auxtrace_record__init(rec->evlist, &err); if (err) @@ -775,7 +887,6 @@ static int record__auxtrace_init(struct record *rec __maybe_unused) static int record__config_text_poke(struct evlist *evlist) { struct evsel *evsel; - int err; /* Nothing to do if text poke is already configured */ evlist__for_each_entry(evlist, evsel) { @@ -783,32 +894,23 @@ static int record__config_text_poke(struct evlist *evlist) return 0; } - err = parse_events(evlist, "dummy:u", NULL); - if (err) - return err; - - evsel = evlist__last(evlist); + evsel = evlist__add_dummy_on_all_cpus(evlist); + if (!evsel) + return -ENOMEM; - evsel->core.attr.freq = 0; - evsel->core.attr.sample_period = 1; evsel->core.attr.text_poke = 1; evsel->core.attr.ksymbol = 1; - - evsel->core.system_wide = true; - evsel->no_aux_samples = true; evsel->immediate = true; - - /* Text poke must be collected on all CPUs */ - perf_cpu_map__put(evsel->core.own_cpus); - evsel->core.own_cpus = perf_cpu_map__new(NULL); - perf_cpu_map__put(evsel->core.cpus); - evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus); - evsel__set_sample_bit(evsel, TIME); return 0; } +static int record__config_off_cpu(struct record *rec) +{ + return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); +} + static bool record__kcore_readable(struct machine *machine) { char kcore[PATH_MAX]; @@ -840,9 +942,286 @@ static int record__kcore_copy(struct machine *machine, struct perf_data *data) return kcore_copy(from_dir, kcore_dir); } +static void record__thread_data_init_pipes(struct record_thread *thread_data) +{ + thread_data->pipes.msg[0] = -1; + thread_data->pipes.msg[1] = -1; + thread_data->pipes.ack[0] = -1; + thread_data->pipes.ack[1] = -1; +} + +static int record__thread_data_open_pipes(struct record_thread *thread_data) +{ + if (pipe(thread_data->pipes.msg)) + return -EINVAL; + + if (pipe(thread_data->pipes.ack)) { + close(thread_data->pipes.msg[0]); + thread_data->pipes.msg[0] = -1; + close(thread_data->pipes.msg[1]); + thread_data->pipes.msg[1] = -1; + return -EINVAL; + } + + pr_debug2("thread_data[%p]: msg=[%d,%d], ack=[%d,%d]\n", thread_data, + thread_data->pipes.msg[0], thread_data->pipes.msg[1], + thread_data->pipes.ack[0], thread_data->pipes.ack[1]); + + return 0; +} + +static void record__thread_data_close_pipes(struct record_thread *thread_data) +{ + if 
(thread_data->pipes.msg[0] != -1) { + close(thread_data->pipes.msg[0]); + thread_data->pipes.msg[0] = -1; + } + if (thread_data->pipes.msg[1] != -1) { + close(thread_data->pipes.msg[1]); + thread_data->pipes.msg[1] = -1; + } + if (thread_data->pipes.ack[0] != -1) { + close(thread_data->pipes.ack[0]); + thread_data->pipes.ack[0] = -1; + } + if (thread_data->pipes.ack[1] != -1) { + close(thread_data->pipes.ack[1]); + thread_data->pipes.ack[1] = -1; + } +} + +static bool evlist__per_thread(struct evlist *evlist) +{ + return cpu_map__is_dummy(evlist->core.user_requested_cpus); +} + +static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist) +{ + int m, tm, nr_mmaps = evlist->core.nr_mmaps; + struct mmap *mmap = evlist->mmap; + struct mmap *overwrite_mmap = evlist->overwrite_mmap; + struct perf_cpu_map *cpus = evlist->core.all_cpus; + bool per_thread = evlist__per_thread(evlist); + + if (per_thread) + thread_data->nr_mmaps = nr_mmaps; + else + thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits, + thread_data->mask->maps.nbits); + if (mmap) { + thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); + if (!thread_data->maps) + return -ENOMEM; + } + if (overwrite_mmap) { + thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); + if (!thread_data->overwrite_maps) { + zfree(&thread_data->maps); + return -ENOMEM; + } + } + pr_debug2("thread_data[%p]: nr_mmaps=%d, maps=%p, ow_maps=%p\n", thread_data, + thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps); + + for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) { + if (per_thread || + test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) { + if (thread_data->maps) { + thread_data->maps[tm] = &mmap[m]; + pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n", + thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m); + } + if (thread_data->overwrite_maps) { + thread_data->overwrite_maps[tm] = &overwrite_mmap[m]; + pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n", + thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m); + } + tm++; + } + } + + return 0; +} + +static int record__thread_data_init_pollfd(struct record_thread *thread_data, struct evlist *evlist) +{ + int f, tm, pos; + struct mmap *map, *overwrite_map; + + fdarray__init(&thread_data->pollfd, 64); + + for (tm = 0; tm < thread_data->nr_mmaps; tm++) { + map = thread_data->maps ? thread_data->maps[tm] : NULL; + overwrite_map = thread_data->overwrite_maps ? 
+ thread_data->overwrite_maps[tm] : NULL; + + for (f = 0; f < evlist->core.pollfd.nr; f++) { + void *ptr = evlist->core.pollfd.priv[f].ptr; + + if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) { + pos = fdarray__dup_entry_from(&thread_data->pollfd, f, + &evlist->core.pollfd); + if (pos < 0) + return pos; + pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n", + thread_data, pos, evlist->core.pollfd.entries[f].fd); + } + } + } + + return 0; +} + +static void record__free_thread_data(struct record *rec) +{ + int t; + struct record_thread *thread_data = rec->thread_data; + + if (thread_data == NULL) + return; + + for (t = 0; t < rec->nr_threads; t++) { + record__thread_data_close_pipes(&thread_data[t]); + zfree(&thread_data[t].maps); + zfree(&thread_data[t].overwrite_maps); + fdarray__exit(&thread_data[t].pollfd); + } + + zfree(&rec->thread_data); +} + +static int record__map_thread_evlist_pollfd_indexes(struct record *rec, + int evlist_pollfd_index, + int thread_pollfd_index) +{ + size_t x = rec->index_map_cnt; + + if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL)) + return -ENOMEM; + rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index; + rec->index_map[x].thread_pollfd_index = thread_pollfd_index; + rec->index_map_cnt += 1; + return 0; +} + +static int record__update_evlist_pollfd_from_thread(struct record *rec, + struct evlist *evlist, + struct record_thread *thread_data) +{ + struct pollfd *e_entries = evlist->core.pollfd.entries; + struct pollfd *t_entries = thread_data->pollfd.entries; + int err = 0; + size_t i; + + for (i = 0; i < rec->index_map_cnt; i++) { + int e_pos = rec->index_map[i].evlist_pollfd_index; + int t_pos = rec->index_map[i].thread_pollfd_index; + + if (e_entries[e_pos].fd != t_entries[t_pos].fd || + e_entries[e_pos].events != t_entries[t_pos].events) { + pr_err("Thread and evlist pollfd index mismatch\n"); + err = -EINVAL; + continue; + } + e_entries[e_pos].revents = t_entries[t_pos].revents; + } + return err; +} + +static int record__dup_non_perf_events(struct record *rec, + struct evlist *evlist, + struct record_thread *thread_data) +{ + struct fdarray *fda = &evlist->core.pollfd; + int i, ret; + + for (i = 0; i < fda->nr; i++) { + if (!(fda->priv[i].flags & fdarray_flag__non_perf_event)) + continue; + ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda); + if (ret < 0) { + pr_err("Failed to duplicate descriptor in main thread pollfd\n"); + return ret; + } + pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n", + thread_data, ret, fda->entries[i].fd); + ret = record__map_thread_evlist_pollfd_indexes(rec, i, ret); + if (ret < 0) { + pr_err("Failed to map thread and evlist pollfd indexes\n"); + return ret; + } + } + return 0; +} + +static int record__alloc_thread_data(struct record *rec, struct evlist *evlist) +{ + int t, ret; + struct record_thread *thread_data; + + rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data))); + if (!rec->thread_data) { + pr_err("Failed to allocate thread data\n"); + return -ENOMEM; + } + thread_data = rec->thread_data; + + for (t = 0; t < rec->nr_threads; t++) + record__thread_data_init_pipes(&thread_data[t]); + + for (t = 0; t < rec->nr_threads; t++) { + thread_data[t].rec = rec; + thread_data[t].mask = &rec->thread_masks[t]; + ret = record__thread_data_init_maps(&thread_data[t], evlist); + if (ret) { + pr_err("Failed to initialize thread[%d] maps\n", t); + goto out_free; + } + ret = record__thread_data_init_pollfd(&thread_data[t], evlist); + if 
(ret) { + pr_err("Failed to initialize thread[%d] pollfd\n", t); + goto out_free; + } + if (t) { + thread_data[t].tid = -1; + ret = record__thread_data_open_pipes(&thread_data[t]); + if (ret) { + pr_err("Failed to open thread[%d] communication pipes\n", t); + goto out_free; + } + ret = fdarray__add(&thread_data[t].pollfd, thread_data[t].pipes.msg[0], + POLLIN | POLLERR | POLLHUP, fdarray_flag__nonfilterable); + if (ret < 0) { + pr_err("Failed to add descriptor to thread[%d] pollfd\n", t); + goto out_free; + } + thread_data[t].ctlfd_pos = ret; + pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n", + thread_data, thread_data[t].ctlfd_pos, + thread_data[t].pipes.msg[0]); + } else { + thread_data[t].tid = gettid(); + + ret = record__dup_non_perf_events(rec, evlist, &thread_data[t]); + if (ret < 0) + goto out_free; + + thread_data[t].ctlfd_pos = -1; /* Not used */ + } + } + + return 0; + +out_free: + record__free_thread_data(rec); + + return ret; +} + static int record__mmap_evlist(struct record *rec, struct evlist *evlist) { + int i, ret; struct record_opts *opts = &rec->opts; bool auxtrace_overwrite = opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode; @@ -873,6 +1252,28 @@ static int record__mmap_evlist(struct record *rec, return -EINVAL; } } + + if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack)) + return -1; + + ret = record__alloc_thread_data(rec, evlist); + if (ret) + return ret; + + if (record__threads_enabled(rec)) { + ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps); + if (ret) { + pr_err("Failed to create data directory: %s\n", strerror(-ret)); + return ret; + } + for (i = 0; i < evlist->core.nr_mmaps; i++) { + if (evlist->mmap) + evlist->mmap[i].file = &rec->data.dir.files[i]; + if (evlist->overwrite_mmap) + evlist->overwrite_mmap[i].file = &rec->data.dir.files[i]; + } + } + return 0; } @@ -1061,18 +1462,25 @@ static struct perf_event_header finished_round_event = { .type = PERF_RECORD_FINISHED_ROUND, }; +static struct perf_event_header finished_init_event = { + .size = sizeof(struct perf_event_header), + .type = PERF_RECORD_FINISHED_INIT, +}; + static void record__adjust_affinity(struct record *rec, struct mmap *map) { if (rec->opts.affinity != PERF_AFFINITY_SYS && - !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits, - rec->affinity_mask.nbits)) { - bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits); - bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits, - map->affinity_mask.bits, rec->affinity_mask.nbits); - sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask), - (cpu_set_t *)rec->affinity_mask.bits); - if (verbose == 2) - mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread"); + !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits, + thread->mask->affinity.nbits)) { + bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits); + bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits, + map->affinity_mask.bits, thread->mask->affinity.nbits); + sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), + (cpu_set_t *)thread->mask->affinity.bits); + if (verbose == 2) { + pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu()); + mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity"); + } } } @@ -1092,17 +1500,26 @@ static size_t process_comp_header(void *record, size_t increment) return size; } -static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size, - void *src, size_t src_size) +static size_t 
zstd_compress(struct perf_session *session, struct mmap *map, + void *dst, size_t dst_size, void *src, size_t src_size) { size_t compressed; size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1; + struct zstd_data *zstd_data = &session->zstd_data; - compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size, + if (map && map->file) + zstd_data = &map->zstd_data; + + compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size, max_record_size, process_comp_header); - session->bytes_transferred += src_size; - session->bytes_compressed += compressed; + if (map && map->file) { + thread->bytes_transferred += src_size; + thread->bytes_compressed += compressed; + } else { + session->bytes_transferred += src_size; + session->bytes_compressed += compressed; + } return compressed; } @@ -1113,14 +1530,17 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist, u64 bytes_written = rec->bytes_written; int i; int rc = 0; - struct mmap *maps; + int nr_mmaps; + struct mmap **maps; int trace_fd = rec->data.file.fd; off_t off = 0; if (!evlist) return 0; - maps = overwrite ? evlist->overwrite_mmap : evlist->mmap; + nr_mmaps = thread->nr_mmaps; + maps = overwrite ? thread->overwrite_maps : thread->maps; + if (!maps) return 0; @@ -1130,9 +1550,9 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist, if (record__aio_enabled(rec)) off = record__aio_get_pos(trace_fd); - for (i = 0; i < evlist->core.nr_mmaps; i++) { + for (i = 0; i < nr_mmaps; i++) { u64 flush = 0; - struct mmap *map = &maps[i]; + struct mmap *map = maps[i]; if (map->core.base) { record__adjust_affinity(rec, map); @@ -1174,8 +1594,12 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist, /* * Mark the round finished in case we wrote * at least one event. + * + * No need for round events in directory mode, + * because per-cpu maps and files have data + * sorted by kernel. */ - if (bytes_written != rec->bytes_written) + if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written) rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event)); if (overwrite) @@ -1195,6 +1619,77 @@ static int record__mmap_read_all(struct record *rec, bool synch) return record__mmap_read_evlist(rec, rec->evlist, true, synch); } +static void record__thread_munmap_filtered(struct fdarray *fda, int fd, + void *arg __maybe_unused) +{ + struct perf_mmap *map = fda->priv[fd].ptr; + + if (map) + perf_mmap__put(map); +} + +static void *record__thread(void *arg) +{ + enum thread_msg msg = THREAD_MSG__READY; + bool terminate = false; + struct fdarray *pollfd; + int err, ctlfd_pos; + + thread = arg; + thread->tid = gettid(); + + err = write(thread->pipes.ack[1], &msg, sizeof(msg)); + if (err == -1) + pr_warning("threads[%d]: failed to notify on start: %s\n", + thread->tid, strerror(errno)); + + pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); + + pollfd = &thread->pollfd; + ctlfd_pos = thread->ctlfd_pos; + + for (;;) { + unsigned long long hits = thread->samples; + + if (record__mmap_read_all(thread->rec, false) < 0 || terminate) + break; + + if (hits == thread->samples) { + + err = fdarray__poll(pollfd, -1); + /* + * Propagate error, only if there's any. Ignore positive + * number of returned events and interrupt error. 
+ */ + if (err > 0 || (err < 0 && errno == EINTR)) + err = 0; + thread->waking++; + + if (fdarray__filter(pollfd, POLLERR | POLLHUP, + record__thread_munmap_filtered, NULL) == 0) + break; + } + + if (pollfd->entries[ctlfd_pos].revents & POLLHUP) { + terminate = true; + close(thread->pipes.msg[0]); + thread->pipes.msg[0] = -1; + pollfd->entries[ctlfd_pos].fd = -1; + pollfd->entries[ctlfd_pos].events = 0; + } + + pollfd->entries[ctlfd_pos].revents = 0; + } + record__mmap_read_all(thread->rec, true); + + err = write(thread->pipes.ack[1], &msg, sizeof(msg)); + if (err == -1) + pr_warning("threads[%d]: failed to notify on termination: %s\n", + thread->tid, strerror(errno)); + + return NULL; +} + static void record__init_features(struct record *rec) { struct perf_session *session = rec->session; @@ -1221,7 +1716,9 @@ static void record__init_features(struct record *rec) if (!rec->opts.use_clockid) perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA); - perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); + if (!record__threads_enabled(rec)) + perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); + if (!record__comp_enabled(rec)) perf_header__clear_feat(&session->header, HEADER_COMPRESSED); @@ -1231,6 +1728,7 @@ static void record__init_features(struct record *rec) static void record__finish_output(struct record *rec) { + int i; struct perf_data *data = &rec->data; int fd = perf_data__fd(data); @@ -1239,6 +1737,10 @@ record__finish_output(struct record *rec) rec->session->header.data_size += rec->bytes_written; data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR); + if (record__threads_enabled(rec)) { + for (i = 0; i < data->dir.nr; i++) + data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR); + } if (!rec->no_buildid) { process_buildids(rec); @@ -1273,6 +1775,14 @@ static int record__synthesize_workload(struct record *rec, bool tail) return err; } +static int write_finished_init(struct record *rec, bool tail) +{ + if (rec->opts.tail_synthesize != tail) + return 0; + + return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event)); +} + static int record__synthesize(struct record *rec, bool tail); static int @@ -1287,6 +1797,8 @@ record__switch_output(struct record *rec, bool at_exit) record__aio_mmap_read_sync(rec); + write_finished_init(rec, true); + record__synthesize(rec, true); if (target__none(&rec->opts.target)) record__synthesize_workload(rec, true); @@ -1341,10 +1853,79 @@ record__switch_output(struct record *rec, bool at_exit) */ if (target__none(&rec->opts.target)) record__synthesize_workload(rec, false); + write_finished_init(rec, false); } return fd; } +static void __record__read_lost_samples(struct record *rec, struct evsel *evsel, + struct perf_record_lost_samples *lost, + int cpu_idx, int thread_idx) +{ + struct perf_counts_values count; + struct perf_sample_id *sid; + struct perf_sample sample = {}; + int id_hdr_size; + + if (perf_evsel__read(&evsel->core, cpu_idx, thread_idx, &count) < 0) { + pr_err("read LOST count failed\n"); + return; + } + + if (count.lost == 0) + return; + + lost->lost = count.lost; + if (evsel->core.ids) { + sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx); + sample.id = sid->id; + } + + id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1), + evsel->core.attr.sample_type, &sample); + lost->header.size = sizeof(*lost) + id_hdr_size; + record__write(rec, NULL, lost, lost->header.size); +} + +static void record__read_lost_samples(struct record *rec) +{ + struct 
perf_session *session = rec->session; + struct perf_record_lost_samples *lost; + struct evsel *evsel; + + /* there was an error during record__open */ + if (session->evlist == NULL) + return; + + lost = zalloc(PERF_SAMPLE_MAX_SIZE); + if (lost == NULL) { + pr_debug("Memory allocation failed\n"); + return; + } + + lost->header.type = PERF_RECORD_LOST_SAMPLES; + + evlist__for_each_entry(session->evlist, evsel) { + struct xyarray *xy = evsel->core.sample_id; + + if (xy == NULL || evsel->core.fd == NULL) + continue; + if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) || + xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) { + pr_debug("Unmatched FD vs. sample ID: skip reading LOST count\n"); + continue; + } + + for (int x = 0; x < xyarray__max_x(xy); x++) { + for (int y = 0; y < xyarray__max_y(xy); y++) { + __record__read_lost_samples(rec, evsel, lost, x, y); + } + } + } + free(lost); + +} + static volatile int workload_exec_errno; /* @@ -1411,13 +1992,11 @@ static int record__synthesize(struct record *rec, bool tail) goto out; /* Synthesize id_index before auxtrace_info */ - if (rec->opts.auxtrace_sample_mode || rec->opts.full_auxtrace) { - err = perf_event__synthesize_id_index(tool, - process_synthesized_event, - session->evlist, machine); - if (err) - goto out; - } + err = perf_event__synthesize_id_index(tool, + process_synthesized_event, + session->evlist, machine); + if (err) + goto out; if (rec->opts.full_auxtrace) { err = perf_event__synthesize_auxtrace_info(rec->itr, tool, @@ -1460,7 +2039,7 @@ static int record__synthesize(struct record *rec, bool tail) return err; } - err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus, + err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus, process_synthesized_event, NULL); if (err < 0) { pr_err("Couldn't synthesize cpu map.\n"); @@ -1469,17 +2048,22 @@ static int record__synthesize(struct record *rec, bool tail) err = perf_event__synthesize_bpf_events(session, process_synthesized_event, machine, opts); - if (err < 0) + if (err < 0) { pr_warning("Couldn't synthesize bpf events.\n"); + err = 0; + } if (rec->opts.synth & PERF_SYNTH_CGROUP) { err = perf_event__synthesize_cgroups(tool, process_synthesized_event, machine); - if (err < 0) + if (err < 0) { pr_warning("Couldn't synthesize cgroup events.\n"); + err = 0; + } } if (rec->opts.nr_threads_synthesize > 1) { + mutex_init(&synth_lock); perf_set_multithreaded(); f = process_locked_synthesized_event; } @@ -1493,8 +2077,10 @@ static int record__synthesize(struct record *rec, bool tail) rec->opts.nr_threads_synthesize); } - if (rec->opts.nr_threads_synthesize > 1) + if (rec->opts.nr_threads_synthesize > 1) { perf_set_singlethreaded(); + mutex_destroy(&synth_lock); + } out: return err; @@ -1618,11 +2204,129 @@ static void record__uniquify_name(struct record *rec) } } +static int record__terminate_thread(struct record_thread *thread_data) +{ + int err; + enum thread_msg ack = THREAD_MSG__UNDEFINED; + pid_t tid = thread_data->tid; + + close(thread_data->pipes.msg[1]); + thread_data->pipes.msg[1] = -1; + err = read(thread_data->pipes.ack[0], &ack, sizeof(ack)); + if (err > 0) + pr_debug2("threads[%d]: sent %s\n", tid, thread_msg_tags[ack]); + else + pr_warning("threads[%d]: failed to receive termination notification from %d\n", + thread->tid, tid); + + return 0; +} + +static int record__start_threads(struct record *rec) +{ + int t, tt, err, ret = 0, nr_threads = rec->nr_threads; + struct record_thread *thread_data = rec->thread_data; + sigset_t full, 
mask; + pthread_t handle; + pthread_attr_t attrs; + + thread = &thread_data[0]; + + if (!record__threads_enabled(rec)) + return 0; + + sigfillset(&full); + if (sigprocmask(SIG_SETMASK, &full, &mask)) { + pr_err("Failed to block signals on threads start: %s\n", strerror(errno)); + return -1; + } + + pthread_attr_init(&attrs); + pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED); + + for (t = 1; t < nr_threads; t++) { + enum thread_msg msg = THREAD_MSG__UNDEFINED; + +#ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP + pthread_attr_setaffinity_np(&attrs, + MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)), + (cpu_set_t *)(thread_data[t].mask->affinity.bits)); +#endif + if (pthread_create(&handle, &attrs, record__thread, &thread_data[t])) { + for (tt = 1; tt < t; tt++) + record__terminate_thread(&thread_data[t]); + pr_err("Failed to start threads: %s\n", strerror(errno)); + ret = -1; + goto out_err; + } + + err = read(thread_data[t].pipes.ack[0], &msg, sizeof(msg)); + if (err > 0) + pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid, + thread_msg_tags[msg]); + else + pr_warning("threads[%d]: failed to receive start notification from %d\n", + thread->tid, rec->thread_data[t].tid); + } + + sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), + (cpu_set_t *)thread->mask->affinity.bits); + + pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); + +out_err: + pthread_attr_destroy(&attrs); + + if (sigprocmask(SIG_SETMASK, &mask, NULL)) { + pr_err("Failed to unblock signals on threads start: %s\n", strerror(errno)); + ret = -1; + } + + return ret; +} + +static int record__stop_threads(struct record *rec) +{ + int t; + struct record_thread *thread_data = rec->thread_data; + + for (t = 1; t < rec->nr_threads; t++) + record__terminate_thread(&thread_data[t]); + + for (t = 0; t < rec->nr_threads; t++) { + rec->samples += thread_data[t].samples; + if (!record__threads_enabled(rec)) + continue; + rec->session->bytes_transferred += thread_data[t].bytes_transferred; + rec->session->bytes_compressed += thread_data[t].bytes_compressed; + pr_debug("threads[%d]: samples=%lld, wakes=%ld, ", thread_data[t].tid, + thread_data[t].samples, thread_data[t].waking); + if (thread_data[t].bytes_transferred && thread_data[t].bytes_compressed) + pr_debug("transferred=%" PRIu64 ", compressed=%" PRIu64 "\n", + thread_data[t].bytes_transferred, thread_data[t].bytes_compressed); + else + pr_debug("written=%" PRIu64 "\n", thread_data[t].bytes_written); + } + + return 0; +} + +static unsigned long record__waking(struct record *rec) +{ + int t; + unsigned long waking = 0; + struct record_thread *thread_data = rec->thread_data; + + for (t = 0; t < rec->nr_threads; t++) + waking += thread_data[t].waking; + + return waking; +} + static int __cmd_record(struct record *rec, int argc, const char **argv) { int err; int status = 0; - unsigned long waking = 0; const bool forks = argc > 0; struct perf_tool *tool = &rec->tool; struct record_opts *opts = &rec->opts; @@ -1667,6 +2371,17 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) return PTR_ERR(session); } + if (record__threads_enabled(rec)) { + if (perf_data__is_pipe(&rec->data)) { + pr_err("Parallel trace streaming is not available in pipe mode.\n"); + return -1; + } + if (rec->opts.full_auxtrace) { + pr_err("Parallel trace streaming is not available in AUX area tracing mode.\n"); + return -1; + } + } + fd = perf_data__fd(data); rec->session = session; @@ -1724,17 +2439,21 @@ static int __cmd_record(struct record 
*rec, int argc, const char **argv) record__uniquify_name(rec); + /* Debug message used by test scripts */ + pr_debug3("perf record opening and mmapping events\n"); if (record__open(rec) != 0) { err = -1; - goto out_child; + goto out_free_threads; } + /* Debug message used by test scripts */ + pr_debug3("perf record done opening and mmapping events\n"); session->header.env.comp_mmap_len = session->evlist->core.mmap_len; if (rec->opts.kcore) { err = record__kcore_copy(&session->machines.host, data); if (err) { pr_err("ERROR: Failed to copy kcore\n"); - goto out_child; + goto out_free_threads; } } @@ -1745,7 +2464,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf)); pr_err("ERROR: Apply config to BPF failed: %s\n", errbuf); - goto out_child; + goto out_free_threads; } /* @@ -1763,11 +2482,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) if (data->is_pipe) { err = perf_header__write_pipe(fd); if (err < 0) - goto out_child; + goto out_free_threads; } else { err = perf_session__write_header(session, rec->evlist, fd, false); if (err < 0) - goto out_child; + goto out_free_threads; } err = -1; @@ -1775,16 +2494,16 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { pr_err("Couldn't generate buildids. " "Use --no-buildid to profile anyway.\n"); - goto out_child; + goto out_free_threads; } err = record__setup_sb_evlist(rec); if (err) - goto out_child; + goto out_free_threads; err = record__synthesize(rec, false); if (err < 0) - goto out_child; + goto out_free_threads; if (rec->realtime_prio) { struct sched_param param; @@ -1793,10 +2512,13 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { pr_err("Could not set realtime priority.\n"); err = -1; - goto out_child; + goto out_free_threads; } } + if (record__start_threads(rec)) + goto out_free_threads; + /* * When perf is starting the traced process, all the events * (apart from group members) have enable_on_exec=1 set, @@ -1854,9 +2576,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) evlist__start_workload(rec->evlist); } - if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack)) - goto out_child; - if (opts->initial_delay) { pr_info(EVLIST_DISABLED_MSG); if (opts->initial_delay > 0) { @@ -1866,11 +2585,28 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) } } + err = event_enable_timer__start(rec->evlist->eet); + if (err) + goto out_child; + + /* Debug message used by test scripts */ + pr_debug3("perf record has started\n"); + fflush(stderr); + trigger_ready(&auxtrace_snapshot_trigger); trigger_ready(&switch_output_trigger); perf_hooks__invoke_record_start(); + + /* + * Must write FINISHED_INIT so it will be seen after all other + * synthesized user events, but before any regular events. 
+ */ + err = write_finished_init(rec, false); + if (err < 0) + goto out_child; + for (;;) { - unsigned long long hits = rec->samples; + unsigned long long hits = thread->samples; /* * rec->evlist->bkw_mmap_state is possible to be @@ -1924,8 +2660,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) if (!quiet) fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n", - waking); - waking = 0; + record__waking(rec)); + thread->waking = 0; fd = record__switch_output(rec, false); if (fd < 0) { pr_err("Failed to switch to new file\n"); @@ -1939,20 +2675,25 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) alarm(rec->switch_output.time); } - if (hits == rec->samples) { + if (hits == thread->samples) { if (done || draining) break; - err = evlist__poll(rec->evlist, -1); + err = fdarray__poll(&thread->pollfd, -1); /* * Propagate error, only if there's any. Ignore positive * number of returned events and interrupt error. */ if (err > 0 || (err < 0 && errno == EINTR)) err = 0; - waking++; + thread->waking++; - if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0) + if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP, + record__thread_munmap_filtered, NULL) == 0) draining = true; + + err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread); + if (err) + goto out_child; } if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) { @@ -1975,6 +2716,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) } } + err = event_enable_timer__process(rec->evlist->eet); + if (err < 0) + goto out_child; + if (err) { + err = 0; + done = 1; + } + /* * When perf is starting the traced process, at the end events * die with the process and we wait for that. Thus no need to @@ -2006,14 +2755,20 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) } if (!quiet) - fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking); + fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", + record__waking(rec)); + + write_finished_init(rec, true); if (target__none(&rec->opts.target)) record__synthesize_workload(rec, true); out_child: - evlist__finalize_ctlfd(rec->evlist); + record__stop_threads(rec); record__mmap_read_all(rec, true); +out_free_threads: + record__free_thread_data(rec); + evlist__finalize_ctlfd(rec->evlist); record__aio_mmap_read_sync(rec); if (rec->session->bytes_transferred && rec->session->bytes_compressed) { @@ -2038,6 +2793,10 @@ out_child: } else status = err; + if (rec->off_cpu) + rec->bytes_written += off_cpu_write(rec->session); + + record__read_lost_samples(rec); record__synthesize(rec, true); /* this will be recalculated during process_buildids() */ rec->samples = 0; @@ -2080,8 +2839,12 @@ out_child: out_delete_session: #ifdef HAVE_EVENTFD_SUPPORT - if (done_fd >= 0) - close(done_fd); + if (done_fd >= 0) { + fd = done_fd; + done_fd = -1; + + close(fd); + } #endif zstd_fini(&session->zstd_data); perf_session__delete(session); @@ -2177,10 +2940,22 @@ static int perf_record_config(const char *var, const char *value, void *cb) rec->opts.nr_cblocks = nr_cblocks_default; } #endif + if (!strcmp(var, "record.debuginfod")) { + rec->debuginfod.urls = strdup(value); + if (!rec->debuginfod.urls) + return -ENOMEM; + rec->debuginfod.set = true; + } return 0; } +static int record__parse_event_enable_time(const struct option *opt, const char *str, int unset) +{ + struct record *rec = (struct record *)opt->value; + + return 
evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset); +} static int record__parse_affinity(const struct option *opt, const char *str, int unset) { @@ -2197,6 +2972,78 @@ static int record__parse_affinity(const struct option *opt, const char *str, int return 0; } +static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits) +{ + mask->nbits = nr_bits; + mask->bits = bitmap_zalloc(mask->nbits); + if (!mask->bits) + return -ENOMEM; + + return 0; +} + +static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask) +{ + bitmap_free(mask->bits); + mask->nbits = 0; +} + +static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits) +{ + int ret; + + ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits); + if (ret) { + mask->affinity.bits = NULL; + return ret; + } + + ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits); + if (ret) { + record__mmap_cpu_mask_free(&mask->maps); + mask->maps.bits = NULL; + } + + return ret; +} + +static void record__thread_mask_free(struct thread_mask *mask) +{ + record__mmap_cpu_mask_free(&mask->maps); + record__mmap_cpu_mask_free(&mask->affinity); +} + +static int record__parse_threads(const struct option *opt, const char *str, int unset) +{ + int s; + struct record_opts *opts = opt->value; + + if (unset || !str || !strlen(str)) { + opts->threads_spec = THREAD_SPEC__CPU; + } else { + for (s = 1; s < THREAD_SPEC__MAX; s++) { + if (s == THREAD_SPEC__USER) { + opts->threads_user_spec = strdup(str); + if (!opts->threads_user_spec) + return -ENOMEM; + opts->threads_spec = THREAD_SPEC__USER; + break; + } + if (!strncasecmp(str, thread_spec_tags[s], strlen(thread_spec_tags[s]))) { + opts->threads_spec = s; + break; + } + } + } + + if (opts->threads_spec == THREAD_SPEC__USER) + pr_debug("threads_spec: %s\n", opts->threads_user_spec); + else + pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]); + + return 0; +} + static int parse_output_max_size(const struct option *opt, const char *str, int unset) { @@ -2267,6 +3114,10 @@ out_free: return ret; } +void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused) +{ +} + static int parse_control_option(const struct option *opt, const char *str, int unset __maybe_unused) @@ -2317,12 +3168,22 @@ static int switch_output_setup(struct record *rec) * --switch-output=signal, as we'll send a SIGUSR2 from the side band * thread to its parent. 
*/ - if (rec->switch_output_event_set) + if (rec->switch_output_event_set) { + if (record__threads_enabled(rec)) { + pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n"); + return 0; + } goto do_signal; + } if (!s->set) return 0; + if (record__threads_enabled(rec)) { + pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n"); + return 0; + } + if (!strcmp(s->str, "signal")) { do_signal: s->signal = true; @@ -2538,6 +3399,8 @@ static struct option __record_options[] = { OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size, "Record the sampled code address (ip) page size"), OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"), + OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier, + "Record the sample identifier"), OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time, &record.opts.sample_time_set, "Record the sample timestamps"), @@ -2554,8 +3417,10 @@ static struct option __record_options[] = { OPT_CALLBACK('G', "cgroup", &record.evlist, "name", "monitor event in cgroup name only", parse_cgroups), - OPT_INTEGER('D', "delay", &record.opts.initial_delay, - "ms to wait before starting measurement after program start (-1: start with events disabled)"), + OPT_CALLBACK('D', "delay", &record, "ms", + "ms to wait before starting measurement after program start (-1: start with events disabled), " + "or ranges of time to enable events e.g. '-D 10-20,30-40'", + record__parse_event_enable_time), OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"), OPT_STRING('u', "uid", &record.opts.target.uid_str, "user", "user to profile"), @@ -2641,8 +3506,8 @@ static struct option __record_options[] = { "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer", record__parse_affinity), #ifdef HAVE_ZSTD_SUPPORT - OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, - "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)", + OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n", + "Compress records using specified level (default: 1 - fastest compression, 22 - greatest compression)", record__parse_comp_level), #endif OPT_CALLBACK(0, "max-size", &record.output_max_size, @@ -2663,11 +3528,422 @@ static struct option __record_options[] = { parse_control_option), OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup", "Fine-tune event synthesis: default=all", parse_record_synth_option), + OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls, + &record.debuginfod.set, "debuginfod urls", + "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls", + "system"), + OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec", + "write collected trace data into several data files using parallel threads", + record__parse_threads), + OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"), OPT_END() }; struct option *record_options = __record_options; +static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus) +{ + struct perf_cpu cpu; + int idx; + + if (cpu_map__is_dummy(cpus)) + return 0; + + perf_cpu_map__for_each_cpu(cpu, idx, cpus) { + if (cpu.cpu == -1) + continue; + /* Return ENODEV is input cpu is greater than max cpu */ + if ((unsigned long)cpu.cpu > mask->nbits) + return -ENODEV; + set_bit(cpu.cpu, mask->bits); + } + + return 0; +} + 
+static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec) +{ + struct perf_cpu_map *cpus; + + cpus = perf_cpu_map__new(mask_spec); + if (!cpus) + return -ENOMEM; + + bitmap_zero(mask->bits, mask->nbits); + if (record__mmap_cpu_mask_init(mask, cpus)) + return -ENODEV; + + perf_cpu_map__put(cpus); + + return 0; +} + +static void record__free_thread_masks(struct record *rec, int nr_threads) +{ + int t; + + if (rec->thread_masks) + for (t = 0; t < nr_threads; t++) + record__thread_mask_free(&rec->thread_masks[t]); + + zfree(&rec->thread_masks); +} + +static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits) +{ + int t, ret; + + rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks))); + if (!rec->thread_masks) { + pr_err("Failed to allocate thread masks\n"); + return -ENOMEM; + } + + for (t = 0; t < nr_threads; t++) { + ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits); + if (ret) { + pr_err("Failed to allocate thread masks[%d]\n", t); + goto out_free; + } + } + + return 0; + +out_free: + record__free_thread_masks(rec, nr_threads); + + return ret; +} + +static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus) +{ + int t, ret, nr_cpus = perf_cpu_map__nr(cpus); + + ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu); + if (ret) + return ret; + + rec->nr_threads = nr_cpus; + pr_debug("nr_threads: %d\n", rec->nr_threads); + + for (t = 0; t < rec->nr_threads; t++) { + set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits); + set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits); + if (verbose) { + pr_debug("thread_masks[%d]: ", t); + mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps"); + pr_debug("thread_masks[%d]: ", t); + mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity"); + } + } + + return 0; +} + +static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus, + const char **maps_spec, const char **affinity_spec, + u32 nr_spec) +{ + u32 s; + int ret = 0, t = 0; + struct mmap_cpu_mask cpus_mask; + struct thread_mask thread_mask, full_mask, *thread_masks; + + ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu); + if (ret) { + pr_err("Failed to allocate CPUs mask\n"); + return ret; + } + + ret = record__mmap_cpu_mask_init(&cpus_mask, cpus); + if (ret) { + pr_err("Failed to init cpu mask\n"); + goto out_free_cpu_mask; + } + + ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu); + if (ret) { + pr_err("Failed to allocate full mask\n"); + goto out_free_cpu_mask; + } + + ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu); + if (ret) { + pr_err("Failed to allocate thread mask\n"); + goto out_free_full_and_cpu_masks; + } + + for (s = 0; s < nr_spec; s++) { + ret = record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]); + if (ret) { + pr_err("Failed to initialize maps thread mask\n"); + goto out_free; + } + ret = record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]); + if (ret) { + pr_err("Failed to initialize affinity thread mask\n"); + goto out_free; + } + + /* ignore invalid CPUs but do not allow empty masks */ + if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits, + cpus_mask.bits, thread_mask.maps.nbits)) { + pr_err("Empty maps mask: %s\n", maps_spec[s]); + ret = -EINVAL; + goto out_free; + } + if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits, + cpus_mask.bits, 
thread_mask.affinity.nbits)) { + pr_err("Empty affinity mask: %s\n", affinity_spec[s]); + ret = -EINVAL; + goto out_free; + } + + /* do not allow intersection with other masks (full_mask) */ + if (bitmap_intersects(thread_mask.maps.bits, full_mask.maps.bits, + thread_mask.maps.nbits)) { + pr_err("Intersecting maps mask: %s\n", maps_spec[s]); + ret = -EINVAL; + goto out_free; + } + if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits, + thread_mask.affinity.nbits)) { + pr_err("Intersecting affinity mask: %s\n", affinity_spec[s]); + ret = -EINVAL; + goto out_free; + } + + bitmap_or(full_mask.maps.bits, full_mask.maps.bits, + thread_mask.maps.bits, full_mask.maps.nbits); + bitmap_or(full_mask.affinity.bits, full_mask.affinity.bits, + thread_mask.affinity.bits, full_mask.maps.nbits); + + thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask)); + if (!thread_masks) { + pr_err("Failed to reallocate thread masks\n"); + ret = -ENOMEM; + goto out_free; + } + rec->thread_masks = thread_masks; + rec->thread_masks[t] = thread_mask; + if (verbose) { + pr_debug("thread_masks[%d]: ", t); + mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps"); + pr_debug("thread_masks[%d]: ", t); + mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity"); + } + t++; + ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu); + if (ret) { + pr_err("Failed to allocate thread mask\n"); + goto out_free_full_and_cpu_masks; + } + } + rec->nr_threads = t; + pr_debug("nr_threads: %d\n", rec->nr_threads); + if (!rec->nr_threads) + ret = -EINVAL; + +out_free: + record__thread_mask_free(&thread_mask); +out_free_full_and_cpu_masks: + record__thread_mask_free(&full_mask); +out_free_cpu_mask: + record__mmap_cpu_mask_free(&cpus_mask); + + return ret; +} + +static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus) +{ + int ret; + struct cpu_topology *topo; + + topo = cpu_topology__new(); + if (!topo) { + pr_err("Failed to allocate CPU topology\n"); + return -ENOMEM; + } + + ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list, + topo->core_cpus_list, topo->core_cpus_lists); + cpu_topology__delete(topo); + + return ret; +} + +static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus) +{ + int ret; + struct cpu_topology *topo; + + topo = cpu_topology__new(); + if (!topo) { + pr_err("Failed to allocate CPU topology\n"); + return -ENOMEM; + } + + ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list, + topo->package_cpus_list, topo->package_cpus_lists); + cpu_topology__delete(topo); + + return ret; +} + +static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus) +{ + u32 s; + int ret; + const char **spec; + struct numa_topology *topo; + + topo = numa_topology__new(); + if (!topo) { + pr_err("Failed to allocate NUMA topology\n"); + return -ENOMEM; + } + + spec = zalloc(topo->nr * sizeof(char *)); + if (!spec) { + pr_err("Failed to allocate NUMA spec\n"); + ret = -ENOMEM; + goto out_delete_topo; + } + for (s = 0; s < topo->nr; s++) + spec[s] = topo->nodes[s].cpus; + + ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr); + + zfree(&spec); + +out_delete_topo: + numa_topology__delete(topo); + + return ret; +} + +static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus) +{ + int t, ret; + u32 s, nr_spec = 0; + char **maps_spec = NULL, **affinity_spec = NULL, **tmp_spec; + char *user_spec, *spec, 
*spec_ptr, *mask, *mask_ptr, *dup_mask = NULL; + + for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) { + spec = strtok_r(user_spec, ":", &spec_ptr); + if (spec == NULL) + break; + pr_debug2("threads_spec[%d]: %s\n", t, spec); + mask = strtok_r(spec, "/", &mask_ptr); + if (mask == NULL) + break; + pr_debug2(" maps mask: %s\n", mask); + tmp_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *)); + if (!tmp_spec) { + pr_err("Failed to reallocate maps spec\n"); + ret = -ENOMEM; + goto out_free; + } + maps_spec = tmp_spec; + maps_spec[nr_spec] = dup_mask = strdup(mask); + if (!maps_spec[nr_spec]) { + pr_err("Failed to allocate maps spec[%d]\n", nr_spec); + ret = -ENOMEM; + goto out_free; + } + mask = strtok_r(NULL, "/", &mask_ptr); + if (mask == NULL) { + pr_err("Invalid thread maps or affinity specs\n"); + ret = -EINVAL; + goto out_free; + } + pr_debug2(" affinity mask: %s\n", mask); + tmp_spec = realloc(affinity_spec, (nr_spec + 1) * sizeof(char *)); + if (!tmp_spec) { + pr_err("Failed to reallocate affinity spec\n"); + ret = -ENOMEM; + goto out_free; + } + affinity_spec = tmp_spec; + affinity_spec[nr_spec] = strdup(mask); + if (!affinity_spec[nr_spec]) { + pr_err("Failed to allocate affinity spec[%d]\n", nr_spec); + ret = -ENOMEM; + goto out_free; + } + dup_mask = NULL; + nr_spec++; + } + + ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec, + (const char **)affinity_spec, nr_spec); + +out_free: + free(dup_mask); + for (s = 0; s < nr_spec; s++) { + if (maps_spec) + free(maps_spec[s]); + if (affinity_spec) + free(affinity_spec[s]); + } + free(affinity_spec); + free(maps_spec); + + return ret; +} + +static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus) +{ + int ret; + + ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu); + if (ret) + return ret; + + if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus)) + return -ENODEV; + + rec->nr_threads = 1; + + return 0; +} + +static int record__init_thread_masks(struct record *rec) +{ + int ret = 0; + struct perf_cpu_map *cpus = rec->evlist->core.all_cpus; + + if (!record__threads_enabled(rec)) + return record__init_thread_default_masks(rec, cpus); + + if (evlist__per_thread(rec->evlist)) { + pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n"); + return -EINVAL; + } + + switch (rec->opts.threads_spec) { + case THREAD_SPEC__CPU: + ret = record__init_thread_cpu_masks(rec, cpus); + break; + case THREAD_SPEC__CORE: + ret = record__init_thread_core_masks(rec, cpus); + break; + case THREAD_SPEC__PACKAGE: + ret = record__init_thread_package_masks(rec, cpus); + break; + case THREAD_SPEC__NUMA: + ret = record__init_thread_numa_masks(rec, cpus); + break; + case THREAD_SPEC__USER: + ret = record__init_thread_user_masks(rec, cpus); + break; + default: + break; + } + + return ret; +} + int cmd_record(int argc, const char **argv) { int err; @@ -2697,6 +3973,12 @@ int cmd_record(int argc, const char **argv) # undef REASON #endif +#ifndef HAVE_BPF_SKEL +# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c) + set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true); +# undef set_nobuild +#endif + rec->opts.affinity = PERF_AFFINITY_SYS; rec->evlist = evlist__new(); @@ -2716,6 +3998,8 @@ int cmd_record(int argc, const char **argv) if (err) return err; + perf_debuginfod_setup(&record.debuginfod); + /* Make system wide (-a) the default target. 
*/ if (!argc && target__none(&rec->opts.target)) rec->opts.target.system_wide = true; @@ -2748,8 +4032,22 @@ int cmd_record(int argc, const char **argv) } if (rec->opts.kcore) + rec->opts.text_poke = true; + + if (rec->opts.kcore || record__threads_enabled(rec)) rec->data.is_dir = true; + if (record__threads_enabled(rec)) { + if (rec->opts.affinity != PERF_AFFINITY_SYS) { + pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n"); + goto out_opts; + } + if (record__aio_enabled(rec)) { + pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n"); + goto out_opts; + } + } + if (rec->opts.comp_level != 0) { pr_debug("Compression enabled, disabling build id collection at the end of the session.\n"); rec->no_buildid = true; @@ -2783,6 +4081,11 @@ int cmd_record(int argc, const char **argv) } } + if (rec->timestamp_filename && record__threads_enabled(rec)) { + rec->timestamp_filename = false; + pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n"); + } + /* * Allow aliases to facilitate the lookup of symbols for address * filters. Refer to auxtrace_parse_filters(). @@ -2791,17 +4094,6 @@ int cmd_record(int argc, const char **argv) symbol__init(NULL); - if (rec->opts.affinity != PERF_AFFINITY_SYS) { - rec->affinity_mask.nbits = cpu__max_cpu(); - rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits); - if (!rec->affinity_mask.bits) { - pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits); - err = -ENOMEM; - goto out_opts; - } - pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits); - } - err = record__auxtrace_init(rec); if (err) goto out; @@ -2898,9 +4190,20 @@ int cmd_record(int argc, const char **argv) } rec->opts.target.hybrid = perf_pmu__has_hybrid(); + + if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP) + arch__add_leaf_frame_record_opts(&rec->opts); + err = -ENOMEM; - if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) - usage_with_options(record_usage, record_options); + if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) { + if (rec->opts.target.pid != NULL) { + pr_err("Couldn't create thread/CPU maps: %s\n", + errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf))); + goto out; + } + else + usage_with_options(record_usage, record_options); + } err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts); if (err) @@ -2922,11 +4225,25 @@ int cmd_record(int argc, const char **argv) } } + if (rec->off_cpu) { + err = record__config_off_cpu(rec); + if (err) { + pr_err("record__config_off_cpu failed, error %d\n", err); + goto out; + } + } + if (record_opts__config(&rec->opts)) { err = -EINVAL; goto out; } + err = record__init_thread_masks(rec); + if (err) { + pr_err("Failed to initialize parallel data streaming masks\n"); + goto out; + } + if (rec->opts.nr_cblocks > nr_cblocks_max) rec->opts.nr_cblocks = nr_cblocks_max; pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks); @@ -2940,11 +4257,12 @@ int cmd_record(int argc, const char **argv) err = __cmd_record(&record, argc, argv); out: - bitmap_free(rec->affinity_mask.bits); evlist__delete(rec->evlist); symbol__exit(); auxtrace_record__free(rec->itr); out_opts: + record__free_thread_masks(rec, rec->nr_threads); + rec->nr_threads = 0; evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close); return err; } |
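The start/stop handshake between the main thread and each reader thread (record__thread_data_open_pipes, record__thread, and record__terminate_thread above) reduces to a small pipe protocol. Below is a minimal standalone sketch of that protocol, not perf code: the names are illustrative, the mmap polling is elided, and the final join is for tidiness only (the diff actually creates the threads detached):

    /* Sketch of the msg/ack handshake used by the reader threads above:
     * the thread reports READY on the ack pipe once it is running; the
     * main thread requests termination by closing the msg write end,
     * which the reader observes as POLLHUP; the reader flushes and acks
     * once more before exiting. Build with -lpthread. */
    #include <poll.h>
    #include <pthread.h>
    #include <unistd.h>

    enum thread_msg { THREAD_MSG__UNDEFINED = 0, THREAD_MSG__READY };

    static int msg_pipe[2], ack_pipe[2];

    static void *reader(void *arg)
    {
    	enum thread_msg msg = THREAD_MSG__READY;
    	struct pollfd pfd = { .fd = msg_pipe[0], .events = POLLIN };

    	(void)arg;
    	write(ack_pipe[1], &msg, sizeof(msg));	/* "started" notification */

    	/* perf's loop also polls the mmap fds; here only the control pipe */
    	while (poll(&pfd, 1, -1) >= 0) {
    		if (pfd.revents & POLLHUP) {	/* msg[1] closed: terminate */
    			close(msg_pipe[0]);
    			break;
    		}
    		pfd.revents = 0;
    	}

    	/* a final ring-buffer flush would go here */
    	write(ack_pipe[1], &msg, sizeof(msg));	/* termination ack */
    	return NULL;
    }

    int main(void)
    {
    	enum thread_msg ack = THREAD_MSG__UNDEFINED;
    	pthread_t handle;

    	if (pipe(msg_pipe) || pipe(ack_pipe))
    		return 1;
    	if (pthread_create(&handle, NULL, reader, NULL))
    		return 1;

    	read(ack_pipe[0], &ack, sizeof(ack));	/* wait for READY */
    	close(msg_pipe[1]);			/* ask the reader to stop */
    	read(ack_pipe[0], &ack, sizeof(ack));	/* wait for the final ack */
    	pthread_join(handle, NULL);
    	return 0;
    }

The same closing of the msg write end doubles as the wakeup for a reader blocked in poll(), which is why record__thread needs no separate eventfd: POLLHUP on the control pipe both wakes the thread and tells it to terminate.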