Diffstat (limited to 'tools/perf/util/synthetic-events.c')
-rw-r--r--  tools/perf/util/synthetic-events.c | 397
1 file changed, 268 insertions(+), 129 deletions(-)
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 198982109f0f..cccd293b5312 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -364,28 +364,58 @@ static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
}
static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
+ struct machine *machine,
bool is_kernel)
{
struct build_id bid;
+ struct nsinfo *nsi;
+ struct nscookie nc;
+ struct dso *dso = NULL;
+ struct dso_id id;
int rc;
- if (is_kernel)
+ if (is_kernel) {
rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
- else
- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+ goto out;
+ }
+
+ id.maj = event->maj;
+ id.min = event->min;
+ id.ino = event->ino;
+ id.ino_generation = event->ino_generation;
+
+ dso = dsos__findnew_id(&machine->dsos, event->filename, &id);
+ if (dso && dso->has_build_id) {
+ bid = dso->bid;
+ rc = 0;
+ goto out;
+ }
+
+ nsi = nsinfo__new(event->pid);
+ nsinfo__mountns_enter(nsi, &nc);
+ rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+
+ nsinfo__mountns_exit(&nc);
+ nsinfo__put(nsi);
+
+out:
if (rc == 0) {
memcpy(event->build_id, bid.data, sizeof(bid.data));
event->build_id_size = (u8) bid.size;
event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
event->__reserved_1 = 0;
event->__reserved_2 = 0;
+
+ if (dso && !dso->has_build_id)
+ dso__set_build_id(dso, &bid);
} else {
if (event->filename[0] == '/') {
pr_debug2("Failed to read build ID for %s\n",
event->filename);
}
}
+ dso__put(dso);
}
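For context on the reserved fields cleared above: when PERF_RECORD_MISC_MMAP_BUILD_ID is set, the mmap2 record reuses the maj/min/ino/ino_generation space to carry the build ID. A sketch of that union, following the layout documented for PERF_RECORD_MMAP2 in the UAPI perf_event.h:

	union {
		struct {
			__u32	maj;
			__u32	min;
			__u64	ino;
			__u64	ino_generation;
		};
		struct {	/* used when PERF_RECORD_MISC_MMAP_BUILD_ID is set */
			__u8	build_id_size;
			__u8	__reserved_1;
			__u16	__reserved_2;
			__u8	build_id[20];
		};
	};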
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
@@ -496,7 +526,7 @@ out:
event->mmap2.tid = pid;
if (symbol_conf.buildid_mmap2)
- perf_record_mmap2__read_build_id(&event->mmap2, false);
+ perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
rc = -1;
@@ -679,7 +709,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
memcpy(event->mmap2.filename, pos->dso->long_name,
pos->dso->long_name_len + 1);
- perf_record_mmap2__read_build_id(&event->mmap2, false);
+ perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
} else {
size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
@@ -754,7 +784,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
snprintf(filename, sizeof(filename), "%s/proc/%d/task",
machine->root_dir, pid);
- n = scandir(filename, &dirent, filter_task, alphasort);
+ n = scandir(filename, &dirent, filter_task, NULL);
if (n < 0)
return n;
@@ -767,11 +797,12 @@ static int __event__synthesize_thread(union perf_event *comm_event,
if (*end)
continue;
- rc = -1;
+ /* Some threads may exit just after the scan; ignore them. */
if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
&tgid, &ppid, &kernel_thread) != 0)
- break;
+ continue;
+ rc = -1;
if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
ppid, process, machine) < 0)
break;
@@ -987,7 +1018,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
return 0;
snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
- n = scandir(proc_path, &dirent, filter_task, alphasort);
+ n = scandir(proc_path, &dirent, filter_task, NULL);
if (n < 0)
return err;
@@ -1114,7 +1145,7 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
event->mmap2.len = map->end - event->mmap.start;
event->mmap2.pid = machine->pid;
- perf_record_mmap2__read_build_id(&event->mmap2, true);
+ perf_record_mmap2__read_build_id(&event->mmap2, machine, true);
} else {
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
@@ -1183,122 +1214,122 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
return err;
}
-static void synthesize_cpus(struct cpu_map_entries *cpus,
- struct perf_cpu_map *map)
-{
- int i;
-
- cpus->nr = map->nr;
-
- for (i = 0; i < map->nr; i++)
- cpus->cpu[i] = map->map[i];
-}
-
-static void synthesize_mask(struct perf_record_record_cpu_map *mask,
- struct perf_cpu_map *map, int max)
-{
- int i;
-
- mask->nr = BITS_TO_LONGS(max);
- mask->long_size = sizeof(long);
-
- for (i = 0; i < map->nr; i++)
- set_bit(map->map[i], mask->mask);
-}
+struct synthesize_cpu_map_data {
+ const struct perf_cpu_map *map;
+ int nr;
+ int min_cpu;
+ int max_cpu;
+ int has_any_cpu;
+ int type;
+ size_t size;
+ struct perf_record_cpu_map_data *data;
+};
-static size_t cpus_size(struct perf_cpu_map *map)
+static void synthesize_cpus(struct synthesize_cpu_map_data *data)
{
- return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+ data->data->type = PERF_CPU_MAP__CPUS;
+ data->data->cpus_data.nr = data->nr;
+ for (int i = 0; i < data->nr; i++)
+ data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
}
-static size_t mask_size(struct perf_cpu_map *map, int *max)
+static void synthesize_mask(struct synthesize_cpu_map_data *data)
{
- int i;
+ int idx;
+ struct perf_cpu cpu;
- *max = 0;
+ /* Due to padding, the 4-byte-per-entry mask variant is always smaller. */
+ data->data->type = PERF_CPU_MAP__MASK;
+ data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu);
+ data->data->mask32_data.long_size = 4;
- for (i = 0; i < map->nr; i++) {
- /* bit position of the cpu is + 1 */
- int bit = map->map[i] + 1;
+ perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
+ int bit_word = cpu.cpu / 32;
+ u32 bit_mask = 1U << (cpu.cpu & 31);
- if (bit > *max)
- *max = bit;
+ data->data->mask32_data.mask[bit_word] |= bit_mask;
}
+}
- return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
+static void synthesize_range_cpus(struct synthesize_cpu_map_data *data)
+{
+ data->data->type = PERF_CPU_MAP__RANGE_CPUS;
+ data->data->range_cpu_data.any_cpu = data->has_any_cpu;
+ data->data->range_cpu_data.start_cpu = data->min_cpu;
+ data->data->range_cpu_data.end_cpu = data->max_cpu;
}
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
+static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data,
+ size_t header_size)
{
size_t size_cpus, size_mask;
- bool is_dummy = perf_cpu_map__empty(map);
- /*
- * Both array and mask data have variable size based
- * on the number of cpus and their actual values.
- * The size of the 'struct perf_record_cpu_map_data' is:
- *
- * array = size of 'struct cpu_map_entries' +
- * number of cpus * sizeof(u64)
- *
- * mask = size of 'struct perf_record_record_cpu_map' +
- * maximum cpu bit converted to size of longs
- *
- * and finally + the size of 'struct perf_record_cpu_map_data'.
- */
- size_cpus = cpus_size(map);
- size_mask = mask_size(map, max);
-
- if (is_dummy || (size_cpus < size_mask)) {
- *size += size_cpus;
- *type = PERF_CPU_MAP__CPUS;
- } else {
- *size += size_mask;
- *type = PERF_CPU_MAP__MASK;
- }
-
- *size += sizeof(struct perf_record_cpu_map_data);
- *size = PERF_ALIGN(*size, sizeof(u64));
- return zalloc(*size);
+ syn_data->nr = perf_cpu_map__nr(syn_data->map);
+ syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;
+
+ syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
+ syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
+ if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) {
+ /* A consecutive range of CPUs can be encoded using a range. */
+ assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64));
+ syn_data->type = PERF_CPU_MAP__RANGE_CPUS;
+ syn_data->size = header_size + sizeof(u64);
+ return zalloc(syn_data->size);
+ }
+
+ size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16);
+ /* Due to padding, the 4-byte-per-entry mask variant is always smaller. */
+ size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) +
+ BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32);
+ if (syn_data->has_any_cpu || size_cpus < size_mask) {
+ /* Follow the CPU map encoding. */
+ syn_data->type = PERF_CPU_MAP__CPUS;
+ syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64));
+ return zalloc(syn_data->size);
+ }
+ /* Encode using a bitmask. */
+ syn_data->type = PERF_CPU_MAP__MASK;
+ syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64));
+ return zalloc(syn_data->size);
}
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max)
+static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data)
{
- data->type = type;
-
- switch (type) {
+ switch (data->type) {
case PERF_CPU_MAP__CPUS:
- synthesize_cpus((struct cpu_map_entries *) data->data, map);
+ synthesize_cpus(data);
break;
case PERF_CPU_MAP__MASK:
- synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
+ synthesize_mask(data);
+ break;
+ case PERF_CPU_MAP__RANGE_CPUS:
+ synthesize_range_cpus(data);
+ break;
default:
break;
}
}
-static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
+static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
{
- size_t size = sizeof(struct perf_record_cpu_map);
+ struct synthesize_cpu_map_data syn_data = { .map = map };
struct perf_record_cpu_map *event;
- int max;
- u16 type;
- event = cpu_map_data__alloc(map, &size, &type, &max);
+
+ event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header));
if (!event)
return NULL;
+ syn_data.data = &event->data;
event->header.type = PERF_RECORD_CPU_MAP;
- event->header.size = size;
- event->data.type = type;
-
- cpu_map_data__synthesize(&event->data, map, type, max);
+ event->header.size = syn_data.size;
+ cpu_map_data__synthesize(&syn_data);
return event;
}
+
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *map,
+ const struct perf_cpu_map *map,
perf_event__handler_t process,
struct machine *machine)
{
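For orientation, a rough picture of which encoding cpu_map_data__alloc() ends up picking for a few illustrative CPU maps (byte counts ignore alignment padding):

/*
 *   "0-7"       -> PERF_CPU_MAP__RANGE_CPUS  (contiguous: start_cpu=0, end_cpu=7,
 *                                             the whole payload fits in one u64)
 *   "0,2,4,6"   -> PERF_CPU_MAP__MASK        (max CPU is small, so the 32-bit
 *                                             mask is the smallest form)
 *   "0,1,4095"  -> PERF_CPU_MAP__CPUS        (sparse with a large max CPU; the
 *                                             u16 list beats a ~512-byte mask)
 *   a non-contiguous map containing the "any CPU" (-1) entry
 *               -> PERF_CPU_MAP__CPUS        (a bitmask cannot represent -1)
 */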
@@ -1354,7 +1385,7 @@ int perf_event__synthesize_stat_config(struct perf_tool *tool,
}
int perf_event__synthesize_stat(struct perf_tool *tool,
- u32 cpu, u32 thread, u64 id,
+ struct perf_cpu cpu, u32 thread, u64 id,
struct perf_counts_values *count,
perf_event__handler_t process,
struct machine *machine)
@@ -1366,7 +1397,7 @@ int perf_event__synthesize_stat(struct perf_tool *tool,
event.header.misc = 0;
event.id = id;
- event.cpu = cpu;
+ event.cpu = cpu.cpu;
event.thread = thread;
event.val = count->val;
event.ena = count->ena;
@@ -1431,11 +1462,12 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
result += sizeof(u64);
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- result += sz;
+ sz = sample_read_value_size(read_format);
+ result += sz * sample->read.group.nr;
} else {
result += sizeof(u64);
+ if (read_format & PERF_FORMAT_LOST)
+ result += sizeof(u64);
}
}
@@ -1520,6 +1552,20 @@ void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
*array = data->weight;
}
+static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
+ const struct perf_sample *sample)
+{
+ size_t sz = sample_read_value_size(read_format);
+ struct sample_read_value *v = sample->read.group.values;
+
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ memcpy(array, v, sz);
+ array = (void *)array + sz;
+ }
+ return array;
+}
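The reason for this helper: with PERF_FORMAT_LOST each group entry can now carry an extra u64, so the per-entry stride comes from sample_read_value_size() rather than a fixed sizeof(). A rough sketch of the arithmetic, assuming the value/id/lost layout used by perf:

/*
 *   read_format = GROUP | ID         -> 16 bytes per group entry (value, id)
 *   read_format = GROUP | ID | LOST  -> 24 bytes per group entry (value, id, lost)
 *
 * so the group payload is sample->read.group.nr * sample_read_value_size(read_format)
 * instead of nr * sizeof(struct sample_read_value).
 */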
+
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
const struct perf_sample *sample)
{
@@ -1601,13 +1647,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- memcpy(array, sample->read.group.values, sz);
- array = (void *)array + sz;
+ array = copy_read_group_values(array, read_format,
+ sample);
} else {
*array = sample->read.one.id;
array++;
+
+ if (read_format & PERF_FORMAT_LOST) {
+ *array = sample->read.one.lost;
+ array++;
+ }
}
}
@@ -1711,48 +1760,112 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
return 0;
}
-int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
- struct evlist *evlist, struct machine *machine)
+int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
+{
+ __u64 *start = array;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ u.val32[1] = 0;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
+ return (void *)array - (void *)start;
+}
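A minimal usage sketch for the new helper ('ev', 'attr' and 'sample' below are placeholders, not names from this file): it writes the trailing sample_id fields for a non-sample record in the order the kernel uses when sample_id_all is set, and returns the number of bytes written, so a caller can grow the event accordingly.

	/* Illustrative only: append sample_id data after an already-built event. */
	__u64 *where = (void *)ev + ev->header.size;
	int id_sz = perf_event__synthesize_id_sample(where, attr->sample_type, sample);

	ev->header.size += id_sz;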
+
+int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
+ struct evlist *evlist, struct machine *machine, size_t from)
{
union perf_event *ev;
struct evsel *evsel;
- size_t nr = 0, i = 0, sz, max_nr, n;
+ size_t nr = 0, i = 0, sz, max_nr, n, pos;
+ size_t e1_sz = sizeof(struct id_index_entry);
+ size_t e2_sz = sizeof(struct id_index_entry_2);
+ size_t etot_sz = e1_sz + e2_sz;
+ bool e2_needed = false;
int err;
- pr_debug2("Synthesizing id index\n");
-
- max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
- sizeof(struct id_index_entry);
+ max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;
- evlist__for_each_entry(evlist, evsel)
+ pos = 0;
+ evlist__for_each_entry(evlist, evsel) {
+ if (pos++ < from)
+ continue;
nr += evsel->core.ids;
+ }
+
+ if (!nr)
+ return 0;
+
+ pr_debug2("Synthesizing id index\n");
n = nr > max_nr ? max_nr : nr;
- sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
+ sz = sizeof(struct perf_record_id_index) + n * etot_sz;
ev = zalloc(sz);
if (!ev)
return -ENOMEM;
+ sz = sizeof(struct perf_record_id_index) + n * e1_sz;
+
ev->id_index.header.type = PERF_RECORD_ID_INDEX;
- ev->id_index.header.size = sz;
ev->id_index.nr = n;
+ pos = 0;
evlist__for_each_entry(evlist, evsel) {
u32 j;
- for (j = 0; j < evsel->core.ids; j++) {
+ if (pos++ < from)
+ continue;
+ for (j = 0; j < evsel->core.ids; j++, i++) {
struct id_index_entry *e;
+ struct id_index_entry_2 *e2;
struct perf_sample_id *sid;
if (i >= n) {
+ ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
err = process(tool, ev, NULL, machine);
if (err)
goto out_err;
nr -= n;
i = 0;
+ e2_needed = false;
}
- e = &ev->id_index.entries[i++];
+ e = &ev->id_index.entries[i];
e->id = evsel->core.id[j];
@@ -1763,13 +1876,20 @@ int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_
}
e->idx = sid->idx;
- e->cpu = sid->cpu;
+ e->cpu = sid->cpu.cpu;
e->tid = sid->tid;
+
+ if (sid->machine_pid)
+ e2_needed = true;
+
+ e2 = (void *)ev + sz;
+ e2[i].machine_pid = sid->machine_pid;
+ e2[i].vcpu = sid->vcpu.cpu;
}
}
- sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
- ev->id_index.header.size = sz;
+ sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
+ ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
ev->id_index.nr = nr;
err = process(tool, ev, NULL, machine);
@@ -1779,11 +1899,36 @@ out_err:
return err;
}
+int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
+ struct evlist *evlist, struct machine *machine)
+{
+ return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
+}
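For readers of the stream, the record produced by __perf_event__synthesize_id_index() now has roughly this layout (a sketch derived from the function above, not a header excerpt):

/*
 *   struct perf_record_id_index   header, nr
 *   struct id_index_entry         entries[nr]   - id, idx, cpu, tid (as before)
 *   struct id_index_entry_2       entries2[nr]  - machine_pid, vcpu; accounted in
 *                                                 header.size only when some id has
 *                                                 a machine_pid, so consumers detect
 *                                                 the second table from the larger size
 */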
+
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct perf_thread_map *threads,
perf_event__handler_t process, bool needs_mmap,
bool data_mmap, unsigned int nr_threads_synthesize)
{
+ /*
+ * When perf runs in a non-root PID namespace and the namespace's proc FS
+ * is not mounted, nsinfo__is_in_root_namespace() returns false.
+ * In this case the proc FS comes from the parent namespace, so the perf
+ * tool would wrongly gather process info from its parent PID namespace.
+ *
+ * To avoid the confusion of running in a child PID namespace while
+ * synthesizing thread info from the parent PID namespace, return failure
+ * with a warning.
+ */
+ if (!nsinfo__is_in_root_namespace()) {
+ pr_err("Perf runs in non-root PID namespace but it tries to ");
+ pr_err("gather process info from its parent PID namespace.\n");
+ pr_err("Please mount the proc file system properly, e.g. ");
+ pr_err("add the option '--mount-proc' for unshare command.\n");
+ return -EPERM;
+ }
+
if (target__has_task(target))
return perf_event__synthesize_thread_map(tool, threads, process, machine,
needs_mmap, data_mmap);
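As the error text above suggests, this situation is usually fixed on the user side; for instance, starting perf via something like 'unshare --fork --pid --mount-proc perf record ...' (an illustrative invocation) mounts a proc FS that matches the new PID namespace, so the namespace check passes.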
@@ -1832,7 +1977,7 @@ int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evse
if (ev == NULL)
return -ENOMEM;
- strlcpy(ev->data, evsel->unit, size + 1);
+ strlcpy(ev->unit, evsel->unit, size + 1);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
@@ -1849,8 +1994,7 @@ int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evs
if (ev == NULL)
return -ENOMEM;
- ev_data = (struct perf_record_event_update_scale *)ev->data;
- ev_data->scale = evsel->scale;
+ ev->scale.scale = evsel->scale;
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
@@ -1867,7 +2011,7 @@ int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evse
if (ev == NULL)
return -ENOMEM;
- strlcpy(ev->data, evsel->name, len + 1);
+ strlcpy(ev->name, evsel->name, len + 1);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
@@ -1876,25 +2020,20 @@ int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evse
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
- size_t size = sizeof(struct perf_record_event_update);
+ struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
struct perf_record_event_update *ev;
- int max, err;
- u16 type;
-
- if (!evsel->core.own_cpus)
- return 0;
+ int err;
- ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
+ ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64));
if (!ev)
return -ENOMEM;
+ syn_data.data = &ev->cpus.cpus;
ev->header.type = PERF_RECORD_EVENT_UPDATE;
- ev->header.size = (u16)size;
+ ev->header.size = (u16)syn_data.size;
ev->type = PERF_EVENT_UPDATE__CPUS;
ev->id = evsel->core.id[0];
-
- cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
- evsel->core.own_cpus, type, max);
+ cpu_map_data__synthesize(&syn_data);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
@@ -2108,7 +2247,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
return err;
}
- err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+ err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
return err;