Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--	tools/perf/util/evlist.c	301
1 file changed, 225 insertions(+), 76 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index f9f77bee0b1b..bbc746aa5716 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -18,6 +18,7 @@
#include <unistd.h>
#include "parse-events.h"
+#include "parse-options.h"
#include <sys/mman.h>
@@ -49,6 +50,18 @@ struct perf_evlist *perf_evlist__new(void)
return evlist;
}
+struct perf_evlist *perf_evlist__new_default(void)
+{
+ struct perf_evlist *evlist = perf_evlist__new();
+
+ if (evlist && perf_evlist__add_default(evlist)) {
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ }
+
+ return evlist;
+}
+
/**
* perf_evlist__set_id_pos - set the positions of event ids.
* @evlist: selected event list
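Not part of the patch: a minimal caller sketch for the new perf_evlist__new_default() helper, assuming the usual perf evlist API; error handling is reduced to the NULL check.

	struct perf_evlist *evlist = perf_evlist__new_default();

	if (evlist == NULL)
		return -ENOMEM;	/* allocation or perf_evlist__add_default() failed */

	/* ... open, mmap and read events ... */

	perf_evlist__delete(evlist);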
@@ -104,6 +117,8 @@ void perf_evlist__delete(struct perf_evlist *evlist)
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
list_add_tail(&entry->node, &evlist->entries);
+ entry->idx = evlist->nr_entries;
+
if (!evlist->nr_entries++)
perf_evlist__set_id_pos(evlist);
}
@@ -152,7 +167,7 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
event_attr_init(&attr);
- evsel = perf_evsel__new(&attr, 0);
+ evsel = perf_evsel__new(&attr);
if (evsel == NULL)
goto error;
@@ -177,7 +192,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
size_t i;
for (i = 0; i < nr_attrs; i++) {
- evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
+ evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
if (evsel == NULL)
goto out_delete_partial_list;
list_add_tail(&evsel->node, &head);
@@ -236,13 +251,12 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler)
{
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
- evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
if (evsel == NULL)
return -1;
- evsel->handler.func = handler;
+ evsel->handler = handler;
perf_evlist__add(evlist, evsel);
return 0;
}
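Illustrative only: with the handler now stored directly in evsel->handler (a plain void * here), a tracepoint caller could look like the sketch below; trace__sys_enter is a placeholder handler, not something this patch adds.

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter",
				   trace__sys_enter))
		return -1;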
@@ -527,7 +541,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
if ((old & md->mask) + size != ((old + size) & md->mask)) {
unsigned int offset = old;
unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = &md->event_copy;
+ void *dst = md->event_copy;
do {
cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -537,7 +551,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
len -= cpy;
} while (len);
- event = &md->event_copy;
+ event = (union perf_event *) md->event_copy;
}
old += size;
@@ -545,12 +559,19 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
md->prev = old;
- if (!evlist->overwrite)
- perf_mmap__write_tail(md, old);
-
return event;
}
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+ if (!evlist->overwrite) {
+ struct perf_mmap *md = &evlist->mmap[idx];
+ unsigned int old = md->prev;
+
+ perf_mmap__write_tail(md, old);
+ }
+}
+
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
if (evlist->mmap[idx].base != NULL) {
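Because perf_evlist__mmap_read() no longer writes the tail pointer itself, consumers have to signal consumption explicitly. A hedged sketch of the resulting loop, where handle_event() stands in for whatever processing the caller does:

	union perf_event *event;

	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
		handle_event(event);			/* placeholder */
		perf_evlist__mmap_consume(evlist, idx);	/* advance the tail */
	}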
@@ -587,6 +608,8 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
MAP_SHARED, fd, 0);
if (evlist->mmap[idx].base == MAP_FAILED) {
+ pr_debug2("failed to mmap perf event ring buffer, error %d\n",
+ errno);
evlist->mmap[idx].base = NULL;
return -1;
}
@@ -595,9 +618,36 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
return 0;
}
-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
+ int prot, int mask, int cpu, int thread,
+ int *output)
{
struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (*output == -1) {
+ *output = fd;
+ if (__perf_evlist__mmap(evlist, idx, prot, mask,
+ *output) < 0)
+ return -1;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+ return -1;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
+ int mask)
+{
int cpu, thread;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
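The pattern that perf_evlist__mmap_per_evsel() factors out, shown standalone under the assumption that fds[0..nr_fds) were opened with sys_perf_event_open() for the same cpu/thread pair: only the first fd gets its own ring buffer, the rest are redirected into it.

	void *base = mmap(NULL, mmap_len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fds[0], 0);
	if (base == MAP_FAILED)
		return -errno;

	for (i = 1; i < nr_fds; i++)
		if (ioctl(fds[i], PERF_EVENT_IOC_SET_OUTPUT, fds[0]) != 0)
			return -errno;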
@@ -607,23 +657,9 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
int output = -1;
for (thread = 0; thread < nr_threads; thread++) {
- list_for_each_entry(evsel, &evlist->entries, node) {
- int fd = FD(evsel, cpu, thread);
-
- if (output == -1) {
- output = fd;
- if (__perf_evlist__mmap(evlist, cpu,
- prot, mask, output) < 0)
- goto out_unmap;
- } else {
- if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
- goto out_unmap;
- }
-
- if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
- perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
- goto out_unmap;
- }
+ if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
+ cpu, thread, &output))
+ goto out_unmap;
}
}
@@ -635,9 +671,9 @@ out_unmap:
return -1;
}
-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
+ int mask)
{
- struct perf_evsel *evsel;
int thread;
int nr_threads = thread_map__nr(evlist->threads);
@@ -645,23 +681,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
- list_for_each_entry(evsel, &evlist->entries, node) {
- int fd = FD(evsel, 0, thread);
-
- if (output == -1) {
- output = fd;
- if (__perf_evlist__mmap(evlist, thread,
- prot, mask, output) < 0)
- goto out_unmap;
- } else {
- if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
- goto out_unmap;
- }
-
- if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
- perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
- goto out_unmap;
- }
+ if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
+ thread, &output))
+ goto out_unmap;
}
return 0;
@@ -672,20 +694,90 @@ out_unmap:
return -1;
}
-/** perf_evlist__mmap - Create per cpu maps to receive events
- *
- * @evlist - list of events
- * @pages - map length in pages
- * @overwrite - overwrite older events?
- *
- * If overwrite is false the user needs to signal event consuption using:
- *
- * struct perf_mmap *m = &evlist->mmap[cpu];
- * unsigned int head = perf_mmap__read_head(m);
+static size_t perf_evlist__mmap_size(unsigned long pages)
+{
+ /* 512 kiB: default amount of unprivileged mlocked memory */
+ if (pages == UINT_MAX)
+ pages = (512 * 1024) / page_size;
+ else if (!is_power_of_2(pages))
+ return 0;
+
+ return (pages + 1) * page_size;
+}
+
+static long parse_pages_arg(const char *str, unsigned long min,
+ unsigned long max)
+{
+ unsigned long pages, val;
+ static struct parse_tag tags[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+
+ if (str == NULL)
+ return -EINVAL;
+
+ val = parse_tag_value(str, tags);
+ if (val != (unsigned long) -1) {
+ /* we got file size value */
+ pages = PERF_ALIGN(val, page_size) / page_size;
+ } else {
+ /* we got pages count value */
+ char *eptr;
+ pages = strtoul(str, &eptr, 10);
+ if (*eptr != '\0')
+ return -EINVAL;
+ }
+
+ if ((pages == 0) && (min == 0)) {
+ /* leave number of pages at 0 */
+ } else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
+ /* round pages up to next power of 2 */
+ pages = next_pow2(pages);
+ pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
+ pages * page_size, pages);
+ }
+
+ if (pages > max)
+ return -EINVAL;
+
+ return pages;
+}
+
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ unsigned int *mmap_pages = opt->value;
+ unsigned long max = UINT_MAX;
+ long pages;
+
+ if (max > SIZE_MAX / page_size)
+ max = SIZE_MAX / page_size;
+
+ pages = parse_pages_arg(str, 1, max);
+ if (pages < 0) {
+ pr_err("Invalid argument for --mmap_pages/-m\n");
+ return -1;
+ }
+
+ *mmap_pages = pages;
+ return 0;
+}
+
+/**
+ * perf_evlist__mmap - Create mmaps to receive events.
+ * @evlist: list of events
+ * @pages: map length in pages
+ * @overwrite: overwrite older events?
*
- * perf_mmap__write_tail(m, head)
+ * If @overwrite is %false the user needs to signal event consumption using
+ * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
+ * automatically.
*
- * Using perf_evlist__read_on_cpu does this automatically.
+ * Return: %0 on success, negative error code otherwise.
*/
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
bool overwrite)
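Worked example of the sizing helpers added above (not in the patch): with 4 KiB pages, "-m 512K" is parsed by parse_pages_arg() into 512 KiB / 4 KiB = 128 data pages, and perf_evlist__mmap() then derives the length and mask as sketched here.

	size_t len  = perf_evlist__mmap_size(128);	/* (128 + 1) * 4096 = 528384; one extra control page */
	int    mask = len - page_size - 1;		/* 128 * 4096 - 1 = 0x7ffff */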
@@ -695,14 +787,6 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
const struct thread_map *threads = evlist->threads;
int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
- /* 512 kiB: default amount of unprivileged mlocked memory */
- if (pages == UINT_MAX)
- pages = (512 * 1024) / page_size;
- else if (!is_power_of_2(pages))
- return -EINVAL;
-
- mask = pages * page_size - 1;
-
if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
return -ENOMEM;
@@ -710,7 +794,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return -ENOMEM;
evlist->overwrite = overwrite;
- evlist->mmap_len = (pages + 1) * page_size;
+ evlist->mmap_len = perf_evlist__mmap_size(pages);
+ pr_debug("mmap size %zuB\n", evlist->mmap_len);
+ mask = evlist->mmap_len - page_size - 1;
list_for_each_entry(evsel, &evlist->entries, node) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -725,8 +811,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
-int perf_evlist__create_maps(struct perf_evlist *evlist,
- struct perf_target *target)
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
evlist->threads = thread_map__new_str(target->pid, target->tid,
target->uid);
@@ -734,9 +819,11 @@ int perf_evlist__create_maps(struct perf_evlist *evlist,
if (evlist->threads == NULL)
return -1;
- if (perf_target__has_task(target))
+ if (target->force_per_cpu)
+ evlist->cpus = cpu_map__new(target->cpu_list);
+ else if (target__has_task(target))
evlist->cpus = cpu_map__dummy_new();
- else if (!perf_target__has_cpu(target) && !target->uses_mmap)
+ else if (!target__has_cpu(target) && !target->uses_mmap)
evlist->cpus = cpu_map__dummy_new();
else
evlist->cpus = cpu_map__new(target->cpu_list);
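A hedged caller sketch for the reworked perf_evlist__create_maps() signature; the struct target field values (uid, cpu_list) are placeholders chosen for illustration:

	struct target target = {
		.uid	  = UINT_MAX,
		.cpu_list = "0-3",	/* placeholder CPU list */
	};

	if (perf_evlist__create_maps(evlist, &target) < 0)
		return -1;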
@@ -945,8 +1032,7 @@ out_err:
return err;
}
-int perf_evlist__prepare_workload(struct perf_evlist *evlist,
- struct perf_target *target,
+int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
const char *argv[], bool pipe_output,
bool want_signal)
{
@@ -998,7 +1084,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
exit(-1);
}
- if (perf_target__none(target))
+ if (target__none(target))
evlist->threads->map[0] = evlist->workload.pid;
close(child_ready_pipe[1]);
@@ -1064,5 +1150,68 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
perf_evsel__name(evsel));
}
- return printed + fprintf(fp, "\n");;
+ return printed + fprintf(fp, "\n");
+}
+
+int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ char sbuf[128];
+
+ switch (err) {
+ case ENOENT:
+ scnprintf(buf, size, "%s",
+ "Error:\tUnable to find debugfs\n"
+ "Hint:\tWas your kernel was compiled with debugfs support?\n"
+ "Hint:\tIs the debugfs filesystem mounted?\n"
+ "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
+ break;
+ case EACCES:
+ scnprintf(buf, size,
+ "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
+ "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
+ debugfs_mountpoint, debugfs_mountpoint);
+ break;
+ default:
+ scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+ break;
+ }
+
+ return 0;
+}
+
+int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ int printed, value;
+ char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+
+ switch (err) {
+ case EACCES:
+ case EPERM:
+ printed = scnprintf(buf, size,
+ "Error:\t%s.\n"
+ "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
+
+ if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
+ break;
+
+ printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
+
+ if (value >= 2) {
+ printed += scnprintf(buf + printed, size - printed,
+ "For your workloads it needs to be <= 1\nHint:\t");
+ }
+ printed += scnprintf(buf + printed, size - printed,
+ "For system wide tracing it needs to be set to -1");
+
+ printed += scnprintf(buf + printed, size - printed,
+ ".\nHint:\tThe current value is %d.", value);
+ break;
+ default:
+ scnprintf(buf, size, "%s", emsg);
+ break;
+ }
+
+ return 0;
}
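One way a builtin might use the new strerror helper, sketched for illustration (the perf_evlist__open() call site and the out_delete_evlist label are assumed, not shown in this patch):

	if (perf_evlist__open(evlist) < 0) {
		char errbuf[BUFSIZ];

		perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
		pr_err("%s\n", errbuf);
		goto out_delete_evlist;
	}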