Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r--  tools/perf/builtin-trace.c  159
1 file changed, 123 insertions(+), 36 deletions(-)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f5b3a1e9c1dd..4f0bbffee05f 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-trace.c
*
@@ -12,14 +13,13 @@
* Initially based on the 'trace' prototype by Thomas Gleixner:
*
* http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
#include "util/bpf_map.h"
+#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
@@ -62,10 +62,11 @@
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
+#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>
-#include "sane_ctype.h"
+#include <linux/ctype.h>
#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
@@ -403,6 +404,11 @@ static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
#define SCA_STRARRAY syscall_arg__scnprintf_strarray
+size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
+{
+ return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
+}
+
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
size_t printed;
@@ -482,6 +488,15 @@ static const char *bpf_cmd[] = {
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");
+static const char *fsmount_flags[] = {
+ [1] = "CLOEXEC",
+};
+static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
+
+#include "trace/beauty/generated/fsconfig_arrays.c"
+
+static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
+
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
@@ -642,6 +657,10 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
{ .scnprintf = SCA_STRARRAY, \
.parm = &strarray__##array, }
+#define STRARRAY_FLAGS(name, array) \
+ { .scnprintf = SCA_STRARRAY_FLAGS, \
+ .parm = &strarray__##array, }
+
#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
@@ -713,6 +732,15 @@ static struct syscall_fmt {
[2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
{ .name = "flock",
.arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
+ { .name = "fsconfig",
+ .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
+ { .name = "fsmount",
+ .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
+ [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
+ { .name = "fspick",
+ .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
+ [1] = { .scnprintf = SCA_FILENAME, /* path */ },
+ [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
{ .name = "fstat", .alias = "newfstat", },
{ .name = "fstatat", .alias = "newfstatat", },
{ .name = "futex",
@@ -775,6 +803,12 @@ static struct syscall_fmt {
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
[3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
.mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
+ { .name = "move_mount",
+ .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
+ [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
+ [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
+ [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
+ [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
{ .name = "mprotect",
.arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
[2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
@@ -879,6 +913,8 @@ static struct syscall_fmt {
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
{ .name = "symlinkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
+ { .name = "sync_file_range",
+ .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
{ .name = "tgkill",
.arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "tkill",
@@ -937,8 +973,14 @@ struct syscall {
struct syscall_arg_fmt *arg_fmt;
};
+/*
+ * Must match what is in the BPF program:
+ *
+ * tools/perf/examples/bpf/augmented_raw_syscalls.c
+ */
struct bpf_map_syscall_entry {
bool enabled;
+ u16 string_args_len[6];
};
/*
@@ -998,10 +1040,10 @@ static struct thread_trace *thread_trace__new(void)
{
struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
- if (ttrace)
+ if (ttrace) {
ttrace->files.max = -1;
-
- ttrace->syscall_stats = intlist__new(NULL);
+ ttrace->syscall_stats = intlist__new(NULL);
+ }
return ttrace;
}
@@ -1192,8 +1234,17 @@ static void thread__set_filename_pos(struct thread *thread, const char *bf,
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
struct augmented_arg *augmented_arg = arg->augmented.args;
+ size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
+ /*
+ * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
+ * we would have two strings, each prefixed by its size.
+ */
+ int consumed = sizeof(*augmented_arg) + augmented_arg->size;
- return scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
+ arg->augmented.args = ((void *)arg->augmented.args) + consumed;
+ arg->augmented.size -= consumed;
+
+ return printed;
}
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
@@ -1381,10 +1432,11 @@ static int syscall__set_arg_fmts(struct syscall *sc)
if (sc->fmt && sc->fmt->arg[idx].scnprintf)
continue;
+ len = strlen(field->name);
+
if (strcmp(field->type, "const char *") == 0 &&
- (strcmp(field->name, "filename") == 0 ||
- strcmp(field->name, "path") == 0 ||
- strcmp(field->name, "pathname") == 0))
+ ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
+ strstr(field->name, "path") != NULL))
sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
sc->arg_fmt[idx].scnprintf = SCA_PTR;
@@ -1395,8 +1447,7 @@ static int syscall__set_arg_fmts(struct syscall *sc)
else if ((strcmp(field->type, "int") == 0 ||
strcmp(field->type, "unsigned int") == 0 ||
strcmp(field->type, "long") == 0) &&
- (len = strlen(field->name)) >= 2 &&
- strcmp(field->name + len - 2, "fd") == 0) {
+ len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
/*
* /sys/kernel/tracing/events/syscalls/sys_enter*
* egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
@@ -1478,12 +1529,12 @@ static int trace__read_syscall_info(struct trace *trace, int id)
static int trace__validate_ev_qualifier(struct trace *trace)
{
- int err = 0, i;
- size_t nr_allocated;
+ int err = 0;
+ bool printed_invalid_prefix = false;
struct str_node *pos;
+ size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
- trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
- trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
+ trace->ev_qualifier_ids.entries = malloc(nr_allocated *
sizeof(trace->ev_qualifier_ids.entries[0]));
if (trace->ev_qualifier_ids.entries == NULL) {
@@ -1493,9 +1544,6 @@ static int trace__validate_ev_qualifier(struct trace *trace)
goto out;
}
- nr_allocated = trace->ev_qualifier_ids.nr;
- i = 0;
-
strlist__for_each_entry(pos, trace->ev_qualifier) {
const char *sc = pos->s;
int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
@@ -1505,17 +1553,18 @@ static int trace__validate_ev_qualifier(struct trace *trace)
if (id >= 0)
goto matches;
- if (err == 0) {
- fputs("Error:\tInvalid syscall ", trace->output);
- err = -EINVAL;
+ if (!printed_invalid_prefix) {
+ pr_debug("Skipping unknown syscalls: ");
+ printed_invalid_prefix = true;
} else {
- fputs(", ", trace->output);
+ pr_debug(", ");
}
- fputs(sc, trace->output);
+ pr_debug("%s", sc);
+ continue;
}
matches:
- trace->ev_qualifier_ids.entries[i++] = id;
+ trace->ev_qualifier_ids.entries[nr_used++] = id;
if (match_next == -1)
continue;
@@ -1523,7 +1572,7 @@ matches:
id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
if (id < 0)
break;
- if (nr_allocated == trace->ev_qualifier_ids.nr) {
+ if (nr_allocated == nr_used) {
void *entries;
nr_allocated += 8;
@@ -1536,20 +1585,19 @@ matches:
}
trace->ev_qualifier_ids.entries = entries;
}
- trace->ev_qualifier_ids.nr++;
- trace->ev_qualifier_ids.entries[i++] = id;
+ trace->ev_qualifier_ids.entries[nr_used++] = id;
}
}
- if (err < 0) {
- fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
- "\nHint:\tand: 'man syscalls'\n", trace->output);
-out_free:
- zfree(&trace->ev_qualifier_ids.entries);
- trace->ev_qualifier_ids.nr = 0;
- }
+ trace->ev_qualifier_ids.nr = nr_used;
out:
+ if (printed_invalid_prefix)
+ pr_debug("\n");
return err;
+out_free:
+ zfree(&trace->ev_qualifier_ids.entries);
+ trace->ev_qualifier_ids.nr = 0;
+ goto out;
}
/*
@@ -2676,6 +2724,25 @@ out_enomem:
}
#ifdef HAVE_LIBBPF_SUPPORT
+static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
+{
+ struct syscall *sc = trace__syscall_info(trace, NULL, id);
+ int arg = 0;
+
+ if (sc == NULL)
+ goto out;
+
+ for (; arg < sc->nr_args; ++arg) {
+ entry->string_args_len[arg] = 0;
+ if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
+ /* Should be set like strace -s strsize */
+ entry->string_args_len[arg] = PATH_MAX;
+ }
+ }
+out:
+ for (; arg < 6; ++arg)
+ entry->string_args_len[arg] = 0;
+}
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
{
int fd = bpf_map__fd(trace->syscalls.map);
@@ -2688,6 +2755,9 @@ static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
int key = trace->ev_qualifier_ids.entries[i];
+ if (value.enabled)
+ trace__init_bpf_map_syscall_args(trace, key, &value);
+
err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
if (err)
break;
@@ -2705,6 +2775,9 @@ static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
int err = 0, key;
for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
+ if (enabled)
+ trace__init_bpf_map_syscall_args(trace, key, &value);
+
err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
if (err)
break;
@@ -3628,7 +3701,12 @@ static int trace__config(const char *var, const char *value, void *arg)
struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option);
- err = parse_events_option(&o, value, 0);
+ /*
+ * We can't propagate parse_events_option() return, as it is 1
+ * for failure while perf_config() expects -1.
+ */
+ if (parse_events_option(&o, value, 0))
+ err = -1;
} else if (!strcmp(var, "trace.show_timestamp")) {
trace->show_tstamp = perf_config_bool(var, value);
} else if (!strcmp(var, "trace.show_duration")) {
@@ -3787,6 +3865,15 @@ int cmd_trace(int argc, const char **argv)
goto out;
}
+ /*
+ * Parsing .perfconfig may entail creating a BPF event, that may need
+ * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
+ * is too small. This affects just this process, not touching the
+ * global setting. If it fails we'll get something in 'perf trace -v'
+ * to help diagnose the problem.
+ */
+ rlimit__bump_memlock();
+
err = perf_config(trace__config, &trace);
if (err)
goto out;