Diffstat:
-rw-r--r--  tools/perf/util/record.c  58
1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 07e4b96a6625..e867de8ddaaa 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -15,6 +15,8 @@
#include "record.h"
#include "../perf-sys.h"
#include "topdown.h"
+#include "map_symbol.h"
+#include "mem-events.h"
/*
* evsel__config_leader_sampling() uses special rules for leader sampling.
@@ -23,11 +25,12 @@
*/
static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
{
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
- if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader)) {
+ if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader) ||
+ is_mem_loads_aux_event(leader)) {
evlist__for_each_entry(evlist, evsel) {
- if (evsel->leader == leader && evsel != evsel->leader)
+ if (evsel__leader(evsel) == leader && evsel != evsel__leader(evsel))
return evsel;
}
}
@@ -50,7 +53,7 @@ static u64 evsel__config_term_mask(struct evsel *evsel)
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
struct perf_event_attr *attr = &evsel->core.attr;
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
struct evsel *read_sampler;
u64 term_types, freq_mask;
@@ -89,22 +92,14 @@ static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *ev
leader->core.attr.sample_type;
}
-void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
- struct callchain_param *callchain)
+void evlist__config(struct evlist *evlist, struct record_opts *opts, struct callchain_param *callchain)
{
struct evsel *evsel;
bool use_sample_identifier = false;
bool use_comm_exec;
bool sample_id = opts->sample_id;
- /*
- * Set the evsel leader links before we configure attributes,
- * since some might depend on this info.
- */
- if (opts->group)
- perf_evlist__set_leader(evlist);
-
- if (evlist->core.cpus->map[0] < 0)
+ if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
opts->no_inherit = true;
use_comm_exec = perf_can_comm_exec();
@@ -119,7 +114,7 @@ void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
evlist__for_each_entry(evlist, evsel)
evsel__config_leader_sampling(evsel, evlist);
- if (opts->full_auxtrace) {
+ if (opts->full_auxtrace || opts->sample_identifier) {
/*
* Need to be able to synthesize and parse selected events with
* arbitrary sample types, which requires always being able to
@@ -144,7 +139,7 @@ void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
evsel__set_sample_id(evsel, use_sample_identifier);
}
- perf_evlist__set_id_pos(evlist);
+ evlist__set_id_pos(evlist);
}
static int get_max_rate(unsigned int *rate)
@@ -155,9 +150,15 @@ static int get_max_rate(unsigned int *rate)
static int record_opts__config_freq(struct record_opts *opts)
{
bool user_freq = opts->user_freq != UINT_MAX;
+ bool user_interval = opts->user_interval != ULLONG_MAX;
unsigned int max_rate;
- if (opts->user_interval != ULLONG_MAX)
+ if (user_interval && user_freq) {
+ pr_err("cannot set frequency and period at the same time\n");
+ return -1;
+ }
+
+ if (user_interval)
opts->default_interval = opts->user_interval;
if (user_freq)
opts->freq = opts->user_freq;
@@ -202,10 +203,10 @@ static int record_opts__config_freq(struct record_opts *opts)
* Default frequency is over current maximum.
*/
if (max_rate < opts->freq) {
- pr_warning("Lowering default frequency rate to %u.\n"
+ pr_warning("Lowering default frequency rate from %u to %u.\n"
"Please consider tweaking "
"/proc/sys/kernel/perf_event_max_sample_rate.\n",
- max_rate);
+ opts->freq, max_rate);
opts->freq = max_rate;
}
@@ -217,11 +218,12 @@ int record_opts__config(struct record_opts *opts)
return record_opts__config_freq(opts);
}
-bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
+bool evlist__can_select_event(struct evlist *evlist, const char *str)
{
struct evlist *temp_evlist;
struct evsel *evsel;
- int err, fd, cpu;
+ int err, fd;
+ struct perf_cpu cpu = { .cpu = 0 };
bool ret = false;
pid_t pid = -1;
@@ -229,23 +231,25 @@ bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
if (!temp_evlist)
return false;
- err = parse_events(temp_evlist, str, NULL);
+ err = parse_event(temp_evlist, str);
if (err)
goto out_delete;
evsel = evlist__last(temp_evlist);
- if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
- struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+ if (!evlist || perf_cpu_map__is_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
+ struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
+
+ if (cpus)
+ cpu = perf_cpu_map__cpu(cpus, 0);
- cpu = cpus ? cpus->map[0] : 0;
perf_cpu_map__put(cpus);
} else {
- cpu = evlist->core.cpus->map[0];
+ cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
}
while (1) {
- fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
+ fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1,
perf_event_open_cloexec_flag());
if (fd < 0) {
if (pid == -1 && errno == EACCES) {
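
For reference, the libperf accessor pattern this diff migrates to (struct perf_cpu plus perf_cpu_map__cpu() instead of indexing cpus->map[] directly) can be exercised on its own roughly as in the sketch below. This is a minimal, illustrative sketch, not part of the patch: it assumes the tools/lib/perf headers are on the include path and the program is linked against libperf; the main() wrapper and the printf are only there to show the calls that appear in the diff.

/*
 * Minimal sketch: look up the first online CPU through the libperf
 * accessors used in the patch, rather than reading cpus->map[0].
 * Assumes -I tools/lib/perf/include and linking against libperf.
 */
#include <stdio.h>
#include <perf/cpumap.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu cpu = { .cpu = 0 };	/* fallback, mirroring evlist__can_select_event() */

	if (cpus)
		cpu = perf_cpu_map__cpu(cpus, 0);	/* first entry of the map */

	printf("first online cpu: %d\n", cpu.cpu);
	perf_cpu_map__put(cpus);			/* release the reference */
	return 0;
}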