Diffstat (limited to 'tools/perf/util/cs-etm.c')
-rw-r--r--	tools/perf/util/cs-etm.c	1947
1 file changed, 1413 insertions(+), 534 deletions(-)
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index c283223fb31f..30f4bb3e7fa3 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -6,14 +6,15 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/coresight-pmu.h>
#include <linux/err.h>
-#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>
-#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>
#include "auxtrace.h"
@@ -34,10 +35,10 @@
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
+#include "tsc.h"
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"
-
-#define MAX_TIMESTAMP (~0ULL)
+#include "util/util.h"
struct cs_etm_auxtrace {
struct auxtrace auxtrace;
@@ -45,16 +46,30 @@ struct cs_etm_auxtrace {
struct auxtrace_heap heap;
struct itrace_synth_opts synth_opts;
struct perf_session *session;
- struct machine *machine;
- struct thread *unknown_thread;
+ struct perf_tsc_conversion tc;
+
+ /*
+	 * Timeless mode has no timestamps in the trace, so overlapping mmap lookups
+	 * are less accurate, but it produces smaller trace data. We use context IDs
+ * in the trace instead of matching timestamps with fork records so
+ * they're not really needed in the general case. Overlapping mmaps
+ * happen in cases like between a fork and an exec.
+ */
+ bool timeless_decoding;
- u8 timeless_decoding;
- u8 snapshot_mode;
- u8 data_queued;
- u8 sample_branches;
- u8 sample_instructions;
+ /*
+ * Per-thread ignores the trace channel ID and instead assumes that
+ * everything in a buffer comes from the same process regardless of
+ * which CPU it ran on. It also implies no context IDs so the TID is
+ * taken from the auxtrace buffer.
+ */
+ bool per_thread_decoding;
+ bool snapshot_mode;
+ bool data_queued;
+ bool has_virtual_ts; /* Virtual/Kernel timestamps in the trace. */
int num_cpu;
+ u64 latest_kernel_timestamp;
u32 auxtrace_type;
u64 branches_sample_type;
u64 branches_id;
@@ -62,17 +77,19 @@ struct cs_etm_auxtrace {
u64 instructions_sample_period;
u64 instructions_id;
u64 **metadata;
- u64 kernel_start;
unsigned int pmu_type;
+ enum cs_etm_pid_fmt pid_fmt;
};
struct cs_etm_traceid_queue {
u8 trace_chan_id;
- pid_t pid, tid;
u64 period_instructions;
size_t last_branch_pos;
union perf_event *event_buf;
struct thread *thread;
+ struct thread *prev_packet_thread;
+ ocsd_ex_level prev_packet_el;
+ ocsd_ex_level el;
struct branch_stack *last_branch;
struct branch_stack *last_branch_rb;
struct cs_etm_packet *prev_packet;
@@ -80,29 +97,43 @@ struct cs_etm_traceid_queue {
struct cs_etm_packet_queue packet_queue;
};
+enum cs_etm_format {
+ UNSET,
+ FORMATTED,
+ UNFORMATTED
+};
+
struct cs_etm_queue {
struct cs_etm_auxtrace *etm;
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
unsigned int queue_nr;
- u8 pending_timestamp;
+ u8 pending_timestamp_chan_id;
+ enum cs_etm_format format;
u64 offset;
const unsigned char *buf;
size_t buf_len, buf_used;
/* Conversion between traceID and index in traceid_queues array */
struct intlist *traceid_queues_list;
struct cs_etm_traceid_queue **traceid_queues;
+ /* Conversion between traceID and metadata pointers */
+ struct intlist *traceid_list;
+ /*
+ * Same as traceid_list, but traceid_list may be a reference to another
+ * queue's which has a matching sink ID.
+ */
+ struct intlist *own_traceid_list;
+ u32 sink_id;
};
-/* RB tree for quick conversion between traceID and metadata pointers */
-static struct intlist *traceid_list;
-
-static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
-static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
+static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
pid_t tid);
static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
+static int cs_etm__metadata_get_trace_id(u8 *trace_chan_id, u64 *cpu_metadata);
+static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu);
+static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata);
/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300
@@ -117,6 +148,7 @@ static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
(queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
+#define SINK_UNSET ((u32) -1)
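As a minimal sketch (not from this patch) of how the queue-number packing macros above round-trip:

	unsigned int cs_queue_nr = TO_CS_QUEUE_NR(3, 0x10);	/* 3 << 16 | 0x10 == 0x30010 */
	unsigned int queue_nr = TO_QUEUE_NR(cs_queue_nr);	/* == 3 */
	u8 trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);	/* == 0x10 */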
static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
@@ -128,12 +160,12 @@ static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
return CS_ETM_PROTO_ETMV3;
}
-static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
+static int cs_etm__get_magic(struct cs_etm_queue *etmq, u8 trace_chan_id, u64 *magic)
{
struct int_node *inode;
u64 *metadata;
- inode = intlist__find(traceid_list, trace_chan_id);
+ inode = intlist__find(etmq->traceid_list, trace_chan_id);
if (!inode)
return -EINVAL;
@@ -142,12 +174,12 @@ static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
return 0;
}
-int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
+int cs_etm__get_cpu(struct cs_etm_queue *etmq, u8 trace_chan_id, int *cpu)
{
struct int_node *inode;
u64 *metadata;
- inode = intlist__find(traceid_list, trace_chan_id);
+ inode = intlist__find(etmq->traceid_list, trace_chan_id);
if (!inode)
return -EINVAL;
@@ -156,17 +188,358 @@ int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
return 0;
}
+/*
+ * The returned PID format is presented as an enum:
+ *
+ * CS_ETM_PIDFMT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced.
+ * CS_ETM_PIDFMT_CTXTID2: CONTEXTIDR_EL2 is traced.
+ * CS_ETM_PIDFMT_NONE: No context IDs
+ *
+ * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
+ * are enabled at the same time when the session runs on an EL2 kernel.
+ * This means both CONTEXTIDR_EL1 and CONTEXTIDR_EL2 will be
+ * recorded in the trace data; in that case the tool will selectively
+ * use CONTEXTIDR_EL2 as the PID.
+ *
+ * The result is cached in etm->pid_fmt so this function only needs to be called
+ * when processing the aux info.
+ */
+static enum cs_etm_pid_fmt cs_etm__init_pid_fmt(u64 *metadata)
+{
+ u64 val;
+
+ if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
+ val = metadata[CS_ETM_ETMCR];
+ /* CONTEXTIDR is traced */
+ if (val & BIT(ETM_OPT_CTXTID))
+ return CS_ETM_PIDFMT_CTXTID;
+ } else {
+ val = metadata[CS_ETMV4_TRCCONFIGR];
+ /* CONTEXTIDR_EL2 is traced */
+ if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
+ return CS_ETM_PIDFMT_CTXTID2;
+ /* CONTEXTIDR_EL1 is traced */
+ else if (val & BIT(ETM4_CFG_BIT_CTXTID))
+ return CS_ETM_PIDFMT_CTXTID;
+ }
+
+ return CS_ETM_PIDFMT_NONE;
+}
+
+enum cs_etm_pid_fmt cs_etm__get_pid_fmt(struct cs_etm_queue *etmq)
+{
+ return etmq->etm->pid_fmt;
+}
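A minimal sketch of the selection rule described in the comment above; the helper name and parameters are hypothetical and not part of this patch:

	static pid_t example_pick_pid(enum cs_etm_pid_fmt fmt,
				      u32 ctxtid_el1, u32 ctxtid_el2)
	{
		switch (fmt) {
		case CS_ETM_PIDFMT_CTXTID2:
			return (pid_t)ctxtid_el2; /* EL2 kernel: PID traced in CONTEXTIDR_EL2 */
		case CS_ETM_PIDFMT_CTXTID:
			return (pid_t)ctxtid_el1; /* CONTEXTIDR or CONTEXTIDR_EL1 carries the PID */
		case CS_ETM_PIDFMT_NONE:
		default:
			return -1;		  /* no context IDs in the trace */
		}
	}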
+
+static int cs_etm__insert_trace_id_node(struct cs_etm_queue *etmq,
+ u8 trace_chan_id, u64 *cpu_metadata)
+{
+ /* Get an RB node for this CPU */
+ struct int_node *inode = intlist__findnew(etmq->traceid_list, trace_chan_id);
+
+ /* Something went wrong, no need to continue */
+ if (!inode)
+ return -ENOMEM;
+
+ /* Disallow re-mapping a different traceID to metadata pair. */
+ if (inode->priv) {
+ u64 *curr_cpu_data = inode->priv;
+ u8 curr_chan_id;
+ int err;
+
+ if (curr_cpu_data[CS_ETM_CPU] != cpu_metadata[CS_ETM_CPU]) {
+ /*
+ * With > CORESIGHT_TRACE_IDS_MAX ETMs, overlapping IDs
+ * are expected (but not supported) in per-thread mode,
+ * rather than signifying an error.
+ */
+ if (etmq->etm->per_thread_decoding)
+ pr_err("CS_ETM: overlapping Trace IDs aren't currently supported in per-thread mode\n");
+ else
+ pr_err("CS_ETM: map mismatch between HW_ID packet CPU and Trace ID\n");
+
+ return -EINVAL;
+ }
+
+ /* check that the mapped ID matches */
+ err = cs_etm__metadata_get_trace_id(&curr_chan_id, curr_cpu_data);
+ if (err)
+ return err;
+
+ if (curr_chan_id != trace_chan_id) {
+ pr_err("CS_ETM: mismatch between CPU trace ID and HW_ID packet ID\n");
+ return -EINVAL;
+ }
+
+ /* Skip re-adding the same mappings if everything matched */
+ return 0;
+ }
+
+ /* Not one we've seen before, associate the traceID with the metadata pointer */
+ inode->priv = cpu_metadata;
+
+ return 0;
+}
+
+static struct cs_etm_queue *cs_etm__get_queue(struct cs_etm_auxtrace *etm, int cpu)
+{
+ if (etm->per_thread_decoding)
+ return etm->queues.queue_array[0].priv;
+ else
+ return etm->queues.queue_array[cpu].priv;
+}
+
+static int cs_etm__map_trace_id_v0(struct cs_etm_auxtrace *etm, u8 trace_chan_id,
+ u64 *cpu_metadata)
+{
+ struct cs_etm_queue *etmq;
+
+ /*
+ * If the queue is unformatted then only save one mapping in the
+ * queue associated with that CPU so only one decoder is made.
+ */
+ etmq = cs_etm__get_queue(etm, cpu_metadata[CS_ETM_CPU]);
+ if (etmq->format == UNFORMATTED)
+ return cs_etm__insert_trace_id_node(etmq, trace_chan_id,
+ cpu_metadata);
+
+ /*
+ * Otherwise, version 0 trace IDs are global so save them into every
+ * queue.
+ */
+ for (unsigned int i = 0; i < etm->queues.nr_queues; ++i) {
+ int ret;
+
+ etmq = etm->queues.queue_array[i].priv;
+ ret = cs_etm__insert_trace_id_node(etmq, trace_chan_id,
+ cpu_metadata);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs_etm__process_trace_id_v0(struct cs_etm_auxtrace *etm, int cpu,
+ u64 hw_id)
+{
+ int err;
+ u64 *cpu_data;
+ u8 trace_chan_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
+
+ cpu_data = get_cpu_data(etm, cpu);
+ if (cpu_data == NULL)
+ return -EINVAL;
+
+ err = cs_etm__map_trace_id_v0(etm, trace_chan_id, cpu_data);
+ if (err)
+ return err;
+
+ /*
+	 * If we are picking up the association from the packet, we need to plug
+	 * the correct trace ID into the metadata for setting up decoders later.
+ */
+ return cs_etm__metadata_set_trace_id(trace_chan_id, cpu_data);
+}
+
+static int cs_etm__process_trace_id_v0_1(struct cs_etm_auxtrace *etm, int cpu,
+ u64 hw_id)
+{
+ struct cs_etm_queue *etmq = cs_etm__get_queue(etm, cpu);
+ int ret;
+ u64 *cpu_data;
+ u32 sink_id = FIELD_GET(CS_AUX_HW_ID_SINK_ID_MASK, hw_id);
+ u8 trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
+
+ /*
+	 * Check that the sink ID hasn't changed in per-cpu mode. In per-thread mode,
+ * let it pass for now until an actual overlapping trace ID is hit. In
+ * most cases IDs won't overlap even if the sink changes.
+ */
+ if (!etmq->etm->per_thread_decoding && etmq->sink_id != SINK_UNSET &&
+ etmq->sink_id != sink_id) {
+ pr_err("CS_ETM: mismatch between sink IDs\n");
+ return -EINVAL;
+ }
+
+ etmq->sink_id = sink_id;
+
+ /* Find which other queues use this sink and link their ID maps */
+ for (unsigned int i = 0; i < etm->queues.nr_queues; ++i) {
+ struct cs_etm_queue *other_etmq = etm->queues.queue_array[i].priv;
+
+ /* Different sinks, skip */
+ if (other_etmq->sink_id != etmq->sink_id)
+ continue;
+
+ /* Already linked, skip */
+ if (other_etmq->traceid_list == etmq->traceid_list)
+ continue;
+
+ /* At the point of first linking, this one should be empty */
+ if (!intlist__empty(etmq->traceid_list)) {
+ pr_err("CS_ETM: Can't link populated trace ID lists\n");
+ return -EINVAL;
+ }
+
+ etmq->own_traceid_list = NULL;
+ intlist__delete(etmq->traceid_list);
+ etmq->traceid_list = other_etmq->traceid_list;
+ break;
+ }
+
+ cpu_data = get_cpu_data(etm, cpu);
+ ret = cs_etm__insert_trace_id_node(etmq, trace_id, cpu_data);
+ if (ret)
+ return ret;
+
+ ret = cs_etm__metadata_set_trace_id(trace_id, cpu_data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cs_etm__metadata_get_trace_id(u8 *trace_chan_id, u64 *cpu_metadata)
+{
+ u64 cs_etm_magic = cpu_metadata[CS_ETM_MAGIC];
+
+ switch (cs_etm_magic) {
+ case __perf_cs_etmv3_magic:
+ *trace_chan_id = (u8)(cpu_metadata[CS_ETM_ETMTRACEIDR] &
+ CORESIGHT_TRACE_ID_VAL_MASK);
+ break;
+ case __perf_cs_etmv4_magic:
+ case __perf_cs_ete_magic:
+ *trace_chan_id = (u8)(cpu_metadata[CS_ETMV4_TRCTRACEIDR] &
+ CORESIGHT_TRACE_ID_VAL_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Update the metadata trace ID from the value found in the AUX_OUTPUT_HW_ID packet.
+ */
+static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
+{
+ u64 cs_etm_magic = cpu_metadata[CS_ETM_MAGIC];
+
+ switch (cs_etm_magic) {
+ case __perf_cs_etmv3_magic:
+ cpu_metadata[CS_ETM_ETMTRACEIDR] = trace_chan_id;
+ break;
+ case __perf_cs_etmv4_magic:
+ case __perf_cs_ete_magic:
+ cpu_metadata[CS_ETMV4_TRCTRACEIDR] = trace_chan_id;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Get the metadata index for a specific CPU from the metadata array.
+ */
+static int get_cpu_data_idx(struct cs_etm_auxtrace *etm, int cpu)
+{
+ int i;
+
+ for (i = 0; i < etm->num_cpu; i++) {
+ if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * Get the metadata for a specific CPU from the metadata array.
+ */
+static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
+{
+ int idx = get_cpu_data_idx(etm, cpu);
+
+ return (idx != -1) ? etm->metadata[idx] : NULL;
+}
+
+/*
+ * Handle the PERF_RECORD_AUX_OUTPUT_HW_ID event.
+ *
+ * The payload associates the Trace ID and the CPU.
+ * The routine is tolerant of seeing multiple packets with the same association,
+ * but a CPU / Trace ID association changing during a session is an error.
+ */
+static int cs_etm__process_aux_output_hw_id(struct perf_session *session,
+ union perf_event *event)
+{
+ struct cs_etm_auxtrace *etm;
+ struct perf_sample sample;
+ struct evsel *evsel;
+ u64 hw_id;
+ int cpu, version, err;
+
+ /* extract and parse the HW ID */
+ hw_id = event->aux_output_hw_id.hw_id;
+ version = FIELD_GET(CS_AUX_HW_ID_MAJOR_VERSION_MASK, hw_id);
+
+ /* check that we can handle this version */
+ if (version > CS_AUX_HW_ID_MAJOR_VERSION) {
+ pr_err("CS ETM Trace: PERF_RECORD_AUX_OUTPUT_HW_ID version %d not supported. Please update Perf.\n",
+ version);
+ return -EINVAL;
+ }
+
+ /* get access to the etm metadata */
+ etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace);
+ if (!etm || !etm->metadata)
+ return -EINVAL;
+
+ /* parse the sample to get the CPU */
+ evsel = evlist__event2evsel(session->evlist, event);
+ if (!evsel)
+ return -EINVAL;
+ perf_sample__init(&sample, /*all=*/false);
+ err = evsel__parse_sample(evsel, event, &sample);
+ if (err)
+ goto out;
+ cpu = sample.cpu;
+ if (cpu == -1) {
+ /* no CPU in the sample - possibly recorded with an old version of perf */
+		pr_err("CS_ETM: no CPU in AUX_OUTPUT_HW_ID sample. Use a compatible perf to record.\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (FIELD_GET(CS_AUX_HW_ID_MINOR_VERSION_MASK, hw_id) == 0) {
+ err = cs_etm__process_trace_id_v0(etm, cpu, hw_id);
+ goto out;
+ }
+
+ err = cs_etm__process_trace_id_v0_1(etm, cpu, hw_id);
+out:
+ perf_sample__exit(&sample);
+ return err;
+}
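As a sketch of the payload handled above, the fields can be pulled out with FIELD_GET() and the CS_AUX_HW_ID_* masks used in this patch; the helper below is illustrative only (the sink ID field only exists from version 0.1 onwards):

	static void example_dump_hw_id(u64 hw_id)
	{
		int major = FIELD_GET(CS_AUX_HW_ID_MAJOR_VERSION_MASK, hw_id);
		int minor = FIELD_GET(CS_AUX_HW_ID_MINOR_VERSION_MASK, hw_id);
		u8 trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);

		if (minor >= 1) {
			u32 sink_id = FIELD_GET(CS_AUX_HW_ID_SINK_ID_MASK, hw_id);
			pr_debug("HW_ID v%d.%d: trace ID %d sink %#x\n",
				 major, minor, trace_id, sink_id);
		} else {
			pr_debug("HW_ID v%d.%d: trace ID %d\n", major, minor, trace_id);
		}
	}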
+
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
u8 trace_chan_id)
{
/*
- * Wnen a timestamp packet is encountered the backend code
+ * When a timestamp packet is encountered the backend code
* is stopped so that the front end has time to process packets
* that were accumulated in the traceID queue. Since there can
* be more than one channel per cs_etm_queue, we need to specify
* what traceID queue needs servicing.
*/
- etmq->pending_timestamp = trace_chan_id;
+ etmq->pending_timestamp_chan_id = trace_chan_id;
}
static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
@@ -174,22 +547,22 @@ static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
{
struct cs_etm_packet_queue *packet_queue;
- if (!etmq->pending_timestamp)
+ if (!etmq->pending_timestamp_chan_id)
return 0;
if (trace_chan_id)
- *trace_chan_id = etmq->pending_timestamp;
+ *trace_chan_id = etmq->pending_timestamp_chan_id;
packet_queue = cs_etm__etmq_get_packet_queue(etmq,
- etmq->pending_timestamp);
+ etmq->pending_timestamp_chan_id);
if (!packet_queue)
return 0;
/* Acknowledge pending status */
- etmq->pending_timestamp = 0;
+ etmq->pending_timestamp_chan_id = 0;
/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
- return packet_queue->timestamp;
+ return packet_queue->cs_timestamp;
}
static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
@@ -241,9 +614,11 @@ static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
cs_etm__clear_packet_queue(&tidq->packet_queue);
queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
- tidq->tid = queue->tid;
- tidq->pid = -1;
tidq->trace_chan_id = trace_chan_id;
+ tidq->el = tidq->prev_packet_el = ocsd_EL_unknown;
+ tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1,
+ queue->tid);
+ tidq->prev_packet_thread = machine__idle_thread(&etm->session->machines.host);
tidq->packet = zalloc(sizeof(struct cs_etm_packet));
if (!tidq->packet)
@@ -290,7 +665,7 @@ static struct cs_etm_traceid_queue
struct cs_etm_traceid_queue *tidq, **traceid_queues;
struct cs_etm_auxtrace *etm = etmq->etm;
- if (etm->timeless_decoding)
+ if (etm->per_thread_decoding)
trace_chan_id = CS_ETM_PER_THREAD_TRACEID;
traceid_queues_list = etmq->traceid_queues_list;
@@ -371,73 +746,102 @@ static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
{
struct cs_etm_packet *tmp;
- if (etm->sample_branches || etm->synth_opts.last_branch ||
- etm->sample_instructions) {
+ if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
+ etm->synth_opts.instructions) {
/*
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
* the next incoming packet.
+ *
+ * Threads and exception levels are also tracked for both the
+ * previous and current packets. This is because the previous
+ * packet is used for the 'from' IP for branch samples, so the
+ * thread at that time must also be assigned to that sample.
+ * Across discontinuity packets the thread can change, so by
+ * tracking the thread for the previous packet the branch sample
+ * will have the correct info.
*/
tmp = tidq->packet;
tidq->packet = tidq->prev_packet;
tidq->prev_packet = tmp;
+ tidq->prev_packet_el = tidq->el;
+ thread__put(tidq->prev_packet_thread);
+ tidq->prev_packet_thread = thread__get(tidq->thread);
}
}
-static void cs_etm__packet_dump(const char *pkt_string)
+static void cs_etm__packet_dump(const char *pkt_string, void *data)
{
const char *color = PERF_COLOR_BLUE;
int len = strlen(pkt_string);
+ struct cs_etm_queue *etmq = data;
+ char queue_nr[64];
+
+ if (verbose)
+ snprintf(queue_nr, sizeof(queue_nr), "Qnr:%d; ", etmq->queue_nr);
+ else
+ queue_nr[0] = '\0';
if (len && (pkt_string[len-1] == '\n'))
- color_fprintf(stdout, color, " %s", pkt_string);
+ color_fprintf(stdout, color, " %s%s", queue_nr, pkt_string);
else
- color_fprintf(stdout, color, " %s\n", pkt_string);
+ color_fprintf(stdout, color, " %s%s\n", queue_nr, pkt_string);
fflush(stdout);
}
static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int idx,
- u32 etmidr)
+ u64 *metadata, u32 etmidr)
{
- u64 **metadata = etm->metadata;
-
- t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
- t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
- t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
+ t_params->protocol = cs_etm__get_v7_protocol_version(etmidr);
+ t_params->etmv3.reg_ctrl = metadata[CS_ETM_ETMCR];
+ t_params->etmv3.reg_trc_id = metadata[CS_ETM_ETMTRACEIDR];
}
static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int idx)
+ u64 *metadata)
{
- u64 **metadata = etm->metadata;
+ t_params->protocol = CS_ETM_PROTO_ETMV4i;
+ t_params->etmv4.reg_idr0 = metadata[CS_ETMV4_TRCIDR0];
+ t_params->etmv4.reg_idr1 = metadata[CS_ETMV4_TRCIDR1];
+ t_params->etmv4.reg_idr2 = metadata[CS_ETMV4_TRCIDR2];
+ t_params->etmv4.reg_idr8 = metadata[CS_ETMV4_TRCIDR8];
+ t_params->etmv4.reg_configr = metadata[CS_ETMV4_TRCCONFIGR];
+ t_params->etmv4.reg_traceidr = metadata[CS_ETMV4_TRCTRACEIDR];
+}
- t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
- t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
- t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
- t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
- t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
- t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
+static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
+ u64 *metadata)
+{
+ t_params->protocol = CS_ETM_PROTO_ETE;
+ t_params->ete.reg_idr0 = metadata[CS_ETE_TRCIDR0];
+ t_params->ete.reg_idr1 = metadata[CS_ETE_TRCIDR1];
+ t_params->ete.reg_idr2 = metadata[CS_ETE_TRCIDR2];
+ t_params->ete.reg_idr8 = metadata[CS_ETE_TRCIDR8];
+ t_params->ete.reg_configr = metadata[CS_ETE_TRCCONFIGR];
+ t_params->ete.reg_traceidr = metadata[CS_ETE_TRCTRACEIDR];
+ t_params->ete.reg_devarch = metadata[CS_ETE_TRCDEVARCH];
}
static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm)
+ struct cs_etm_queue *etmq)
{
- int i;
- u32 etmidr;
- u64 architecture;
+ struct int_node *inode;
- for (i = 0; i < etm->num_cpu; i++) {
- architecture = etm->metadata[i][CS_ETM_MAGIC];
+ intlist__for_each_entry(inode, etmq->traceid_list) {
+ u64 *metadata = inode->priv;
+ u64 architecture = metadata[CS_ETM_MAGIC];
+ u32 etmidr;
switch (architecture) {
case __perf_cs_etmv3_magic:
- etmidr = etm->metadata[i][CS_ETM_ETMIDR];
- cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
+ etmidr = metadata[CS_ETM_ETMIDR];
+ cs_etm__set_trace_param_etmv3(t_params++, metadata, etmidr);
break;
case __perf_cs_etmv4_magic:
- cs_etm__set_trace_param_etmv4(t_params, etm, i);
+ cs_etm__set_trace_param_etmv4(t_params++, metadata);
+ break;
+ case __perf_cs_ete_magic:
+ cs_etm__set_trace_param_ete(t_params++, metadata);
break;
default:
return -EINVAL;
@@ -459,7 +863,7 @@ static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
d_params->packet_printer = cs_etm__packet_dump;
d_params->operation = mode;
d_params->data = etmq;
- d_params->formatted = true;
+ d_params->formatted = etmq->format == FORMATTED;
d_params->fsyncs = false;
d_params->hsyncs = false;
d_params->frame_aligned = true;
@@ -469,44 +873,23 @@ out:
return ret;
}
-static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
+static void cs_etm__dump_event(struct cs_etm_queue *etmq,
struct auxtrace_buffer *buffer)
{
int ret;
const char *color = PERF_COLOR_BLUE;
- struct cs_etm_decoder_params d_params;
- struct cs_etm_trace_params *t_params;
- struct cs_etm_decoder *decoder;
size_t buffer_used = 0;
fprintf(stdout, "\n");
color_fprintf(stdout, color,
- ". ... CoreSight ETM Trace data: size %zu bytes\n",
- buffer->size);
-
- /* Use metadata to fill in trace parameters for trace decoder */
- t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
+ ". ... CoreSight %s Trace data: size %#zx bytes\n",
+ cs_etm_decoder__get_name(etmq->decoder), buffer->size);
- if (!t_params)
- return;
-
- if (cs_etm__init_trace_params(t_params, etm))
- goto out_free;
-
- /* Set decoder parameters to simply print the trace packets */
- if (cs_etm__init_decoder_params(&d_params, NULL,
- CS_ETM_OPERATION_PRINT))
- goto out_free;
-
- decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
-
- if (!decoder)
- goto out_free;
do {
size_t consumed;
ret = cs_etm_decoder__process_data_block(
- decoder, buffer->offset,
+ etmq->decoder, buffer->offset,
&((u8 *)buffer->data)[buffer_used],
buffer->size - buffer_used, &consumed);
if (ret)
@@ -515,16 +898,12 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
buffer_used += consumed;
} while (buffer_used < buffer->size);
- cs_etm_decoder__free(decoder);
-
-out_free:
- zfree(&t_params);
+ cs_etm_decoder__reset(etmq->decoder);
}
static int cs_etm__flush_events(struct perf_session *session,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
- int ret;
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
@@ -534,15 +913,15 @@ static int cs_etm__flush_events(struct perf_session *session,
if (!tool->ordered_events)
return -EINVAL;
- ret = cs_etm__update_queues(etm);
-
- if (ret < 0)
- return ret;
-
- if (etm->timeless_decoding)
+ if (etm->timeless_decoding) {
+ /*
+ * Pass tid = -1 to process all queues. But likely they will have
+ * already been processed on PERF_RECORD_EXIT anyway.
+ */
return cs_etm__process_timeless_queues(etm, -1);
+ }
- return cs_etm__process_queues(etm);
+ return cs_etm__process_timestamped_queues(etm);
}
static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
@@ -560,6 +939,7 @@ static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
/* Free this traceid_queue from the array */
tidq = etmq->traceid_queues[idx];
thread__zput(tidq->thread);
+ thread__zput(tidq->prev_packet_thread);
zfree(&tidq->event_buf);
zfree(&tidq->last_branch);
zfree(&tidq->last_branch_rb);
@@ -584,6 +964,7 @@ static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
static void cs_etm__free_queue(void *priv)
{
+ struct int_node *inode, *tmp;
struct cs_etm_queue *etmq = priv;
if (!etmq)
@@ -591,6 +972,16 @@ static void cs_etm__free_queue(void *priv)
cs_etm_decoder__free(etmq->decoder);
cs_etm__free_traceid_queues(etmq);
+
+ if (etmq->own_traceid_list) {
+ /* First remove all traceID/metadata nodes for the RB tree */
+ intlist__for_each_entry_safe(inode, tmp, etmq->own_traceid_list)
+ intlist__remove(etmq->own_traceid_list, inode);
+
+ /* Then the RB tree itself */
+ intlist__delete(etmq->own_traceid_list);
+ }
+
free(etmq);
}
@@ -613,23 +1004,15 @@ static void cs_etm__free_events(struct perf_session *session)
static void cs_etm__free(struct perf_session *session)
{
int i;
- struct int_node *inode, *tmp;
struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
cs_etm__free_events(session);
session->auxtrace = NULL;
- /* First remove all traceID/metadata nodes for the RB tree */
- intlist__for_each_entry_safe(inode, tmp, traceid_list)
- intlist__remove(traceid_list, inode);
- /* Then the RB tree itself */
- intlist__delete(traceid_list);
-
for (i = 0; i < aux->num_cpu; i++)
zfree(&aux->metadata[i]);
- thread__zput(aux->unknown_thread);
zfree(&aux->metadata);
zfree(&aux);
}
@@ -644,13 +1027,45 @@ static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
return evsel->core.attr.type == aux->pmu_type;
}
-static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
+static struct machine *cs_etm__get_machine(struct cs_etm_queue *etmq,
+ ocsd_ex_level el)
{
- struct machine *machine;
+ enum cs_etm_pid_fmt pid_fmt = cs_etm__get_pid_fmt(etmq);
- machine = etmq->etm->machine;
+ /*
+ * For any virtualisation based on nVHE (e.g. pKVM), or host kernels
+	 * running at EL1, assume everything is the host.
+ */
+ if (pid_fmt == CS_ETM_PIDFMT_CTXTID)
+ return &etmq->etm->session->machines.host;
- if (address >= etmq->etm->kernel_start) {
+ /*
+ * Not perfect, but otherwise assume anything in EL1 is the default
+ * guest, and everything else is the host. Distinguishing between guest
+ * and host userspaces isn't currently supported either. Neither is
+	 * multiple guest support. All this does is reduce the likelihood of
+ * decode errors where we look into the host kernel maps when it should
+ * have been the guest maps.
+ */
+ switch (el) {
+ case ocsd_EL1:
+ return machines__find_guest(&etmq->etm->session->machines,
+ DEFAULT_GUEST_KERNEL_ID);
+ case ocsd_EL3:
+ case ocsd_EL2:
+ case ocsd_EL0:
+ case ocsd_EL_unknown:
+ default:
+ return &etmq->etm->session->machines.host;
+ }
+}
+
+static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address,
+ ocsd_ex_level el)
+{
+ struct machine *machine = cs_etm__get_machine(etmq, el);
+
+ if (address >= machine__kernel_start(machine)) {
if (machine__is_host(machine))
return PERF_RECORD_MISC_KERNEL;
else
@@ -658,66 +1073,95 @@ static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
} else {
if (machine__is_host(machine))
return PERF_RECORD_MISC_USER;
- else if (perf_guest)
+ else {
+ /*
+ * Can't really happen at the moment because
+ * cs_etm__get_machine() will always return
+ * machines.host for any non EL1 trace.
+ */
return PERF_RECORD_MISC_GUEST_USER;
- else
- return PERF_RECORD_MISC_HYPERVISOR;
+ }
}
}
static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
- u64 address, size_t size, u8 *buffer)
+ u64 address, size_t size, u8 *buffer,
+ const ocsd_mem_space_acc_t mem_space)
{
u8 cpumode;
u64 offset;
int len;
- struct thread *thread;
- struct machine *machine;
struct addr_location al;
+ struct dso *dso;
struct cs_etm_traceid_queue *tidq;
+ int ret = 0;
if (!etmq)
return 0;
- machine = etmq->etm->machine;
- cpumode = cs_etm__cpu_mode(etmq, address);
+ addr_location__init(&al);
tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
if (!tidq)
- return 0;
+ goto out;
- thread = tidq->thread;
- if (!thread) {
- if (cpumode != PERF_RECORD_MISC_KERNEL)
- return 0;
- thread = etmq->etm->unknown_thread;
+ /*
+	 * We've already tracked EL alongside the PID in cs_etm__set_thread()
+ * so double check that it matches what OpenCSD thinks as well. It
+ * doesn't distinguish between EL0 and EL1 for this mem access callback
+ * so we had to do the extra tracking. Skip validation if it's any of
+ * the 'any' values.
+ */
+ if (!(mem_space == OCSD_MEM_SPACE_ANY ||
+ mem_space == OCSD_MEM_SPACE_N || mem_space == OCSD_MEM_SPACE_S)) {
+ if (mem_space & OCSD_MEM_SPACE_EL1N) {
+ /* Includes both non secure EL1 and EL0 */
+ assert(tidq->el == ocsd_EL1 || tidq->el == ocsd_EL0);
+ } else if (mem_space & OCSD_MEM_SPACE_EL2)
+ assert(tidq->el == ocsd_EL2);
+ else if (mem_space & OCSD_MEM_SPACE_EL3)
+ assert(tidq->el == ocsd_EL3);
}
- if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
- return 0;
+ cpumode = cs_etm__cpu_mode(etmq, address, tidq->el);
- if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
- dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
- return 0;
+ if (!thread__find_map(tidq->thread, cpumode, address, &al))
+ goto out;
- offset = al.map->map_ip(al.map, address);
+ dso = map__dso(al.map);
+ if (!dso)
+ goto out;
- map__load(al.map);
+ if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR &&
+ dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE))
+ goto out;
- len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
+ offset = map__map_ip(al.map, address);
- if (len <= 0)
- return 0;
+ map__load(al.map);
- return len;
+ len = dso__data_read_offset(dso, maps__machine(thread__maps(tidq->thread)),
+ offset, buffer, size);
+
+ if (len <= 0) {
+ ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
+ " Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
+ if (!dso__auxtrace_warned(dso)) {
+ pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
+ address,
+ dso__long_name(dso) ? dso__long_name(dso) : "Unknown");
+ dso__set_auxtrace_warned(dso);
+ }
+ goto out;
+ }
+ ret = len;
+out:
+ addr_location__exit(&al);
+ return ret;
}
-static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
+static struct cs_etm_queue *cs_etm__alloc_queue(void)
{
- struct cs_etm_decoder_params d_params;
- struct cs_etm_trace_params *t_params = NULL;
- struct cs_etm_queue *etmq;
-
- etmq = zalloc(sizeof(*etmq));
+ struct cs_etm_queue *etmq = zalloc(sizeof(*etmq));
if (!etmq)
return NULL;
@@ -725,39 +1169,17 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
if (!etmq->traceid_queues_list)
goto out_free;
- /* Use metadata to fill in trace parameters for trace decoder */
- t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
-
- if (!t_params)
- goto out_free;
-
- if (cs_etm__init_trace_params(t_params, etm))
- goto out_free;
-
- /* Set decoder parameters to decode trace packets */
- if (cs_etm__init_decoder_params(&d_params, etmq,
- CS_ETM_OPERATION_DECODE))
- goto out_free;
-
- etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
-
- if (!etmq->decoder)
- goto out_free;
-
/*
- * Register a function to handle all memory accesses required by
- * the trace decoder library.
+	 * Create an RB tree for traceID-metadata tuples. Since the conversion
+ * has to be made for each packet that gets decoded, optimizing access
+ * in anything other than a sequential array is worth doing.
*/
- if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
- 0x0L, ((u64) -1L),
- cs_etm__mem_access))
- goto out_free_decoder;
+ etmq->traceid_list = etmq->own_traceid_list = intlist__new(NULL);
+ if (!etmq->traceid_list)
+ goto out_free;
- zfree(&t_params);
return etmq;
-out_free_decoder:
- cs_etm_decoder__free(etmq->decoder);
out_free:
intlist__delete(etmq->traceid_queues_list);
free(etmq);
@@ -769,29 +1191,34 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
- int ret = 0;
- unsigned int cs_queue_nr;
- u8 trace_chan_id;
- u64 timestamp;
struct cs_etm_queue *etmq = queue->priv;
- if (list_empty(&queue->head) || etmq)
- goto out;
+ if (etmq)
+ return 0;
- etmq = cs_etm__alloc_queue(etm);
+ etmq = cs_etm__alloc_queue();
- if (!etmq) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!etmq)
+ return -ENOMEM;
queue->priv = etmq;
etmq->etm = etm;
etmq->queue_nr = queue_nr;
+ queue->cpu = queue_nr; /* Placeholder, may be reset to -1 in per-thread mode */
etmq->offset = 0;
+ etmq->sink_id = SINK_UNSET;
- if (etm->timeless_decoding)
- goto out;
+ return 0;
+}
+
+static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
+ struct cs_etm_queue *etmq,
+ unsigned int queue_nr)
+{
+ int ret = 0;
+ unsigned int cs_queue_nr;
+ u8 trace_chan_id;
+ u64 cs_timestamp;
/*
* We are under a CPU-wide trace scenario. As such we need to know
@@ -812,7 +1239,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
/*
* Run decoder on the trace block. The decoder will stop when
- * encountering a timestamp, a full packet queue or the end of
+ * encountering a CS timestamp, a full packet queue or the end of
* trace for that block.
*/
ret = cs_etm__decode_data_block(etmq);
@@ -823,10 +1250,10 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
* Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
* the timestamp calculation for us.
*/
- timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
+ cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
/* We found a timestamp, no need to continue. */
- if (timestamp)
+ if (cs_timestamp)
break;
/*
@@ -847,41 +1274,14 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
* chronological order.
*
* Note that packets decoded above are still in the traceID's packet
- * queue and will be processed in cs_etm__process_queues().
+ * queue and will be processed in cs_etm__process_timestamped_queues().
*/
cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
- ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
+ ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
out:
return ret;
}
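In outline, the min heap primed here is consumed by cs_etm__process_timestamped_queues() like this (a simplified sketch; the decode loop and error handling are omitted):

	while (etm->heap.heap_cnt) {
		unsigned int cs_queue_nr = etm->heap.heap_array[0].queue_nr;
		struct cs_etm_queue *etmq =
			etm->queues.queue_array[TO_QUEUE_NR(cs_queue_nr)].priv;
		u8 trace_chan_id;
		u64 cs_timestamp;

		auxtrace_heap__pop(&etm->heap);
		/* ... decode this queue until it reports its next CS timestamp ... */
		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
		if (cs_timestamp)
			auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
	}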
-static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
-{
- unsigned int i;
- int ret;
-
- if (!etm->kernel_start)
- etm->kernel_start = machine__kernel_start(etm->machine);
-
- for (i = 0; i < etm->queues.nr_queues; i++) {
- ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
-{
- if (etm->queues.new_data) {
- etm->queues.new_data = false;
- return cs_etm__setup_queues(etm);
- }
-
- return 0;
-}
-
static inline
void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
struct cs_etm_traceid_queue *tidq)
@@ -938,8 +1338,8 @@ static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
{
u8 instrBytes[2];
- cs_etm__mem_access(etmq, trace_chan_id, addr,
- ARRAY_SIZE(instrBytes), instrBytes);
+ cs_etm__mem_access(etmq, trace_chan_id, addr, ARRAY_SIZE(instrBytes),
+ instrBytes, 0);
/*
* T32 instruction size is indicated by bits[15:11] of the first
* 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
@@ -950,8 +1350,12 @@ static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
- /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
- if (packet->sample_type == CS_ETM_DISCONTINUITY)
+ /*
+ * Return 0 for packets that have no addresses so that CS_ETM_INVAL_ADDR doesn't
+ * appear in samples.
+ */
+ if (packet->sample_type == CS_ETM_DISCONTINUITY ||
+ packet->sample_type == CS_ETM_EXCEPTION)
return 0;
return packet->start_addr;
@@ -1069,39 +1473,34 @@ cs_etm__get_trace(struct cs_etm_queue *etmq)
return etmq->buf_len;
}
-static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
- struct cs_etm_traceid_queue *tidq)
+static void cs_etm__set_thread(struct cs_etm_queue *etmq,
+ struct cs_etm_traceid_queue *tidq, pid_t tid,
+ ocsd_ex_level el)
{
- if ((!tidq->thread) && (tidq->tid != -1))
- tidq->thread = machine__find_thread(etm->machine, -1,
- tidq->tid);
+ struct machine *machine = cs_etm__get_machine(etmq, el);
+
+ if (tid != -1) {
+ thread__zput(tidq->thread);
+ tidq->thread = machine__find_thread(machine, -1, tid);
+ }
+
+ /* Couldn't find a known thread */
+ if (!tidq->thread)
+ tidq->thread = machine__idle_thread(machine);
- if (tidq->thread)
- tidq->pid = tidq->thread->pid_;
+ tidq->el = el;
}
-int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
- pid_t tid, u8 trace_chan_id)
+int cs_etm__etmq_set_tid_el(struct cs_etm_queue *etmq, pid_t tid,
+ u8 trace_chan_id, ocsd_ex_level el)
{
- int cpu, err = -EINVAL;
- struct cs_etm_auxtrace *etm = etmq->etm;
struct cs_etm_traceid_queue *tidq;
tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
if (!tidq)
- return err;
-
- if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
- return err;
-
- err = machine__set_current_tid(etm->machine, cpu, tid, tid);
- if (err)
- return err;
-
- tidq->tid = tid;
- thread__zput(tidq->thread);
+ return -EINVAL;
- cs_etm__set_pid_tid_cpu(etm, tidq);
+ cs_etm__set_thread(etmq, tidq, tid, el);
return 0;
}
@@ -1135,8 +1534,30 @@ static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
else
sample->insn_len = 4;
- cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
- sample->insn_len, (void *)sample->insn);
+ cs_etm__mem_access(etmq, trace_chan_id, sample->ip, sample->insn_len,
+ (void *)sample->insn, 0);
+}
+
+u64 cs_etm__convert_sample_time(struct cs_etm_queue *etmq, u64 cs_timestamp)
+{
+ struct cs_etm_auxtrace *etm = etmq->etm;
+
+ if (etm->has_virtual_ts)
+ return tsc_to_perf_time(cs_timestamp, &etm->tc);
+ else
+ return cs_timestamp;
+}
+
+static inline u64 cs_etm__resolve_sample_time(struct cs_etm_queue *etmq,
+ struct cs_etm_traceid_queue *tidq)
+{
+ struct cs_etm_auxtrace *etm = etmq->etm;
+ struct cs_etm_packet_queue *packet_queue = &tidq->packet_queue;
+
+ if (!etm->timeless_decoding && etm->has_virtual_ts)
+ return packet_queue->cs_timestamp;
+ else
+ return etm->latest_kernel_timestamp;
}
static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
@@ -1146,15 +1567,19 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
int ret = 0;
struct cs_etm_auxtrace *etm = etmq->etm;
union perf_event *event = tidq->event_buf;
- struct perf_sample sample = {.ip = 0,};
+ struct perf_sample sample;
+ perf_sample__init(&sample, /*all=*/true);
event->sample.header.type = PERF_RECORD_SAMPLE;
- event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
+ event->sample.header.misc = cs_etm__cpu_mode(etmq, addr, tidq->el);
event->sample.header.size = sizeof(struct perf_event_header);
+ /* Set time field based on etm auxtrace config. */
+ sample.time = cs_etm__resolve_sample_time(etmq, tidq);
+
sample.ip = addr;
- sample.pid = tidq->pid;
- sample.tid = tidq->tid;
+ sample.pid = thread__pid(tidq->thread);
+ sample.tid = thread__tid(tidq->thread);
sample.id = etmq->etm->instructions_id;
sample.stream_id = etmq->etm->instructions_id;
sample.period = period;
@@ -1181,6 +1606,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
"CS ETM Trace: failed to deliver instruction event, error %d\n",
ret);
+ perf_sample__exit(&sample);
return ret;
}
@@ -1205,12 +1631,16 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
ip = cs_etm__last_executed_instr(tidq->prev_packet);
event->sample.header.type = PERF_RECORD_SAMPLE;
- event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
+ event->sample.header.misc = cs_etm__cpu_mode(etmq, ip,
+ tidq->prev_packet_el);
event->sample.header.size = sizeof(struct perf_event_header);
+ /* Set time field based on etm auxtrace config. */
+ sample.time = cs_etm__resolve_sample_time(etmq, tidq);
+
sample.ip = ip;
- sample.pid = tidq->pid;
- sample.tid = tidq->tid;
+ sample.pid = thread__pid(tidq->prev_packet_thread);
+ sample.tid = thread__tid(tidq->prev_packet_thread);
sample.addr = cs_etm__first_executed_instr(tidq->packet);
sample.id = etmq->etm->branches_id;
sample.stream_id = etmq->etm->branches_id;
@@ -1254,35 +1684,6 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
return ret;
}
-struct cs_etm_synth {
- struct perf_tool dummy_tool;
- struct perf_session *session;
-};
-
-static int cs_etm__event_synth(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- struct cs_etm_synth *cs_etm_synth =
- container_of(tool, struct cs_etm_synth, dummy_tool);
-
- return perf_session__deliver_synth_event(cs_etm_synth->session,
- event, NULL);
-}
-
-static int cs_etm__synth_event(struct perf_session *session,
- struct perf_event_attr *attr, u64 id)
-{
- struct cs_etm_synth cs_etm_synth;
-
- memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
- cs_etm_synth.session = session;
-
- return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
- &id, cs_etm__event_synth);
-}
-
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
struct perf_session *session)
{
@@ -1334,27 +1735,32 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
- err = cs_etm__synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
- etm->sample_branches = true;
etm->branches_sample_type = attr.sample_type;
etm->branches_id = id;
id += 1;
attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
}
- if (etm->synth_opts.last_branch)
+ if (etm->synth_opts.last_branch) {
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ /*
+ * We don't use the hardware index, but the sample generation
+ * code uses the new format branch_stack with this field,
+ * so the event attributes must indicate that it's present.
+ */
+ attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
+ }
if (etm->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
attr.sample_period = etm->synth_opts.period;
etm->instructions_sample_period = attr.sample_period;
- err = cs_etm__synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
- etm->sample_instructions = true;
etm->instructions_sample_type = attr.sample_type;
etm->instructions_id = id;
id += 1;
@@ -1385,7 +1791,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
tidq->prev_packet->last_instr_taken_branch)
cs_etm__update_last_branch_rb(etmq, tidq);
- if (etm->sample_instructions &&
+ if (etm->synth_opts.instructions &&
tidq->period_instructions >= etm->instructions_sample_period) {
/*
* Emit instruction sample periodically
@@ -1420,7 +1826,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
* tidq->packet->instr_count represents the number of
* instructions in the current etm packet.
*
- * Period instructions (Pi) contains the the number of
+ * Period instructions (Pi) contains the number of
* instructions executed after the sample point(n) from the
* previous etm packet. This will always be less than
* etm->instructions_sample_period.
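A worked example of the arithmetic in that comment (illustrative numbers): with instructions_sample_period = 1000, Pi = 700 carried over from the previous packet and 500 instructions in the current packet, the running total reaches 1200, so one instruction sample is emitted at instruction 300 of the current packet and Pi restarts at 200 (1200 - 1000), which is again below the sample period.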
@@ -1468,7 +1874,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
}
}
- if (etm->sample_branches) {
+ if (etm->synth_opts.branches) {
bool generate_sample = false;
/* Generate sample for tracing on packet */
@@ -1522,6 +1928,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
goto swap_packet;
if (etmq->etm->synth_opts.last_branch &&
+ etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
@@ -1547,7 +1954,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
}
- if (etm->sample_branches &&
+ if (etm->synth_opts.branches &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
err = cs_etm__synth_branch_sample(etmq, tidq);
if (err)
@@ -1579,6 +1986,7 @@ static int cs_etm__end_block(struct cs_etm_queue *etmq,
* the trace.
*/
if (etmq->etm->synth_opts.last_branch &&
+ etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
@@ -1648,13 +2056,13 @@ static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
* | 1 1 0 1 1 1 1 1 | imm8 |
* +-----------------+--------+
*
- * According to the specifiction, it only defines SVC for T32
+ * According to the specification, it only defines SVC for T32
* with 16 bits instruction and has no definition for 32bits;
* so below only read 2 bytes as instruction size for T32.
*/
addr = end_addr - 2;
- cs_etm__mem_access(etmq, trace_chan_id, addr,
- sizeof(instr16), (u8 *)&instr16);
+ cs_etm__mem_access(etmq, trace_chan_id, addr, sizeof(instr16),
+ (u8 *)&instr16, 0);
if ((instr16 & 0xFF00) == 0xDF00)
return true;
@@ -1669,8 +2077,8 @@ static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
* +---------+---------+-------------------------+
*/
addr = end_addr - 4;
- cs_etm__mem_access(etmq, trace_chan_id, addr,
- sizeof(instr32), (u8 *)&instr32);
+ cs_etm__mem_access(etmq, trace_chan_id, addr, sizeof(instr32),
+ (u8 *)&instr32, 0);
if ((instr32 & 0x0F000000) == 0x0F000000 &&
(instr32 & 0xF0000000) != 0xF0000000)
return true;
@@ -1686,8 +2094,8 @@ static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
* +-----------------------+---------+-----------+
*/
addr = end_addr - 4;
- cs_etm__mem_access(etmq, trace_chan_id, addr,
- sizeof(instr32), (u8 *)&instr32);
+ cs_etm__mem_access(etmq, trace_chan_id, addr, sizeof(instr32),
+ (u8 *)&instr32, 0);
if ((instr32 & 0xFFE0001F) == 0xd4000001)
return true;
@@ -1880,7 +2288,7 @@ static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
/*
* If the previous packet is an exception return packet
- * and the return address just follows SVC instuction,
+ * and the return address just follows SVC instruction,
* it needs to calibrate the previous packet sample flags
* as PERF_IP_FLAG_SYSCALLRET.
*/
@@ -1904,7 +2312,7 @@ static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
PERF_IP_FLAG_TRACE_END;
break;
case CS_ETM_EXCEPTION:
- ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
+ ret = cs_etm__get_magic(etmq, packet->trace_chan_id, &magic);
if (ret)
return ret;
@@ -1954,7 +2362,7 @@ static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
* contain exception type related info so we cannot decide
* the exception type purely based on exception return packet.
* If we record the exception number from exception packet and
- * reuse it for excpetion return packet, this is not reliable
+ * reuse it for exception return packet, this is not reliable
* due the trace can be discontinuity or the interrupt can
* be nested, thus the recorded exception number cannot be
* used for exception return packet for these two cases.
@@ -2091,16 +2499,10 @@ static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
/* Ignore return value */
cs_etm__process_traceid_queue(etmq, tidq);
-
- /*
- * Generate an instruction sample with the remaining
- * branchstack entries.
- */
- cs_etm__flush(etmq, tidq);
}
}
-static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
+static int cs_etm__run_per_thread_timeless_decoder(struct cs_etm_queue *etmq)
{
int err = 0;
struct cs_etm_traceid_queue *tidq;
@@ -2138,6 +2540,51 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
return err;
}
+static int cs_etm__run_per_cpu_timeless_decoder(struct cs_etm_queue *etmq)
+{
+ int idx, err = 0;
+ struct cs_etm_traceid_queue *tidq;
+ struct int_node *inode;
+
+ /* Go through each buffer in the queue and decode them one by one */
+ while (1) {
+ err = cs_etm__get_data_block(etmq);
+ if (err <= 0)
+ return err;
+
+ /* Run trace decoder until buffer consumed or end of trace */
+ do {
+ err = cs_etm__decode_data_block(etmq);
+ if (err)
+ return err;
+
+ /*
+ * cs_etm__run_per_thread_timeless_decoder() runs on a
+ * single traceID queue because each TID has a separate
+ * buffer. But here in per-cpu mode we need to iterate
+ * over each channel instead.
+ */
+ intlist__for_each_entry(inode,
+ etmq->traceid_queues_list) {
+ idx = (int)(intptr_t)inode->priv;
+ tidq = etmq->traceid_queues[idx];
+ cs_etm__process_traceid_queue(etmq, tidq);
+ }
+ } while (etmq->buf_len);
+
+ intlist__for_each_entry(inode, etmq->traceid_queues_list) {
+ idx = (int)(intptr_t)inode->priv;
+ tidq = etmq->traceid_queues[idx];
+ /* Flush any remaining branch stack entries */
+ err = cs_etm__end_block(etmq, tidq);
+ if (err)
+ return err;
+ }
+ }
+
+ return err;
+}
+
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
pid_t tid)
{
@@ -2152,34 +2599,49 @@ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
if (!etmq)
continue;
- tidq = cs_etm__etmq_get_traceid_queue(etmq,
- CS_ETM_PER_THREAD_TRACEID);
+ if (etm->per_thread_decoding) {
+ tidq = cs_etm__etmq_get_traceid_queue(
+ etmq, CS_ETM_PER_THREAD_TRACEID);
- if (!tidq)
- continue;
+ if (!tidq)
+ continue;
- if ((tid == -1) || (tidq->tid == tid)) {
- cs_etm__set_pid_tid_cpu(etm, tidq);
- cs_etm__run_decoder(etmq);
- }
+ if (tid == -1 || thread__tid(tidq->thread) == tid)
+ cs_etm__run_per_thread_timeless_decoder(etmq);
+ } else
+ cs_etm__run_per_cpu_timeless_decoder(etmq);
}
return 0;
}
-static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
+static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm)
{
int ret = 0;
- unsigned int cs_queue_nr, queue_nr;
+ unsigned int cs_queue_nr, queue_nr, i;
u8 trace_chan_id;
- u64 timestamp;
+ u64 cs_timestamp;
struct auxtrace_queue *queue;
struct cs_etm_queue *etmq;
struct cs_etm_traceid_queue *tidq;
+ /*
+ * Pre-populate the heap with one entry from each queue so that we can
+ * start processing in time order across all queues.
+ */
+ for (i = 0; i < etm->queues.nr_queues; i++) {
+ etmq = etm->queues.queue_array[i].priv;
+ if (!etmq)
+ continue;
+
+ ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
+ if (ret)
+ return ret;
+ }
+
while (1) {
if (!etm->heap.heap_cnt)
- goto out;
+ break;
/* Take the entry at the top of the min heap */
cs_queue_nr = etm->heap.heap_array[0].queue_nr;
@@ -2234,9 +2696,9 @@ refetch:
if (ret)
goto out;
- timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
+ cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
- if (!timestamp) {
+ if (!cs_timestamp) {
/*
* Function cs_etm__decode_data_block() returns when
	 * there are no more traces to decode in the current
@@ -2259,9 +2721,26 @@ refetch:
* this queue/traceID.
*/
cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
- ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
+ ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
}
+ for (i = 0; i < etm->queues.nr_queues; i++) {
+ struct int_node *inode;
+
+ etmq = etm->queues.queue_array[i].priv;
+ if (!etmq)
+ continue;
+
+ intlist__for_each_entry(inode, etmq->traceid_queues_list) {
+ int idx = (int)(intptr_t)inode->priv;
+
+ /* Flush any remaining branch stack entries */
+ tidq = etmq->traceid_queues[idx];
+ ret = cs_etm__end_block(etmq, tidq);
+ if (ret)
+ return ret;
+ }
+ }
out:
return ret;
}
@@ -2275,10 +2754,12 @@ static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
return 0;
/*
- * Add the tid/pid to the log so that we can get a match when
- * we get a contextID from the decoder.
+ * Add the tid/pid to the log so that we can get a match when we get a
+ * contextID from the decoder. Only track for the host: only kernel
+	 * trace is supported for guests, which wouldn't need PIDs, so this should
+ * be fine.
*/
- th = machine__findnew_thread(etm->machine,
+ th = machine__findnew_thread(&etm->session->machines.host,
event->itrace_start.pid,
event->itrace_start.tid);
if (!th)
@@ -2311,10 +2792,12 @@ static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
return 0;
/*
- * Add the tid/pid to the log so that we can get a match when
- * we get a contextID from the decoder.
+ * Add the tid/pid to the log so that we can get a match when we get a
+ * contextID from the decoder. Only track for the host: only kernel
+	 * trace is supported for guests, which wouldn't need PIDs, so this should
+ * be fine.
*/
- th = machine__findnew_thread(etm->machine,
+ th = machine__findnew_thread(&etm->session->machines.host,
event->context_switch.next_prev_pid,
event->context_switch.next_prev_tid);
if (!th)
@@ -2328,10 +2811,8 @@ static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
static int cs_etm__process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
- int err = 0;
- u64 timestamp;
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
@@ -2344,37 +2825,63 @@ static int cs_etm__process_event(struct perf_session *session,
return -EINVAL;
}
- if (sample->time && (sample->time != (u64) -1))
- timestamp = sample->time;
- else
- timestamp = 0;
-
- if (timestamp || etm->timeless_decoding) {
- err = cs_etm__update_queues(etm);
- if (err)
- return err;
- }
-
- if (etm->timeless_decoding &&
- event->header.type == PERF_RECORD_EXIT)
- return cs_etm__process_timeless_queues(etm,
- event->fork.tid);
+ switch (event->header.type) {
+ case PERF_RECORD_EXIT:
+ /*
+ * Don't need to wait for cs_etm__flush_events() in per-thread mode to
+ * start the decode because we know there will be no more trace from
+ * this thread. All this does is emit samples earlier than waiting for
+ * the flush in other modes, but with timestamps it makes sense to wait
+ * for flush so that events from different threads are interleaved
+ * properly.
+ */
+ if (etm->per_thread_decoding && etm->timeless_decoding)
+ return cs_etm__process_timeless_queues(etm,
+ event->fork.tid);
+ break;
- if (event->header.type == PERF_RECORD_ITRACE_START)
+ case PERF_RECORD_ITRACE_START:
return cs_etm__process_itrace_start(etm, event);
- else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
+
+ case PERF_RECORD_SWITCH_CPU_WIDE:
return cs_etm__process_switch_cpu_wide(etm, event);
- if (!etm->timeless_decoding &&
- event->header.type == PERF_RECORD_AUX)
- return cs_etm__process_queues(etm);
+ case PERF_RECORD_AUX:
+ /*
+ * Record the latest kernel timestamp available in the header
+ * for samples so that synthesised samples occur from this point
+ * onwards.
+ */
+ if (sample->time && (sample->time != (u64)-1))
+ etm->latest_kernel_timestamp = sample->time;
+ break;
+
+ default:
+ break;
+ }
return 0;
}
+static void dump_queued_data(struct cs_etm_auxtrace *etm,
+ struct perf_record_auxtrace *event)
+{
+ struct auxtrace_buffer *buf;
+ unsigned int i;
+ /*
+	 * Find all buffers with the same reference in the queues and dump them.
+ * This is because the queues can contain multiple entries of the same
+ * buffer that were split on aux records.
+ */
+ for (i = 0; i < etm->queues.nr_queues; ++i)
+ list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
+ if (buf->reference == event->reference)
+ cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
+}
+
static int cs_etm__process_auxtrace_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
@@ -2385,6 +2892,7 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
int fd = perf_data__fd(session->data);
bool is_pipe = perf_data__is_pipe(session->data);
int err;
+ int idx = event->auxtrace.idx;
if (is_pipe)
data_offset = 0;
@@ -2401,141 +2909,480 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
if (dump_trace)
if (auxtrace_buffer__get_data(buffer, fd)) {
- cs_etm__dump_event(etm, buffer);
+ cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
auxtrace_buffer__put_data(buffer);
}
- }
+ } else if (dump_trace)
+ dump_queued_data(etm, &event->auxtrace);
return 0;
}
-static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
+static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
{
struct evsel *evsel;
struct evlist *evlist = etm->session->evlist;
- bool timeless_decoding = true;
+
+ /* Override timeless mode with user input from --itrace=Z */
+ if (etm->synth_opts.timeless_decoding) {
+ etm->timeless_decoding = true;
+ return 0;
+ }
/*
- * Circle through the list of event and complain if we find one
- * with the time bit set.
+ * Find the cs_etm evsel and look at what its timestamp setting was
*/
- evlist__for_each_entry(evlist, evsel) {
- if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
- timeless_decoding = false;
- }
+ evlist__for_each_entry(evlist, evsel)
+ if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
+ etm->timeless_decoding =
+ !(evsel->core.attr.config & BIT(ETM_OPT_TS));
+ return 0;
+ }
- return timeless_decoding;
+ pr_err("CS ETM: Couldn't find ETM evsel\n");
+ return -EINVAL;
}
-static const char * const cs_etm_global_header_fmts[] = {
- [CS_HEADER_VERSION_0] = " Header version %llx\n",
- [CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
- [CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
-};
+/*
+ * Read a single cpu parameter block from the auxtrace_info priv block.
+ *
+ * For version 1 there is a per cpu nr_params entry. If we are handling
+ * a version 1 file, then there may be fewer, the same, or more params
+ * indicated by this value than the compile-time number we understand.
+ *
+ * For a version 0 info block, there is a fixed number of params, and we
+ * need to fill out the nr_params value in the metadata we create.
+ */
+static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
+ int out_blk_size, int nr_params_v0)
+{
+ u64 *metadata = NULL;
+ int hdr_version;
+ int nr_in_params, nr_out_params, nr_cmn_params;
+ int i, k;
-static const char * const cs_etm_priv_fmts[] = {
- [CS_ETM_MAGIC] = " Magic number %llx\n",
- [CS_ETM_CPU] = " CPU %lld\n",
- [CS_ETM_ETMCR] = " ETMCR %llx\n",
- [CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
- [CS_ETM_ETMCCER] = " ETMCCER %llx\n",
- [CS_ETM_ETMIDR] = " ETMIDR %llx\n",
-};
+ metadata = zalloc(sizeof(*metadata) * out_blk_size);
+ if (!metadata)
+ return NULL;
-static const char * const cs_etmv4_priv_fmts[] = {
- [CS_ETM_MAGIC] = " Magic number %llx\n",
- [CS_ETM_CPU] = " CPU %lld\n",
- [CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
- [CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
- [CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
- [CS_ETMV4_TRCIDR1] = " TRCIDR1 %llx\n",
- [CS_ETMV4_TRCIDR2] = " TRCIDR2 %llx\n",
- [CS_ETMV4_TRCIDR8] = " TRCIDR8 %llx\n",
- [CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
-};
+ /* read block current index & version */
+ i = *buff_in_offset;
+ hdr_version = buff_in[CS_HEADER_VERSION];
+
+ if (!hdr_version) {
+ /* read version 0 info block into a version 1 metadata block */
+ nr_in_params = nr_params_v0;
+ metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
+ metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
+ metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
+ /* remaining block params at offset +1 from source */
+ for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
+ metadata[k + 1] = buff_in[i + k];
+ /* version 0 has 2 common params */
+ nr_cmn_params = 2;
+ } else {
+ /* read version 1 info block - input and output nr_params may differ */
+ /* version 1 has 3 common params */
+ nr_cmn_params = 3;
+ nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
-static void cs_etm__print_auxtrace_info(__u64 *val, int num)
-{
- int i, j, cpu = 0;
+ /* if input has more params than output - skip excess */
+ nr_out_params = nr_in_params + nr_cmn_params;
+ if (nr_out_params > out_blk_size)
+ nr_out_params = out_blk_size;
- for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
- fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+ for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
+ metadata[k] = buff_in[i + k];
- for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
- if (val[i] == __perf_cs_etmv3_magic)
- for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
- fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
- else if (val[i] == __perf_cs_etmv4_magic)
- for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
- fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
- else
- /* failure.. return */
- return;
+ /* record the actual nr params we copied */
+ metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
}
+
+ /* adjust in offset by number of in params used */
+ i += nr_in_params + nr_cmn_params;
+ *buff_in_offset = i;
+ return metadata;
}
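
A worked example of the version 1 clamping path above, with purely
illustrative sizes (this is a sketch, not code from the file): a newer
file carrying 9 trace params per cpu is read by a perf that only knows
an out_blk_size of 10 (3 common params + 7 trace params).

#include <stdio.h>

int main(void)
{
	int out_blk_size = 10;	/* e.g. this perf's CS_ETMV4_PRIV_MAX */
	int nr_cmn_params = 3;	/* magic, cpu, nr_params */
	int nr_in_params = 9;	/* written by a newer perf */
	int nr_out_params = nr_in_params + nr_cmn_params;	/* 12 */

	if (nr_out_params > out_blk_size)	/* 12 > 10: skip the excess */
		nr_out_params = out_blk_size;

	/* 7 params are copied; the input offset still advances by 9 + 3 */
	printf("params kept: %d\n", nr_out_params - nr_cmn_params);
	return 0;
}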
-int cs_etm__process_auxtrace_info(union perf_event *event,
- struct perf_session *session)
+/**
+ * Puts a fragment of an auxtrace buffer into the auxtrace queues based
+ * on the bounds of aux_event, if it matches with the buffer that's at
+ * file_offset.
+ *
+ * Normally, whole auxtrace buffers would be added to the queue. But we
+ * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
+ * is reset across each buffer, so splitting the buffers up in advance has
+ * the same effect.
+ */
+static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
+ struct perf_record_aux *aux_event, struct perf_sample *sample)
{
- struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
- struct cs_etm_auxtrace *etm = NULL;
- struct int_node *inode;
- unsigned int pmu_type;
- int event_header_size = sizeof(struct perf_event_header);
- int info_header_size;
- int total_size = auxtrace_info->header.size;
- int priv_size = 0;
- int num_cpu;
- int err = 0, idx = -1;
- int i, j, k;
- u64 *ptr, *hdr = NULL;
- u64 **metadata = NULL;
+ int err;
+ char buf[PERF_SAMPLE_MAX_SIZE];
+ union perf_event *auxtrace_event_union;
+ struct perf_record_auxtrace *auxtrace_event;
+ union perf_event auxtrace_fragment;
+ __u64 aux_offset, aux_size;
+ enum cs_etm_format format;
+
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
/*
- * sizeof(auxtrace_info_event::type) +
- * sizeof(auxtrace_info_event::reserved) == 8
+ * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
+ * from looping through the auxtrace index.
*/
- info_header_size = 8;
+ err = perf_session__peek_event(session, file_offset, buf,
+ PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
+ if (err)
+ return err;
+ auxtrace_event = &auxtrace_event_union->auxtrace;
+ if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
+ return -EINVAL;
- if (total_size < (event_header_size + info_header_size))
+ if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
+ auxtrace_event->header.size != sz) {
return -EINVAL;
+ }
- priv_size = total_size - event_header_size - info_header_size;
+ /*
+ * In per-thread mode, auxtrace CPU is set to -1, but TID will be set instead. See
+ * auxtrace_mmap_params__set_idx(). However, the sample AUX event will contain a
+	 * CPU as we always set this for the AUX_OUTPUT_HW_ID event.
+	 * So now compare only TIDs if auxtrace CPU is -1, and CPUs if auxtrace CPU is not -1.
+	 * Return 'not found' if they mismatch.
+ */
+ if (auxtrace_event->cpu == (__u32) -1) {
+ etm->per_thread_decoding = true;
+ if (auxtrace_event->tid != sample->tid)
+ return 1;
+ } else if (auxtrace_event->cpu != sample->cpu) {
+ if (etm->per_thread_decoding) {
+ /*
+ * Found a per-cpu buffer after a per-thread one was
+ * already found
+ */
+ pr_err("CS ETM: Inconsistent per-thread/per-cpu mode.\n");
+ return -EINVAL;
+ }
+ return 1;
+ }
- /* First the global part */
- ptr = (u64 *) auxtrace_info->priv;
+ if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
+ /*
+ * Clamp size in snapshot mode. The buffer size is clamped in
+ * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
+ * the buffer size.
+ */
+ aux_size = min(aux_event->aux_size, auxtrace_event->size);
+
+ /*
+		 * In this mode, the head also points to the end of the buffer, so aux_offset
+		 * needs to have the size subtracted so that it points to the beginning, as
+		 * in normal mode.
+ */
+ aux_offset = aux_event->aux_offset - aux_size;
+ } else {
+ aux_size = aux_event->aux_size;
+ aux_offset = aux_event->aux_offset;
+ }
+
+ if (aux_offset >= auxtrace_event->offset &&
+ aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
+ struct cs_etm_queue *etmq = etm->queues.queue_array[auxtrace_event->idx].priv;
+
+ /*
+ * If this AUX event was inside this buffer somewhere, create a new auxtrace event
+ * based on the sizes of the aux event, and queue that fragment.
+ */
+ auxtrace_fragment.auxtrace = *auxtrace_event;
+ auxtrace_fragment.auxtrace.size = aux_size;
+ auxtrace_fragment.auxtrace.offset = aux_offset;
+ file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
+
+ pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
+ " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
+ err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
+ file_offset, NULL);
+ if (err)
+ return err;
+
+ format = (aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW) ?
+ UNFORMATTED : FORMATTED;
+ if (etmq->format != UNSET && format != etmq->format) {
+ pr_err("CS_ETM: mixed formatted and unformatted trace not supported\n");
+ return -EINVAL;
+ }
+ etmq->format = format;
+ return 0;
+ }
+
+ /* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
+ return 1;
+}
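
A minimal sketch of the snapshot-mode arithmetic above, with assumed
numbers (none of these values come from the file): the AUX record's head
points at the end of the captured data, so the fragment start is
recovered by subtracting the clamped size before the bounds check.

#include <stdio.h>

int main(void)
{
	/* PERF_RECORD_AUXTRACE buffer: [0xc000, 0x14000) */
	unsigned long long buf_offset = 0xc000, buf_size = 0x8000;
	/* PERF_RECORD_AUX with PERF_AUX_FLAG_OVERWRITE set */
	unsigned long long aux_offset = 0x10000, aux_size = 0x4000;

	aux_offset -= aux_size;	/* head -> start of fragment: 0xc000 */

	if (aux_offset >= buf_offset &&
	    aux_offset + aux_size <= buf_offset + buf_size)
		printf("fragment [%#llx, %#llx) is inside the buffer\n",
		       aux_offset, aux_offset + aux_size);
	return 0;
}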
+
+static int cs_etm__process_aux_hw_id_cb(struct perf_session *session, union perf_event *event,
+ u64 offset __maybe_unused, void *data __maybe_unused)
+{
+ /* look to handle PERF_RECORD_AUX_OUTPUT_HW_ID early to ensure decoders can be set up */
+ if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID) {
+ (*(int *)data)++; /* increment found count */
+ return cs_etm__process_aux_output_hw_id(session, event);
+ }
+ return 0;
+}
+
+static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
+ u64 offset __maybe_unused, void *data __maybe_unused)
+{
+ struct perf_sample sample;
+ int ret;
+ struct auxtrace_index_entry *ent;
+ struct auxtrace_index *auxtrace_index;
+ struct evsel *evsel;
+ size_t i;
+
+ /* Don't care about any other events, we're only queuing buffers for AUX events */
+ if (event->header.type != PERF_RECORD_AUX)
+ return 0;
- /* Look for version '0' of the header */
- if (ptr[0] != 0)
+ if (event->header.size < sizeof(struct perf_record_aux))
return -EINVAL;
- hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
- if (!hdr)
- return -ENOMEM;
+ /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
+ if (!event->aux.aux_size)
+ return 0;
- /* Extract header information - see cs-etm.h for format */
- for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
- hdr[i] = ptr[i];
- num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
- pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
- 0xffffffff);
+ /*
+	 * Parse the sample; we need the sample_id_all data that comes after the event so that the
+ * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
+ */
+ evsel = evlist__event2evsel(session->evlist, event);
+ if (!evsel)
+ return -EINVAL;
+ perf_sample__init(&sample, /*all=*/false);
+ ret = evsel__parse_sample(evsel, event, &sample);
+ if (ret)
+ goto out;
/*
- * Create an RB tree for traceID-metadata tuple. Since the conversion
- * has to be made for each packet that gets decoded, optimizing access
- * in anything other than a sequential array is worth doing.
+ * Loop through the auxtrace index to find the buffer that matches up with this aux event.
*/
- traceid_list = intlist__new(NULL);
- if (!traceid_list) {
- err = -ENOMEM;
- goto err_free_hdr;
+ list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
+ for (i = 0; i < auxtrace_index->nr; i++) {
+ ent = &auxtrace_index->entries[i];
+ ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
+ ent->sz, &event->aux, &sample);
+ /*
+			 * Stop the search on an error or on success. Continue the
+			 * search on 1 ('not found').
+ */
+ if (ret != 1)
+ goto out;
+ }
}
- metadata = zalloc(sizeof(*metadata) * num_cpu);
- if (!metadata) {
- err = -ENOMEM;
- goto err_free_traceid_list;
+ /*
+	 * Couldn't find the buffer corresponding to this aux record, so something went wrong. Warn, but
+ * don't exit with an error because it will still be possible to decode other aux records.
+ */
+ pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
+ " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
+ ret = 0;
+out:
+ perf_sample__exit(&sample);
+ return ret;
+}
+
+static int cs_etm__queue_aux_records(struct perf_session *session)
+{
+ struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
+ struct auxtrace_index, list);
+ if (index && index->nr > 0)
+ return perf_session__peek_events(session, session->header.data_offset,
+ session->header.data_size,
+ cs_etm__queue_aux_records_cb, NULL);
+
+ /*
+ * We would get here if there are no entries in the index (either no auxtrace
+	 * buffers or no index at all). Return silently, as the buffers may still be
+	 * queued in cs_etm__process_auxtrace_event() if etm->data_queued is still
+	 * false.
+ *
+ * In that scenario, buffers will not be split by AUX records.
+ */
+ return 0;
+}
+
+#define HAS_PARAM(j, type, param) (metadata[(j)][CS_ETM_NR_TRC_PARAMS] <= \
+ (CS_##type##_##param - CS_ETM_COMMON_BLK_MAX_V1))
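
A note on the macro above, with enum values assumed from cs-etm.h (a
sketch, not authoritative): despite its name, HAS_PARAM() evaluates true
when the block holds too few params to include the named one.

/*
 * Assuming CS_ETM_NR_TRC_PARAMS counts only the trace params and, for
 * example, CS_ETMV4_TS_SOURCE == 10 with CS_ETM_COMMON_BLK_MAX_V1 == 3:
 *
 *   block recorded without TS_SOURCE: nr_params == 7
 *     HAS_PARAM(j, ETMV4, TS_SOURCE)  =>  7 <= (10 - 3)  =>  true (missing)
 *
 *   block recorded with TS_SOURCE:    nr_params == 8
 *     HAS_PARAM(j, ETMV4, TS_SOURCE)  =>  8 <= (10 - 3)  =>  false (present)
 */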
+
+/*
+ * Loop through the ETMs and return false if we find at least one where ts_source != 1
+ * (i.e. not virtual timestamps); the caller emits the user-visible warning.
+ */
+static bool cs_etm__has_virtual_ts(u64 **metadata, int num_cpu)
+{
+ int j;
+
+ for (j = 0; j < num_cpu; j++) {
+ switch (metadata[j][CS_ETM_MAGIC]) {
+ case __perf_cs_etmv4_magic:
+ if (HAS_PARAM(j, ETMV4, TS_SOURCE) || metadata[j][CS_ETMV4_TS_SOURCE] != 1)
+ return false;
+ break;
+ case __perf_cs_ete_magic:
+ if (HAS_PARAM(j, ETE, TS_SOURCE) || metadata[j][CS_ETE_TS_SOURCE] != 1)
+ return false;
+ break;
+ default:
+ /* Unknown / unsupported magic number. */
+ return false;
+ }
+ }
+ return true;
+}
+
+/* map trace ids to correct metadata block, from information in metadata */
+static int cs_etm__map_trace_ids_metadata(struct cs_etm_auxtrace *etm, int num_cpu,
+ u64 **metadata)
+{
+ u64 cs_etm_magic;
+ u8 trace_chan_id;
+ int i, err;
+
+ for (i = 0; i < num_cpu; i++) {
+ cs_etm_magic = metadata[i][CS_ETM_MAGIC];
+ switch (cs_etm_magic) {
+ case __perf_cs_etmv3_magic:
+ metadata[i][CS_ETM_ETMTRACEIDR] &= CORESIGHT_TRACE_ID_VAL_MASK;
+ trace_chan_id = (u8)(metadata[i][CS_ETM_ETMTRACEIDR]);
+ break;
+ case __perf_cs_etmv4_magic:
+ case __perf_cs_ete_magic:
+ metadata[i][CS_ETMV4_TRCTRACEIDR] &= CORESIGHT_TRACE_ID_VAL_MASK;
+ trace_chan_id = (u8)(metadata[i][CS_ETMV4_TRCTRACEIDR]);
+ break;
+ default:
+ /* unknown magic number */
+ return -EINVAL;
+ }
+ err = cs_etm__map_trace_id_v0(etm, trace_chan_id, metadata[i]);
+ if (err)
+ return err;
+ }
+ return 0;
+}
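
A minimal sketch of the masking in the function above, assuming the
7-bit trace ID layout from coresight-pmu.h (the upper bits of the
metadata field may carry flags rather than ID value):

#include <stdint.h>
#include <stdio.h>

#define TRACE_ID_VAL_MASK 0x7fULL	/* stand-in for CORESIGHT_TRACE_ID_VAL_MASK */

int main(void)
{
	uint64_t trctraceidr = 0x80000010ULL;	/* flag bit set, ID 0x10 */
	uint8_t trace_chan_id = (uint8_t)(trctraceidr & TRACE_ID_VAL_MASK);

	printf("trace_chan_id = %#x\n", trace_chan_id);	/* 0x10 */
	return 0;
}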
+
+/*
+ * Use the data gathered by the peeks for HW_ID (trace ID mappings) and AUX
+ * (formatted or not) packets to create the decoders.
+ */
+static int cs_etm__create_queue_decoders(struct cs_etm_queue *etmq)
+{
+ struct cs_etm_decoder_params d_params;
+ struct cs_etm_trace_params *t_params;
+ int decoders = intlist__nr_entries(etmq->traceid_list);
+
+ if (decoders == 0)
+ return 0;
+
+ /*
+ * Each queue can only contain data from one CPU when unformatted, so only one decoder is
+ * needed.
+ */
+ if (etmq->format == UNFORMATTED)
+ assert(decoders == 1);
+
+ /* Use metadata to fill in trace parameters for trace decoder */
+ t_params = zalloc(sizeof(*t_params) * decoders);
+
+ if (!t_params)
+ goto out_free;
+
+ if (cs_etm__init_trace_params(t_params, etmq))
+ goto out_free;
+
+ /* Set decoder parameters to decode trace packets */
+ if (cs_etm__init_decoder_params(&d_params, etmq,
+ dump_trace ? CS_ETM_OPERATION_PRINT :
+ CS_ETM_OPERATION_DECODE))
+ goto out_free;
+
+ etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
+ t_params);
+
+ if (!etmq->decoder)
+ goto out_free;
+
+ /*
+ * Register a function to handle all memory accesses required by
+ * the trace decoder library.
+ */
+ if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
+ 0x0L, ((u64) -1L),
+ cs_etm__mem_access))
+ goto out_free_decoder;
+
+ zfree(&t_params);
+ return 0;
+
+out_free_decoder:
+ cs_etm_decoder__free(etmq->decoder);
+out_free:
+ zfree(&t_params);
+ return -EINVAL;
+}
+
+static int cs_etm__create_decoders(struct cs_etm_auxtrace *etm)
+{
+ struct auxtrace_queues *queues = &etm->queues;
+
+ for (unsigned int i = 0; i < queues->nr_queues; i++) {
+ bool empty = list_empty(&queues->queue_array[i].head);
+ struct cs_etm_queue *etmq = queues->queue_array[i].priv;
+ int ret;
+
+ /*
+ * Don't create decoders for empty queues, mainly because
+ * etmq->format is unknown for empty queues.
+ */
+ assert(empty || etmq->format != UNSET);
+ if (empty)
+ continue;
+
+ ret = cs_etm__create_queue_decoders(etmq);
+ if (ret)
+ return ret;
}
+ return 0;
+}
+
+int cs_etm__process_auxtrace_info_full(union perf_event *event,
+ struct perf_session *session)
+{
+ struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
+ struct cs_etm_auxtrace *etm = NULL;
+ struct perf_record_time_conv *tc = &session->time_conv;
+ int event_header_size = sizeof(struct perf_event_header);
+ int total_size = auxtrace_info->header.size;
+ int priv_size = 0;
+ int num_cpu, max_cpu = 0;
+ int err = 0;
+ int aux_hw_id_found;
+ int i;
+ u64 *ptr = NULL;
+ u64 **metadata = NULL;
+
+ /* First the global part */
+ ptr = (u64 *) auxtrace_info->priv;
+ num_cpu = ptr[CS_PMU_TYPE_CPUS] & 0xffffffff;
+ metadata = zalloc(sizeof(*metadata) * num_cpu);
+ if (!metadata)
+ return -ENOMEM;
+
+ /* Start parsing after the common part of the header */
+ i = CS_HEADER_VERSION_MAX;
/*
* The metadata is stored in the auxtrace_info section and encodes
@@ -2543,63 +3390,43 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
* required by the trace decoder to properly decode the trace due
* to its highly compressed nature.
*/
- for (j = 0; j < num_cpu; j++) {
+ for (int j = 0; j < num_cpu; j++) {
if (ptr[i] == __perf_cs_etmv3_magic) {
- metadata[j] = zalloc(sizeof(*metadata[j]) *
- CS_ETM_PRIV_MAX);
- if (!metadata[j]) {
- err = -ENOMEM;
- goto err_free_metadata;
- }
- for (k = 0; k < CS_ETM_PRIV_MAX; k++)
- metadata[j][k] = ptr[i + k];
-
- /* The traceID is our handle */
- idx = metadata[j][CS_ETM_ETMTRACEIDR];
- i += CS_ETM_PRIV_MAX;
+ metadata[j] =
+ cs_etm__create_meta_blk(ptr, &i,
+ CS_ETM_PRIV_MAX,
+ CS_ETM_NR_TRC_PARAMS_V0);
} else if (ptr[i] == __perf_cs_etmv4_magic) {
- metadata[j] = zalloc(sizeof(*metadata[j]) *
- CS_ETMV4_PRIV_MAX);
- if (!metadata[j]) {
- err = -ENOMEM;
- goto err_free_metadata;
- }
- for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
- metadata[j][k] = ptr[i + k];
-
- /* The traceID is our handle */
- idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
- i += CS_ETMV4_PRIV_MAX;
+ metadata[j] =
+ cs_etm__create_meta_blk(ptr, &i,
+ CS_ETMV4_PRIV_MAX,
+ CS_ETMV4_NR_TRC_PARAMS_V0);
+ } else if (ptr[i] == __perf_cs_ete_magic) {
+ metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1);
+ } else {
+ ui__error("CS ETM Trace: Unrecognised magic number %#"PRIx64". File could be from a newer version of perf.\n",
+ ptr[i]);
+ err = -EINVAL;
+ goto err_free_metadata;
}
- /* Get an RB node for this CPU */
- inode = intlist__findnew(traceid_list, idx);
-
- /* Something went wrong, no need to continue */
- if (!inode) {
+ if (!metadata[j]) {
err = -ENOMEM;
goto err_free_metadata;
}
- /*
- * The node for that CPU should not be taken.
- * Back out if that's the case.
- */
- if (inode->priv) {
- err = -EINVAL;
- goto err_free_metadata;
- }
- /* All good, associate the traceID with the metadata pointer */
- inode->priv = metadata[j];
+ if ((int) metadata[j][CS_ETM_CPU] > max_cpu)
+ max_cpu = metadata[j][CS_ETM_CPU];
}
/*
- * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
+ * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
* CS_ETMV4_PRIV_MAX mark how many double words are in the
* global metadata, and each cpu's metadata respectively.
* The following tests if the correct number of double words was
* present in the auxtrace info section.
*/
+ priv_size = total_size - event_header_size - INFO_HEADER_SIZE;
if (i * 8 != priv_size) {
err = -EINVAL;
goto err_free_metadata;
@@ -2612,19 +3439,60 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
goto err_free_metadata;
}
- err = auxtrace_queues__init(&etm->queues);
+ /*
+ * As all the ETMs run at the same exception level, the system should
+	 * have the same PID format across CPUs. So cache the PID format
+	 * and reuse it for subsequent decoding.
+ */
+ etm->pid_fmt = cs_etm__init_pid_fmt(metadata[0]);
+
+ err = auxtrace_queues__init_nr(&etm->queues, max_cpu + 1);
if (err)
goto err_free_etm;
+ for (unsigned int j = 0; j < etm->queues.nr_queues; ++j) {
+ err = cs_etm__setup_queue(etm, &etm->queues.queue_array[j], j);
+ if (err)
+ goto err_free_queues;
+ }
+
+ if (session->itrace_synth_opts->set) {
+ etm->synth_opts = *session->itrace_synth_opts;
+ } else {
+ itrace_synth_opts__set_default(&etm->synth_opts,
+ session->itrace_synth_opts->default_no_sample);
+ etm->synth_opts.callchain = false;
+ }
+
etm->session = session;
- etm->machine = &session->machines.host;
etm->num_cpu = num_cpu;
- etm->pmu_type = pmu_type;
- etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
+ etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
+ etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0);
etm->metadata = metadata;
etm->auxtrace_type = auxtrace_info->type;
- etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
+
+ if (etm->synth_opts.use_timestamp)
+ /*
+		 * Prior to Armv8.4, Arm CPUs don't support the FEAT_TRF feature,
+		 * therefore the decoder cannot know whether the traced timestamp
+		 * matches the kernel time.
+		 *
+		 * A user with knowledge of the platform can specify the itrace
+		 * option 'T' to tell the decoder to forcibly use the traced
+		 * timestamp as the kernel time.
+ */
+ etm->has_virtual_ts = true;
+ else
+ /* Use virtual timestamps if all ETMs report ts_source = 1 */
+ etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
+
+ if (!etm->has_virtual_ts)
+ ui__warning("Virtual timestamps are not enabled, or not supported by the traced system.\n"
+ "The time field of the samples will not be set accurately.\n"
+ "For Arm CPUs prior to Armv8.4 or without support FEAT_TRF,\n"
+ "you can specify the itrace option 'T' for timestamp decoding\n"
+ "if the Coresight timestamp on the platform is same with the kernel time.\n\n");
etm->auxtrace.process_event = cs_etm__process_event;
etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
@@ -2634,54 +3502,70 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
session->auxtrace = &etm->auxtrace;
- etm->unknown_thread = thread__new(999999999, 999999999);
- if (!etm->unknown_thread) {
- err = -ENOMEM;
- goto err_free_queues;
+ err = cs_etm__setup_timeless_decoding(etm);
+ if (err)
+ return err;
+
+ etm->tc.time_shift = tc->time_shift;
+ etm->tc.time_mult = tc->time_mult;
+ etm->tc.time_zero = tc->time_zero;
+ if (event_contains(*tc, time_cycles)) {
+ etm->tc.time_cycles = tc->time_cycles;
+ etm->tc.time_mask = tc->time_mask;
+ etm->tc.cap_user_time_zero = tc->cap_user_time_zero;
+ etm->tc.cap_user_time_short = tc->cap_user_time_short;
}
+ err = cs_etm__synth_events(etm, session);
+ if (err)
+ goto err_free_queues;
+
+ err = cs_etm__queue_aux_records(session);
+ if (err)
+ goto err_free_queues;
/*
- * Initialize list node so that at thread__zput() we can avoid
- * segmentation fault at list_del_init().
+ * Map Trace ID values to CPU metadata.
+ *
+ * Trace metadata will always contain Trace ID values from the legacy algorithm
+ * in case it's read by a version of Perf that doesn't know about HW_ID packets
+ * or the kernel doesn't emit them.
+ *
+	 * The updated kernel drivers that use AUX_HW_ID to send Trace IDs will attempt to use
+	 * the same IDs as the old algorithm as far as is possible, unless there are clashes,
+	 * in which case a different value will be used. This means an older perf may still
+	 * be able to record and read files generated on a newer system.
+	 *
+	 * For a perf able to interpret AUX_HW_ID packets we first check for the presence of
+	 * those packets. If they are there then the values will be mapped and plugged into
+	 * the metadata, and decoders are created only for the mappings received.
+	 *
+	 * If no AUX_HW_ID packets are present - which means a file recorded on an old kernel -
+	 * then we map Trace ID values to CPUs directly from the metadata and create decoders
+	 * for all mappings.
*/
- INIT_LIST_HEAD(&etm->unknown_thread->node);
- err = thread__set_comm(etm->unknown_thread, "unknown", 0);
+ /* Scan for AUX_OUTPUT_HW_ID records to map trace ID values to CPU metadata */
+ aux_hw_id_found = 0;
+ err = perf_session__peek_events(session, session->header.data_offset,
+ session->header.data_size,
+ cs_etm__process_aux_hw_id_cb, &aux_hw_id_found);
if (err)
- goto err_delete_thread;
-
- if (thread__init_maps(etm->unknown_thread, etm->machine)) {
- err = -ENOMEM;
- goto err_delete_thread;
- }
-
- if (dump_trace) {
- cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
- return 0;
- }
+ goto err_free_queues;
- if (session->itrace_synth_opts->set) {
- etm->synth_opts = *session->itrace_synth_opts;
- } else {
- itrace_synth_opts__set_default(&etm->synth_opts,
- session->itrace_synth_opts->default_no_sample);
- etm->synth_opts.callchain = false;
+	/* If no HW ID was found, this is a file with metadata values only - map from the metadata */
+ if (!aux_hw_id_found) {
+ err = cs_etm__map_trace_ids_metadata(etm, num_cpu, metadata);
+ if (err)
+ goto err_free_queues;
}
- err = cs_etm__synth_events(etm, session);
+ err = cs_etm__create_decoders(etm);
if (err)
- goto err_delete_thread;
-
- err = auxtrace_queues__process_index(&etm->queues, session);
- if (err)
- goto err_delete_thread;
+ goto err_free_queues;
etm->data_queued = etm->queues.populated;
-
return 0;
-err_delete_thread:
- thread__zput(etm->unknown_thread);
err_free_queues:
auxtrace_queues__free(&etm->queues);
session->auxtrace = NULL;
@@ -2689,13 +3573,8 @@ err_free_etm:
zfree(&etm);
err_free_metadata:
/* No need to check @metadata[j], free(NULL) is supported */
- for (j = 0; j < num_cpu; j++)
+ for (int j = 0; j < num_cpu; j++)
zfree(&metadata[j]);
zfree(&metadata);
-err_free_traceid_list:
- intlist__delete(traceid_list);
-err_free_hdr:
- zfree(&hdr);
-
return err;
}