path: root/tools/perf/util/arm-spe.c
Diffstat (limited to 'tools/perf/util/arm-spe.c')
-rw-r--r--    tools/perf/util/arm-spe.c    1154
1 file changed, 1039 insertions(+), 115 deletions(-)
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 3882a5360ada..d46e0cccac99 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -26,6 +26,7 @@
#include "symbol.h"
#include "thread.h"
#include "thread-stack.h"
+#include "tsc.h"
#include "tool.h"
#include "util/synthetic-events.h"
@@ -33,8 +34,11 @@
#include "arm-spe-decoder/arm-spe-decoder.h"
#include "arm-spe-decoder/arm-spe-pkt-decoder.h"
+#include "../../arch/arm64/include/asm/cputype.h"
#define MAX_TIMESTAMP (~0ULL)
+#define is_ldst_op(op) (!!((op) & ARM_SPE_OP_LDST))
+
struct arm_spe {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
@@ -45,14 +49,20 @@ struct arm_spe {
struct machine *machine;
u32 pmu_type;
+ struct perf_tsc_conversion tc;
+
u8 timeless_decoding;
u8 data_queued;
+ u64 sample_type;
u8 sample_flc;
u8 sample_llc;
u8 sample_tlb;
u8 sample_branch;
u8 sample_remote_access;
+ u8 sample_memory;
+ u8 sample_instructions;
+ u64 instructions_sample_period;
u64 l1d_miss_id;
u64 l1d_access_id;
@@ -60,12 +70,20 @@ struct arm_spe {
u64 llc_access_id;
u64 tlb_miss_id;
u64 tlb_access_id;
- u64 branch_miss_id;
+ u64 branch_id;
u64 remote_access_id;
+ u64 memory_id;
+ u64 instructions_id;
u64 kernel_start;
unsigned long num_events;
+ u8 use_ctx_pkt_for_pid;
+
+ u64 **metadata;
+ u64 metadata_ver;
+ u64 metadata_nr_cpu;
+ bool is_homogeneous;
};
struct arm_spe_queue {
@@ -83,8 +101,23 @@ struct arm_spe_queue {
u64 time;
u64 timestamp;
struct thread *thread;
+ u64 period_instructions;
+ u32 flags;
+ struct branch_stack *last_branch;
};
+struct data_source_handle {
+ const struct midr_range *midr_ranges;
+ void (*ds_synth)(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src);
+};
+
+#define DS(range, func) \
+ { \
+ .midr_ranges = range, \
+ .ds_synth = arm_spe__synth_##func, \
+ }
+
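For reference, the DS() macro above fills one data_source_handle entry, pairing a MIDR range list with its data-source synthesis callback. The following standalone sketch (not part of the patch; it uses simplified stand-in types and a hypothetical vendor range) illustrates the dispatch-table pattern it builds:

#include <stdio.h>

struct fake_record { unsigned int source; };      /* stands in for arm_spe_record */
struct fake_range  { unsigned int first, last; }; /* stands in for midr_range */

struct ds_handle {
	const struct fake_range *ranges;
	void (*synth)(const struct fake_record *rec);
};

#define DS(range, func) { .ranges = (range), .synth = (func) }

static const struct fake_range vendor_a_cpus[] = { { 0x10, 0x1f } };

static void synth_vendor_a(const struct fake_record *rec)
{
	printf("vendor A decode, source=%u\n", rec->source);
}

static const struct ds_handle handles[] = {
	DS(vendor_a_cpus, synth_vendor_a),
};

int main(void)
{
	const struct fake_record rec = { .source = 3 };
	unsigned int midr = 0x12;
	unsigned int i;

	for (i = 0; i < sizeof(handles) / sizeof(handles[0]); i++) {
		/* is_midr_in_range_list() plays this role in the real code */
		if (midr >= handles[i].ranges[0].first &&
		    midr <= handles[i].ranges[0].last) {
			handles[i].synth(&rec);
			break;
		}
	}
	return 0;
}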
static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
unsigned char *buf, size_t len)
{
@@ -95,7 +128,7 @@ static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
const char *color = PERF_COLOR_BLUE;
color_fprintf(stdout, color,
- ". ... ARM SPE data: size %zu bytes\n",
+ ". ... ARM SPE data: size %#zx bytes\n",
len);
while (len) {
@@ -105,7 +138,7 @@ static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
else
pkt_len = 1;
printf(".");
- color_fprintf(stdout, color, " %08x: ", pos);
+ color_fprintf(stdout, color, " %08zx: ", pos);
for (i = 0; i < pkt_len; i++)
color_fprintf(stdout, color, " %02x", buf[i]);
for (; i < 16; i++)
@@ -113,7 +146,7 @@ static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
if (ret > 0) {
ret = arm_spe_pkt_desc(&packet, desc,
ARM_SPE_PKT_DESC_MAX);
- if (ret > 0)
+ if (!ret)
color_fprintf(stdout, color, " %s\n", desc);
} else {
color_fprintf(stdout, color, " Bad packet!\n");
@@ -195,11 +228,23 @@ static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
speq->pid = -1;
speq->tid = -1;
speq->cpu = -1;
+ speq->period_instructions = 0;
/* params set */
params.get_trace = arm_spe_get_trace;
params.data = speq;
+ if (spe->synth_opts.last_branch) {
+ size_t sz = sizeof(struct branch_stack);
+
+ /* Allocate up to two entries for PBT + TGT */
+ sz += sizeof(struct branch_entry) *
+ min(spe->synth_opts.last_branch_sz, 2U);
+ speq->last_branch = zalloc(sz);
+ if (!speq->last_branch)
+ goto out_free;
+ }
+
/* create new decoder */
speq->decoder = arm_spe_decoder_new(&params);
if (!speq->decoder)
@@ -209,6 +254,7 @@ static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
out_free:
zfree(&speq->event_buf);
+ zfree(&speq->last_branch);
free(speq);
return NULL;
@@ -221,6 +267,77 @@ static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
PERF_RECORD_MISC_USER;
}
+static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
+ struct auxtrace_queue *queue)
+{
+ struct arm_spe_queue *speq = queue->priv;
+ pid_t tid;
+
+ tid = machine__get_current_tid(spe->machine, speq->cpu);
+ if (tid != -1) {
+ speq->tid = tid;
+ thread__zput(speq->thread);
+ } else
+ speq->tid = queue->tid;
+
+ if ((!speq->thread) && (speq->tid != -1)) {
+ speq->thread = machine__find_thread(spe->machine, -1,
+ speq->tid);
+ }
+
+ if (speq->thread) {
+ speq->pid = thread__pid(speq->thread);
+ if (queue->cpu == -1)
+ speq->cpu = thread__cpu(speq->thread);
+ }
+}
+
+static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
+{
+ struct arm_spe *spe = speq->spe;
+ int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);
+
+ if (err)
+ return err;
+
+ arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);
+
+ return 0;
+}
+
+static u64 *arm_spe__get_metadata_by_cpu(struct arm_spe *spe, u64 cpu)
+{
+ u64 i;
+
+ if (!spe->metadata)
+ return NULL;
+
+ for (i = 0; i < spe->metadata_nr_cpu; i++)
+ if (spe->metadata[i][ARM_SPE_CPU] == cpu)
+ return spe->metadata[i];
+
+ return NULL;
+}
+
+static struct simd_flags arm_spe__synth_simd_flags(const struct arm_spe_record *record)
+{
+ struct simd_flags simd_flags = {};
+
+ if ((record->op & ARM_SPE_OP_LDST) && (record->op & ARM_SPE_OP_SVE_LDST))
+ simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
+
+ if ((record->op & ARM_SPE_OP_OTHER) && (record->op & ARM_SPE_OP_SVE_OTHER))
+ simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
+
+ if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
+ simd_flags.pred |= SIMD_OP_FLAGS_PRED_PARTIAL;
+
+ if (record->type & ARM_SPE_SVE_EMPTY_PRED)
+ simd_flags.pred |= SIMD_OP_FLAGS_PRED_EMPTY;
+
+ return simd_flags;
+}
+
static void arm_spe_prep_sample(struct arm_spe *spe,
struct arm_spe_queue *speq,
union perf_event *event,
@@ -229,21 +346,109 @@ static void arm_spe_prep_sample(struct arm_spe *spe,
struct arm_spe_record *record = &speq->decoder->record;
if (!spe->timeless_decoding)
- sample->time = speq->timestamp;
+ sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);
sample->ip = record->from_ip;
sample->cpumode = arm_spe_cpumode(spe, sample->ip);
sample->pid = speq->pid;
sample->tid = speq->tid;
- sample->addr = record->to_ip;
sample->period = 1;
sample->cpu = speq->cpu;
+ sample->simd_flags = arm_spe__synth_simd_flags(record);
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = sample->cpumode;
event->sample.header.size = sizeof(struct perf_event_header);
}
+static void arm_spe__prep_branch_stack(struct arm_spe_queue *speq)
+{
+ struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record = &speq->decoder->record;
+ struct branch_stack *bstack = speq->last_branch;
+ struct branch_flags *bs_flags;
+ unsigned int last_branch_sz = spe->synth_opts.last_branch_sz;
+ bool have_tgt = !!(speq->flags & PERF_IP_FLAG_BRANCH);
+ bool have_pbt = last_branch_sz >= (have_tgt + 1U) && record->prev_br_tgt;
+ size_t sz = sizeof(struct branch_stack) +
+ sizeof(struct branch_entry) * min(last_branch_sz, 2U) /* PBT + TGT */;
+ int i = 0;
+
+ /* Clean up branch stack */
+ memset(bstack, 0x0, sz);
+
+ if (!have_tgt && !have_pbt)
+ return;
+
+ if (have_tgt) {
+ bstack->entries[i].from = record->from_ip;
+ bstack->entries[i].to = record->to_ip;
+
+ bs_flags = &bstack->entries[i].flags;
+ bs_flags->value = 0;
+
+ if (record->op & ARM_SPE_OP_BR_CR_BL) {
+ if (record->op & ARM_SPE_OP_BR_COND)
+ bs_flags->type |= PERF_BR_COND_CALL;
+ else
+ bs_flags->type |= PERF_BR_CALL;
+ /*
+ * An indirect branch instruction without link (e.g. BR)
+ * is treated as a function return.
+ */
+ } else if (record->op & ARM_SPE_OP_BR_CR_RET ||
+ record->op & ARM_SPE_OP_BR_INDIRECT) {
+ if (record->op & ARM_SPE_OP_BR_COND)
+ bs_flags->type |= PERF_BR_COND_RET;
+ else
+ bs_flags->type |= PERF_BR_RET;
+ } else if (record->op & ARM_SPE_OP_BR_CR_NON_BL_RET) {
+ if (record->op & ARM_SPE_OP_BR_COND)
+ bs_flags->type |= PERF_BR_COND;
+ else
+ bs_flags->type |= PERF_BR_UNCOND;
+ } else {
+ if (record->op & ARM_SPE_OP_BR_COND)
+ bs_flags->type |= PERF_BR_COND;
+ else
+ bs_flags->type |= PERF_BR_UNKNOWN;
+ }
+
+ if (record->type & ARM_SPE_BRANCH_MISS) {
+ bs_flags->mispred = 1;
+ bs_flags->predicted = 0;
+ } else {
+ bs_flags->mispred = 0;
+ bs_flags->predicted = 1;
+ }
+
+ if (record->type & ARM_SPE_BRANCH_NOT_TAKEN)
+ bs_flags->not_taken = 1;
+
+ if (record->type & ARM_SPE_IN_TXN)
+ bs_flags->in_tx = 1;
+
+ bs_flags->cycles = min(record->latency, 0xFFFFU);
+ i++;
+ }
+
+ if (have_pbt) {
+ bs_flags = &bstack->entries[i].flags;
+ bs_flags->type |= PERF_BR_UNKNOWN;
+ bstack->entries[i].to = record->prev_br_tgt;
+ i++;
+ }
+
+ bstack->nr = i;
+ bstack->hw_idx = -1ULL;
+}
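For reference, the two-entry stack that arm_spe__alloc_queue() sizes and this function fills relies on the flexible entries[] array at the end of struct branch_stack. A standalone sketch of the sizing and fill (not part of the patch; simplified local copies of the perf structures are used, and the flags field is collapsed to a plain u64):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct sketch_branch_entry {
	uint64_t from;
	uint64_t to;
	uint64_t flags;			/* simplified: perf uses a bitfield struct */
};

struct sketch_branch_stack {
	uint64_t nr;
	uint64_t hw_idx;
	struct sketch_branch_entry entries[];	/* flexible array member */
};

int main(void)
{
	unsigned int last_branch_sz = 16;	/* user asked for 16, SPE can fill at most 2 */
	size_t sz = sizeof(struct sketch_branch_stack) +
		    sizeof(struct sketch_branch_entry) *
		    (last_branch_sz < 2 ? last_branch_sz : 2);
	struct sketch_branch_stack *bstack = calloc(1, sz);

	if (!bstack)
		return 1;

	/* entry 0: the sampled branch itself (TGT), entry 1: previous branch target (PBT) */
	bstack->entries[0].from = 0x400100;
	bstack->entries[0].to   = 0x400200;
	bstack->entries[1].to   = 0x400080;
	bstack->nr = 2;
	bstack->hw_idx = (uint64_t)-1;

	printf("allocated %zu bytes for %llu entries\n", sz,
	       (unsigned long long)bstack->nr);
	free(bstack);
	return 0;
}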
+
+static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
+{
+ event->header.size = perf_event__sample_event_size(sample, type, 0);
+ return perf_event__synthesize_sample(event, type, 0, sample);
+}
+
static inline int
arm_spe_deliver_synth_event(struct arm_spe *spe,
struct arm_spe_queue *speq __maybe_unused,
@@ -252,6 +457,12 @@ arm_spe_deliver_synth_event(struct arm_spe *spe,
{
int ret;
+ if (spe->synth_opts.inject) {
+ ret = arm_spe__inject_event(event, sample, spe->sample_type);
+ if (ret)
+ return ret;
+ }
+
ret = perf_session__deliver_synth_event(spe->session, event, sample);
if (ret)
pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
@@ -259,39 +470,479 @@ arm_spe_deliver_synth_event(struct arm_spe *spe,
return ret;
}
-static int
-arm_spe_synth_spe_events_sample(struct arm_spe_queue *speq,
- u64 spe_events_id)
+static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
+ u64 spe_events_id, u64 data_src)
{
struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record = &speq->decoder->record;
union perf_event *event = speq->event_buf;
- struct perf_sample sample = { .ip = 0, };
+ struct perf_sample sample;
+ int ret;
+ perf_sample__init(&sample, /*all=*/true);
arm_spe_prep_sample(spe, speq, event, &sample);
sample.id = spe_events_id;
sample.stream_id = spe_events_id;
+ sample.addr = record->virt_addr;
+ sample.phys_addr = record->phys_addr;
+ sample.data_src = data_src;
+ sample.weight = record->latency;
- return arm_spe_deliver_synth_event(spe, speq, event, &sample);
+ ret = arm_spe_deliver_synth_event(spe, speq, event, &sample);
+ perf_sample__exit(&sample);
+ return ret;
+}
+
+static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
+ u64 spe_events_id)
+{
+ struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record = &speq->decoder->record;
+ union perf_event *event = speq->event_buf;
+ struct perf_sample sample;
+ int ret;
+
+ perf_sample__init(&sample, /*all=*/true);
+ arm_spe_prep_sample(spe, speq, event, &sample);
+
+ sample.id = spe_events_id;
+ sample.stream_id = spe_events_id;
+ sample.addr = record->to_ip;
+ sample.weight = record->latency;
+ sample.flags = speq->flags;
+ sample.branch_stack = speq->last_branch;
+
+ ret = arm_spe_deliver_synth_event(spe, speq, event, &sample);
+ perf_sample__exit(&sample);
+ return ret;
+}
+
+static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
+ u64 spe_events_id, u64 data_src)
+{
+ struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record = &speq->decoder->record;
+ union perf_event *event = speq->event_buf;
+ struct perf_sample sample;
+ int ret;
+
+ /*
+ * Handle the perf instruction sampling period.
+ */
+ speq->period_instructions++;
+ if (speq->period_instructions < spe->instructions_sample_period)
+ return 0;
+ speq->period_instructions = 0;
+
+ perf_sample__init(&sample, /*all=*/true);
+ arm_spe_prep_sample(spe, speq, event, &sample);
+
+ sample.id = spe_events_id;
+ sample.stream_id = spe_events_id;
+ sample.addr = record->to_ip;
+ sample.phys_addr = record->phys_addr;
+ sample.data_src = data_src;
+ sample.period = spe->instructions_sample_period;
+ sample.weight = record->latency;
+ sample.flags = speq->flags;
+ sample.branch_stack = speq->last_branch;
+
+ ret = arm_spe_deliver_synth_event(spe, speq, event, &sample);
+ perf_sample__exit(&sample);
+ return ret;
+}
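The period handling at the top of this function is a plain down-sampler: with instructions_sample_period set to N, only every N-th SPE record results in a synthesized instructions sample. A standalone sketch (not part of the patch, with an illustrative period value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period_instructions = 0;
	uint64_t instructions_sample_period = 4;	/* illustrative value */
	int emitted = 0;
	int record;

	for (record = 0; record < 10; record++) {
		period_instructions++;
		if (period_instructions < instructions_sample_period)
			continue;	/* mirrors the early "return 0" above */
		period_instructions = 0;
		emitted++;
	}
	printf("10 records, period %llu -> %d samples\n",
	       (unsigned long long)instructions_sample_period, emitted);
	return 0;
}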
+
+static const struct midr_range common_ds_encoding_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ {},
+};
+
+static const struct midr_range ampereone_ds_encoding_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
+ {},
+};
+
+static const struct midr_range hisi_hip_ds_encoding_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_HISI_HIP12),
+ {},
+};
+
+static void arm_spe__sample_flags(struct arm_spe_queue *speq)
+{
+ const struct arm_spe_record *record = &speq->decoder->record;
+
+ speq->flags = 0;
+ if (record->op & ARM_SPE_OP_BRANCH_ERET) {
+ speq->flags = PERF_IP_FLAG_BRANCH;
+
+ if (record->type & ARM_SPE_BRANCH_MISS)
+ speq->flags |= PERF_IP_FLAG_BRANCH_MISS;
+
+ if (record->type & ARM_SPE_BRANCH_NOT_TAKEN)
+ speq->flags |= PERF_IP_FLAG_NOT_TAKEN;
+
+ if (record->type & ARM_SPE_IN_TXN)
+ speq->flags |= PERF_IP_FLAG_IN_TX;
+
+ if (record->op & ARM_SPE_OP_BR_COND)
+ speq->flags |= PERF_IP_FLAG_CONDITIONAL;
+
+ if (record->op & ARM_SPE_OP_BR_CR_BL)
+ speq->flags |= PERF_IP_FLAG_CALL;
+ else if (record->op & ARM_SPE_OP_BR_CR_RET)
+ speq->flags |= PERF_IP_FLAG_RETURN;
+ /*
+ * An indirect branch instruction without link (e.g. BR)
+ * is treated as a function return.
+ */
+ else if (record->op & ARM_SPE_OP_BR_INDIRECT)
+ speq->flags |= PERF_IP_FLAG_RETURN;
+ }
+}
+
+static void arm_spe__synth_data_source_common(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ /*
+ * Even though four levels of cache hierarchy are possible, no known
+ * production Neoverse systems currently include more than three levels
+ * so for the time being we assume three exist. If a production system
+ * is built with four, then this function would have to be changed to
+ * detect the number of levels for reporting.
+ */
+
+ /*
+ * We have no data on the hit level or data source for stores in the
+ * Neoverse SPE records.
+ */
+ if (record->op & ARM_SPE_OP_ST) {
+ data_src->mem_lvl = PERF_MEM_LVL_NA;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NA;
+ return;
+ }
+
+ switch (record->source) {
+ case ARM_SPE_COMMON_DS_L1D:
+ data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_COMMON_DS_L2:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_COMMON_DS_PEER_CORE:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ /*
+ * We don't know if this is L1, L2 but we do know it was a cache-2-cache
+ * transfer, so set SNOOPX_PEER
+ */
+ case ARM_SPE_COMMON_DS_LOCAL_CLUSTER:
+ case ARM_SPE_COMMON_DS_PEER_CLUSTER:
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ /*
+ * System cache is assumed to be L3
+ */
+ case ARM_SPE_COMMON_DS_SYS_CACHE:
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
+ break;
+ /*
+ * We don't know what level it hit in, except it came from the other
+ * socket
+ */
+ case ARM_SPE_COMMON_DS_REMOTE:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_COMMON_DS_DRAM:
+ data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Source is IMPDEF. Here we convert the source encoding used on AmpereOne
+ * cores to the common (Neoverse, Cortex) encoding to avoid duplicating the
+ * decoding code.
+ */
+static void arm_spe__synth_data_source_ampereone(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ struct arm_spe_record common_record;
+
+ switch (record->source) {
+ case ARM_SPE_AMPEREONE_LOCAL_CHIP_CACHE_OR_DEVICE:
+ common_record.source = ARM_SPE_COMMON_DS_PEER_CORE;
+ break;
+ case ARM_SPE_AMPEREONE_SLC:
+ common_record.source = ARM_SPE_COMMON_DS_SYS_CACHE;
+ break;
+ case ARM_SPE_AMPEREONE_REMOTE_CHIP_CACHE:
+ common_record.source = ARM_SPE_COMMON_DS_REMOTE;
+ break;
+ case ARM_SPE_AMPEREONE_DDR:
+ common_record.source = ARM_SPE_COMMON_DS_DRAM;
+ break;
+ case ARM_SPE_AMPEREONE_L1D:
+ common_record.source = ARM_SPE_COMMON_DS_L1D;
+ break;
+ case ARM_SPE_AMPEREONE_L2D:
+ common_record.source = ARM_SPE_COMMON_DS_L2;
+ break;
+ default:
+ pr_warning_once("AmpereOne: Unknown data source (0x%x)\n",
+ record->source);
+ return;
+ }
+
+ common_record.op = record->op;
+ arm_spe__synth_data_source_common(&common_record, data_src);
+}
+
+static void arm_spe__synth_data_source_hisi_hip(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ /* Use common synthesis method to handle store operations */
+ if (record->op & ARM_SPE_OP_ST) {
+ arm_spe__synth_data_source_common(record, data_src);
+ return;
+ }
+
+ switch (record->source) {
+ case ARM_SPE_HISI_HIP_PEER_CPU:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_PEER_CPU_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_L3:
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
+ break;
+ case ARM_SPE_HISI_HIP_L3_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ break;
+ case ARM_SPE_HISI_HIP_PEER_CLUSTER:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_PEER_CLUSTER_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_REMOTE_SOCKET:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_REMOTE_SOCKET_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_LOCAL_MEM:
+ data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_HISI_HIP_REMOTE_MEM:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_RAM1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ break;
+ case ARM_SPE_HISI_HIP_NC_DEV:
+ data_src->mem_lvl = PERF_MEM_LVL_IO | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_IO;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_HISI_HIP_L2:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_HISI_HIP_L2_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ break;
+ case ARM_SPE_HISI_HIP_L1:
+ data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct data_source_handle data_source_handles[] = {
+ DS(common_ds_encoding_cpus, data_source_common),
+ DS(ampereone_ds_encoding_cpus, data_source_ampereone),
+ DS(hisi_hip_ds_encoding_cpus, data_source_hisi_hip),
+};
+
+static void arm_spe__synth_memory_level(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L3;
+
+ if (record->type & ARM_SPE_LLC_MISS)
+ data_src->mem_lvl |= PERF_MEM_LVL_MISS;
+ else
+ data_src->mem_lvl |= PERF_MEM_LVL_HIT;
+ } else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L1;
+
+ if (record->type & ARM_SPE_L1D_MISS)
+ data_src->mem_lvl |= PERF_MEM_LVL_MISS;
+ else
+ data_src->mem_lvl |= PERF_MEM_LVL_HIT;
+ }
+
+ if (record->type & ARM_SPE_REMOTE_ACCESS)
+ data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+}
+
+static bool arm_spe__synth_ds(struct arm_spe_queue *speq,
+ const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ struct arm_spe *spe = speq->spe;
+ u64 *metadata = NULL;
+ u64 midr;
+ unsigned int i;
+
+ /* Metadata version 1 assumes all CPUs are the same (old behavior) */
+ if (spe->metadata_ver == 1) {
+ const char *cpuid;
+
+ pr_warning_once("Old SPE metadata, re-record to improve decode accuracy\n");
+ cpuid = perf_env__cpuid(spe->session->evlist->env);
+ midr = strtol(cpuid, NULL, 16);
+ } else {
+ /* CPU ID is -1 for per-thread mode */
+ if (speq->cpu < 0) {
+ /*
+ * On a heterogeneous system the CPU ID is -1, so we
+ * cannot confirm whether the data source packet is supported.
+ */
+ if (!spe->is_homogeneous)
+ return false;
+
+ /* On a homogeneous system, simply use CPU0's metadata */
+ if (spe->metadata)
+ metadata = spe->metadata[0];
+ } else {
+ metadata = arm_spe__get_metadata_by_cpu(spe, speq->cpu);
+ }
+
+ if (!metadata)
+ return false;
+
+ midr = metadata[ARM_SPE_CPU_MIDR];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data_source_handles); i++) {
+ if (is_midr_in_range_list(midr, data_source_handles[i].midr_ranges)) {
+ data_source_handles[i].ds_synth(record, data_src);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static u64 arm_spe__synth_data_source(struct arm_spe_queue *speq,
+ const struct arm_spe_record *record)
+{
+ union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA };
+
+ /* Only synthesize data source for LDST operations */
+ if (!is_ldst_op(record->op))
+ return 0;
+
+ if (record->op & ARM_SPE_OP_LD)
+ data_src.mem_op = PERF_MEM_OP_LOAD;
+ else if (record->op & ARM_SPE_OP_ST)
+ data_src.mem_op = PERF_MEM_OP_STORE;
+ else
+ return 0;
+
+ if (!arm_spe__synth_ds(speq, record, &data_src))
+ arm_spe__synth_memory_level(record, &data_src);
+
+ if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
+ data_src.mem_dtlb = PERF_MEM_TLB_WK;
+
+ if (record->type & ARM_SPE_TLB_MISS)
+ data_src.mem_dtlb |= PERF_MEM_TLB_MISS;
+ else
+ data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
+ }
+
+ return data_src.val;
}
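The helpers above describe a memory access by setting bitfields of union perf_mem_data_src, and the result is carried around as the packed .val. A standalone sketch of that packing (not part of the patch; the field names mirror the real union but the widths below are illustrative, the actual layout lives in the perf UAPI headers):

#include <stdio.h>
#include <stdint.h>

union sketch_mem_data_src {
	uint64_t val;
	struct {
		uint64_t mem_op:5;	/* load/store/... */
		uint64_t mem_lvl_num:4;	/* L1/L2/L3/RAM/... */
		uint64_t mem_snoop:5;	/* none/hit/hitm/... */
		uint64_t mem_dtlb:7;	/* TLB walk/hit/miss */
		/* field widths here are illustrative only */
	};
};

int main(void)
{
	union sketch_mem_data_src ds = { .val = 0 };

	ds.mem_op = 1;		/* e.g. a load */
	ds.mem_lvl_num = 2;	/* e.g. hit in L2 */
	ds.mem_snoop = 0;	/* no snoop involved */
	ds.mem_dtlb = 3;	/* e.g. TLB walk + hit */

	printf("data_src.val = %#llx\n", (unsigned long long)ds.val);
	return 0;
}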
static int arm_spe_sample(struct arm_spe_queue *speq)
{
const struct arm_spe_record *record = &speq->decoder->record;
struct arm_spe *spe = speq->spe;
+ u64 data_src;
int err;
+ arm_spe__sample_flags(speq);
+ data_src = arm_spe__synth_data_source(speq, record);
+
if (spe->sample_flc) {
if (record->type & ARM_SPE_L1D_MISS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->l1d_miss_id);
+ err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,
+ data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_L1D_ACCESS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->l1d_access_id);
+ err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,
+ data_src);
if (err)
return err;
}
@@ -299,15 +950,15 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
if (spe->sample_llc) {
if (record->type & ARM_SPE_LLC_MISS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->llc_miss_id);
+ err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,
+ data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_LLC_ACCESS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->llc_access_id);
+ err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,
+ data_src);
if (err)
return err;
}
@@ -315,31 +966,50 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
if (spe->sample_tlb) {
if (record->type & ARM_SPE_TLB_MISS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->tlb_miss_id);
+ err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,
+ data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_TLB_ACCESS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->tlb_access_id);
+ err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,
+ data_src);
if (err)
return err;
}
}
- if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) {
- err = arm_spe_synth_spe_events_sample(speq,
- spe->branch_miss_id);
+ if (spe->synth_opts.last_branch &&
+ (spe->sample_branch || spe->sample_instructions))
+ arm_spe__prep_branch_stack(speq);
+
+ if (spe->sample_branch && (record->op & ARM_SPE_OP_BRANCH_ERET)) {
+ err = arm_spe__synth_branch_sample(speq, spe->branch_id);
if (err)
return err;
}
if (spe->sample_remote_access &&
(record->type & ARM_SPE_REMOTE_ACCESS)) {
- err = arm_spe_synth_spe_events_sample(speq,
- spe->remote_access_id);
+ err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
+ data_src);
+ if (err)
+ return err;
+ }
+
+ /*
+ * When data_src is zero, the record is not a memory operation,
+ * so skip synthesizing a memory sample in that case.
+ */
+ if (spe->sample_memory && is_ldst_op(record->op)) {
+ err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
+ if (err)
+ return err;
+ }
+
+ if (spe->sample_instructions) {
+ err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src);
if (err)
return err;
}
@@ -350,12 +1020,49 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
{
struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record;
int ret;
if (!spe->kernel_start)
spe->kernel_start = machine__kernel_start(spe->machine);
while (1) {
+ /*
+ * The usual logic is to decode the packets first and then
+ * synthesize a sample based on the record; here the flow is
+ * reversed: arm_spe_sample() is called to synthesize samples
+ * prior to arm_spe_decode().
+ *
+ * There are two reasons for this ordering:
+ * 1. When the queue is set up in arm_spe__setup_queue(), the
+ * trace data has already been decoded and a record generated,
+ * but no sample has been synthesized for it yet; that pending
+ * record is handled here.
+ * 2. After decoding trace data, the record timestamp must be
+ * compared against the incoming perf event. If the record
+ * timestamp is later than the perf event, we bail out and push
+ * the record onto the auxtrace heap, deferring its sample
+ * synthesis until the next time this point is reached; this
+ * keeps samples from the Arm SPE trace data correctly time
+ * ordered with other perf events.
+ */
+
+ /*
+ * Update pid/tid info.
+ */
+ record = &speq->decoder->record;
+ if (!spe->timeless_decoding && record->context_id != (u64)-1) {
+ ret = arm_spe_set_tid(speq, record->context_id);
+ if (ret)
+ return ret;
+
+ spe->use_ctx_pkt_for_pid = true;
+ }
+
+ ret = arm_spe_sample(speq);
+ if (ret)
+ return ret;
+
ret = arm_spe_decode(speq->decoder);
if (!ret) {
pr_debug("No data or all data has been processed.\n");
@@ -369,10 +1076,17 @@ static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
if (ret < 0)
continue;
- ret = arm_spe_sample(speq);
- if (ret)
- return ret;
+ record = &speq->decoder->record;
+ /* Update timestamp for the last record */
+ if (record->timestamp > speq->timestamp)
+ speq->timestamp = record->timestamp;
+
+ /*
+ * If the queue's timestamp is later than the timestamp of the
+ * incoming perf event, bail out so the perf event can be
+ * processed first.
+ */
if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
*timestamp = speq->timestamp;
return 0;
@@ -471,31 +1185,6 @@ static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
return timeless_decoding;
}
-static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
- struct auxtrace_queue *queue)
-{
- struct arm_spe_queue *speq = queue->priv;
- pid_t tid;
-
- tid = machine__get_current_tid(spe->machine, speq->cpu);
- if (tid != -1) {
- speq->tid = tid;
- thread__zput(speq->thread);
- } else
- speq->tid = queue->tid;
-
- if ((!speq->thread) && (speq->tid != -1)) {
- speq->thread = machine__find_thread(spe->machine, -1,
- speq->tid);
- }
-
- if (speq->thread) {
- speq->pid = speq->thread->pid_;
- if (queue->cpu == -1)
- speq->cpu = speq->thread->cpu;
- }
-}
-
static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
{
unsigned int queue_nr;
@@ -526,7 +1215,12 @@ static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
ts = timestamp;
}
- arm_spe_set_pid_tid_cpu(spe, queue);
+ /*
+ * A previous context-switch event has set pid/tid in the machine's context, so
+ * here we need to update the pid/tid in the thread and SPE queue.
+ */
+ if (!spe->use_ctx_pkt_for_pid)
+ arm_spe_set_pid_tid_cpu(spe, queue);
ret = arm_spe_run_decoder(speq, &ts);
if (ret < 0) {
@@ -566,10 +1260,29 @@ static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
return 0;
}
+static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
+ struct perf_sample *sample)
+{
+ pid_t pid, tid;
+ int cpu;
+
+ if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))
+ return 0;
+
+ pid = event->context_switch.next_prev_pid;
+ tid = event->context_switch.next_prev_tid;
+ cpu = sample->cpu;
+
+ if (tid == -1)
+ pr_warning("context_switch event has no tid\n");
+
+ return machine__set_current_tid(spe->machine, cpu, pid, tid);
+}
+
static int arm_spe_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
int err = 0;
u64 timestamp;
@@ -585,7 +1298,7 @@ static int arm_spe_process_event(struct perf_session *session,
}
if (sample->time && (sample->time != (u64) -1))
- timestamp = sample->time;
+ timestamp = perf_time_to_tsc(sample->time, &spe->tc);
else
timestamp = 0;
@@ -602,11 +1315,14 @@ static int arm_spe_process_event(struct perf_session *session,
sample->time);
}
} else if (timestamp) {
- if (event->header.type == PERF_RECORD_EXIT) {
- err = arm_spe_process_queues(spe, timestamp);
- if (err)
- return err;
- }
+ err = arm_spe_process_queues(spe, timestamp);
+ if (err)
+ return err;
+
+ if (!spe->use_ctx_pkt_for_pid &&
+ (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
+ event->header.type == PERF_RECORD_SWITCH))
+ err = arm_spe_context_switch(spe, event, sample);
}
return err;
@@ -614,7 +1330,7 @@ static int arm_spe_process_event(struct perf_session *session,
static int arm_spe_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
@@ -652,7 +1368,7 @@ static int arm_spe_process_auxtrace_event(struct perf_session *session,
}
static int arm_spe_flush(struct perf_session *session __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
@@ -672,7 +1388,82 @@ static int arm_spe_flush(struct perf_session *session __maybe_unused,
return arm_spe_process_timeless_queues(spe, -1,
MAX_TIMESTAMP - 1);
- return arm_spe_process_queues(spe, MAX_TIMESTAMP);
+ ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);
+ if (ret)
+ return ret;
+
+ if (!spe->use_ctx_pkt_for_pid)
+ ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
+ "Matching of TIDs to SPE events could be inaccurate.\n");
+
+ return 0;
+}
+
+static u64 *arm_spe__alloc_per_cpu_metadata(u64 *buf, int per_cpu_size)
+{
+ u64 *metadata;
+
+ metadata = zalloc(per_cpu_size);
+ if (!metadata)
+ return NULL;
+
+ memcpy(metadata, buf, per_cpu_size);
+ return metadata;
+}
+
+static void arm_spe__free_metadata(u64 **metadata, int nr_cpu)
+{
+ int i;
+
+ for (i = 0; i < nr_cpu; i++)
+ zfree(&metadata[i]);
+ free(metadata);
+}
+
+static u64 **arm_spe__alloc_metadata(struct perf_record_auxtrace_info *info,
+ u64 *ver, int *nr_cpu)
+{
+ u64 *ptr = (u64 *)info->priv;
+ u64 metadata_size;
+ u64 **metadata = NULL;
+ int hdr_sz, per_cpu_sz, i;
+
+ metadata_size = info->header.size -
+ sizeof(struct perf_record_auxtrace_info);
+
+ /* Metadata version 1 */
+ if (metadata_size == ARM_SPE_AUXTRACE_V1_PRIV_SIZE) {
+ *ver = 1;
+ *nr_cpu = 0;
+ /* No per CPU metadata */
+ return NULL;
+ }
+
+ *ver = ptr[ARM_SPE_HEADER_VERSION];
+ hdr_sz = ptr[ARM_SPE_HEADER_SIZE];
+ *nr_cpu = ptr[ARM_SPE_CPUS_NUM];
+
+ metadata = calloc(*nr_cpu, sizeof(*metadata));
+ if (!metadata)
+ return NULL;
+
+ /* Locate the start address of per CPU metadata */
+ ptr += hdr_sz;
+ per_cpu_sz = (metadata_size - (hdr_sz * sizeof(u64))) / (*nr_cpu);
+
+ for (i = 0; i < *nr_cpu; i++) {
+ metadata[i] = arm_spe__alloc_per_cpu_metadata(ptr, per_cpu_sz);
+ if (!metadata[i])
+ goto err_per_cpu_metadata;
+
+ ptr += per_cpu_sz / sizeof(u64);
+ }
+
+ return metadata;
+
+err_per_cpu_metadata:
+ arm_spe__free_metadata(metadata, *nr_cpu);
+ return NULL;
}
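A worked example of the v2 layout arithmetic used above, as a standalone sketch (not part of the patch, with made-up sizes): the header fields (version, header size, PMU type v2, CPU count) occupy hdr_sz u64 words and the remaining bytes are split evenly across the CPUs.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t hdr_sz = 4;				/* header size, in u64 words */
	uint64_t nr_cpu = 2;
	uint64_t metadata_size = 4 * 8 + 2 * 6 * 8;	/* header + 2 CPUs x 6 u64s, bytes */

	/* same computation as arm_spe__alloc_metadata() */
	uint64_t per_cpu_sz = (metadata_size - hdr_sz * sizeof(uint64_t)) / nr_cpu;

	printf("per-CPU metadata block: %llu bytes (%llu u64 fields)\n",
	       (unsigned long long)per_cpu_sz,
	       (unsigned long long)(per_cpu_sz / sizeof(uint64_t)));
	return 0;
}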
static void arm_spe_free_queue(void *priv)
@@ -684,6 +1475,7 @@ static void arm_spe_free_queue(void *priv)
thread__zput(speq->thread);
arm_spe_decoder_free(speq->decoder);
zfree(&speq->event_buf);
+ zfree(&speq->last_branch);
free(speq);
}
@@ -709,6 +1501,7 @@ static void arm_spe_free(struct perf_session *session)
auxtrace_heap__free(&spe->heap);
arm_spe_free_events(session);
session->auxtrace = NULL;
+ arm_spe__free_metadata(spe->metadata, spe->metadata_nr_cpu);
free(spe);
}
@@ -720,45 +1513,60 @@ static bool arm_spe_evsel_is_auxtrace(struct perf_session *session,
return evsel->core.attr.type == spe->pmu_type;
}
-static const char * const arm_spe_info_fmts[] = {
- [ARM_SPE_PMU_TYPE] = " PMU Type %"PRId64"\n",
+static const char * const metadata_hdr_v1_fmts[] = {
+ [ARM_SPE_PMU_TYPE] = " PMU Type :%"PRId64"\n",
+ [ARM_SPE_PER_CPU_MMAPS] = " Per CPU mmaps :%"PRId64"\n",
};
-static void arm_spe_print_info(__u64 *arr)
-{
- if (!dump_trace)
- return;
-
- fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
-}
+static const char * const metadata_hdr_fmts[] = {
+ [ARM_SPE_HEADER_VERSION] = " Header version :%"PRId64"\n",
+ [ARM_SPE_HEADER_SIZE] = " Header size :%"PRId64"\n",
+ [ARM_SPE_PMU_TYPE_V2] = " PMU type v2 :%"PRId64"\n",
+ [ARM_SPE_CPUS_NUM] = " CPU number :%"PRId64"\n",
+};
-struct arm_spe_synth {
- struct perf_tool dummy_tool;
- struct perf_session *session;
+static const char * const metadata_per_cpu_fmts[] = {
+ [ARM_SPE_MAGIC] = " Magic :0x%"PRIx64"\n",
+ [ARM_SPE_CPU] = " CPU # :%"PRId64"\n",
+ [ARM_SPE_CPU_NR_PARAMS] = " Num of params :%"PRId64"\n",
+ [ARM_SPE_CPU_MIDR] = " MIDR :0x%"PRIx64"\n",
+ [ARM_SPE_CPU_PMU_TYPE] = " PMU Type :%"PRId64"\n",
+ [ARM_SPE_CAP_MIN_IVAL] = " Min Interval :%"PRId64"\n",
};
-static int arm_spe_event_synth(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
+static void arm_spe_print_info(struct arm_spe *spe, __u64 *arr)
{
- struct arm_spe_synth *arm_spe_synth =
- container_of(tool, struct arm_spe_synth, dummy_tool);
+ unsigned int i, cpu, hdr_size, cpu_num, cpu_size;
+ const char * const *hdr_fmts;
- return perf_session__deliver_synth_event(arm_spe_synth->session,
- event, NULL);
-}
+ if (!dump_trace)
+ return;
-static int arm_spe_synth_event(struct perf_session *session,
- struct perf_event_attr *attr, u64 id)
-{
- struct arm_spe_synth arm_spe_synth;
+ if (spe->metadata_ver == 1) {
+ cpu_num = 0;
+ hdr_size = ARM_SPE_AUXTRACE_V1_PRIV_MAX;
+ hdr_fmts = metadata_hdr_v1_fmts;
+ } else {
+ cpu_num = arr[ARM_SPE_CPUS_NUM];
+ hdr_size = arr[ARM_SPE_HEADER_SIZE];
+ hdr_fmts = metadata_hdr_fmts;
+ }
- memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth));
- arm_spe_synth.session = session;
+ for (i = 0; i < hdr_size; i++)
+ fprintf(stdout, hdr_fmts[i], arr[i]);
- return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1,
- &id, arm_spe_event_synth);
+ arr += hdr_size;
+ for (cpu = 0; cpu < cpu_num; cpu++) {
+ /*
+ * The parameters from ARM_SPE_MAGIC to ARM_SPE_CPU_NR_PARAMS
+ * are fixed. The number of parameters that follow is given by
+ * the field 'ARM_SPE_CPU_NR_PARAMS'.
+ */
+ cpu_size = (ARM_SPE_CPU_NR_PARAMS + 1) + arr[ARM_SPE_CPU_NR_PARAMS];
+ for (i = 0; i < cpu_size; i++)
+ fprintf(stdout, metadata_per_cpu_fmts[i], arr[i]);
+ arr += cpu_size;
+ }
}
static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
@@ -801,14 +1609,18 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.size = sizeof(struct perf_event_attr);
attr.type = PERF_TYPE_HARDWARE;
- attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
+ attr.sample_type = evsel->core.attr.sample_type &
+ (PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
- PERF_SAMPLE_PERIOD;
+ PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
+ PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
if (spe->timeless_decoding)
attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
else
attr.sample_type |= PERF_SAMPLE_TIME;
+ spe->sample_type = attr.sample_type;
+
attr.exclude_user = evsel->core.attr.exclude_user;
attr.exclude_kernel = evsel->core.attr.exclude_kernel;
attr.exclude_hv = evsel->core.attr.exclude_hv;
@@ -827,7 +1639,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_flc = true;
/* Level 1 data cache miss */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->l1d_miss_id = id;
@@ -835,7 +1647,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
/* Level 1 data cache access */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->l1d_access_id = id;
@@ -847,7 +1659,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_llc = true;
/* Last level cache miss */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->llc_miss_id = id;
@@ -855,7 +1667,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
/* Last level cache access */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->llc_access_id = id;
@@ -867,7 +1679,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_tlb = true;
/* TLB miss */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->tlb_miss_id = id;
@@ -875,7 +1687,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
/* TLB access */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->tlb_access_id = id;
@@ -883,15 +1695,28 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
}
+ if (spe->synth_opts.last_branch) {
+ if (spe->synth_opts.last_branch_sz > 2)
+ pr_debug("Arm SPE supports only two bstack entries (PBT+TGT).\n");
+
+ attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ /*
+ * We don't use the hardware index, but the sample generation
+ * code uses the new format branch_stack with this field,
+ * so the event attributes must indicate that it's present.
+ */
+ attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
+ }
+
if (spe->synth_opts.branches) {
spe->sample_branch = true;
- /* Branch miss */
- err = arm_spe_synth_event(session, &attr, id);
+ /* Branch */
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
- spe->branch_miss_id = id;
- arm_spe_set_event_name(evlist, id, "branch-miss");
+ spe->branch_id = id;
+ arm_spe_set_event_name(evlist, id, "branch");
id += 1;
}
@@ -899,7 +1724,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_remote_access = true;
/* Remote access */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->remote_access_id = id;
@@ -907,24 +1732,92 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
}
+ if (spe->synth_opts.mem) {
+ spe->sample_memory = true;
+
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->memory_id = id;
+ arm_spe_set_event_name(evlist, id, "memory");
+ id += 1;
+ }
+
+ if (spe->synth_opts.instructions) {
+ if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
+ pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n");
+ goto synth_instructions_out;
+ }
+ if (spe->synth_opts.period > 1)
+ pr_warning("Arm SPE has a hardware-based sample period.\n"
+ "Additional instruction events will be discarded by --itrace\n");
+
+ spe->sample_instructions = true;
+ attr.config = PERF_COUNT_HW_INSTRUCTIONS;
+ attr.sample_period = spe->synth_opts.period;
+ spe->instructions_sample_period = attr.sample_period;
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->instructions_id = id;
+ arm_spe_set_event_name(evlist, id, "instructions");
+ }
+synth_instructions_out:
+
return 0;
}
+static bool arm_spe__is_homogeneous(u64 **metadata, int nr_cpu)
+{
+ u64 midr;
+ int i;
+
+ if (!nr_cpu)
+ return false;
+
+ for (i = 0; i < nr_cpu; i++) {
+ if (!metadata[i])
+ return false;
+
+ if (i == 0) {
+ midr = metadata[i][ARM_SPE_CPU_MIDR];
+ continue;
+ }
+
+ if (midr != metadata[i][ARM_SPE_CPU_MIDR])
+ return false;
+ }
+
+ return true;
+}
+
int arm_spe_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
- size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
+ size_t min_sz = ARM_SPE_AUXTRACE_V1_PRIV_SIZE;
+ struct perf_record_time_conv *tc = &session->time_conv;
struct arm_spe *spe;
- int err;
+ u64 **metadata = NULL;
+ u64 metadata_ver;
+ int nr_cpu, err;
if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
min_sz)
return -EINVAL;
+ metadata = arm_spe__alloc_metadata(auxtrace_info, &metadata_ver,
+ &nr_cpu);
+ if (!metadata && metadata_ver != 1) {
+ pr_err("Failed to parse Arm SPE metadata.\n");
+ return -EINVAL;
+ }
+
spe = zalloc(sizeof(struct arm_spe));
- if (!spe)
- return -ENOMEM;
+ if (!spe) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
err = auxtrace_queues__init(&spe->queues);
if (err)
@@ -933,9 +1826,38 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
spe->session = session;
spe->machine = &session->machines.host; /* No kvm support */
spe->auxtrace_type = auxtrace_info->type;
- spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
+ if (metadata_ver == 1)
+ spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
+ else
+ spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE_V2];
+ spe->metadata = metadata;
+ spe->metadata_ver = metadata_ver;
+ spe->metadata_nr_cpu = nr_cpu;
+ spe->is_homogeneous = arm_spe__is_homogeneous(metadata, nr_cpu);
spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
+
+ /*
+ * The synthesized event PERF_RECORD_TIME_CONV has already been handled
+ * and the hardware clock parameters are stored in the session
+ * context. Pass these parameters to the struct perf_tsc_conversion
+ * in "spe->tc", which is used later to convert between the clock
+ * counter and timestamps.
+ *
+ * For backward compatibility, copy the fields starting from
+ * "time_cycles" only if they are contained in the event.
+ */
+ spe->tc.time_shift = tc->time_shift;
+ spe->tc.time_mult = tc->time_mult;
+ spe->tc.time_zero = tc->time_zero;
+
+ if (event_contains(*tc, time_cycles)) {
+ spe->tc.time_cycles = tc->time_cycles;
+ spe->tc.time_mask = tc->time_mask;
+ spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
+ spe->tc.cap_user_time_short = tc->cap_user_time_short;
+ }
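With the parameters copied above, tsc_to_perf_time() converts an SPE timestamp counter value into perf time using the quotient/remainder form of the conversion formula documented for perf_event_mmap_page. A standalone sketch of that arithmetic (not part of the patch, with made-up parameter values):

#include <stdio.h>
#include <stdint.h>

static uint64_t cyc_to_ns(uint64_t cyc, uint16_t shift, uint32_t mult, uint64_t zero)
{
	/* split into quotient and remainder to avoid 64-bit multiply overflow */
	uint64_t quot = cyc >> shift;
	uint64_t rem  = cyc & (((uint64_t)1 << shift) - 1);

	return zero + quot * mult + ((rem * mult) >> shift);
}

int main(void)
{
	/* illustrative values only; the real ones come from PERF_RECORD_TIME_CONV */
	uint16_t time_shift = 21;
	uint32_t time_mult  = 20971520;
	uint64_t time_zero  = 1000000000ULL;

	printf("counter 1234567 -> %llu ns\n",
	       (unsigned long long)cyc_to_ns(1234567, time_shift, time_mult, time_zero));
	return 0;
}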
+
spe->auxtrace.process_event = arm_spe_process_event;
spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
spe->auxtrace.flush_events = arm_spe_flush;
@@ -944,7 +1866,7 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace;
session->auxtrace = &spe->auxtrace;
- arm_spe_print_info(&auxtrace_info->priv[0]);
+ arm_spe_print_info(spe, &auxtrace_info->priv[0]);
if (dump_trace)
return 0;
@@ -972,5 +1894,7 @@ err_free_queues:
session->auxtrace = NULL;
err_free:
free(spe);
+err_free_metadata:
+ arm_spe__free_metadata(metadata, nr_cpu);
return err;
}