path: root/tools/perf/arch/x86/util/event.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <stdlib.h>

#include "../../../util/event.h"
#include "../../../util/synthetic-events.h"
#include "../../../util/machine.h"
#include "../../../util/tool.h"
#include "../../../util/map.h"
#include "../../../util/debug.h"
#include "util/sample.h"

#if defined(__x86_64__)

struct perf_event__synthesize_extra_kmaps_cb_args {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	union perf_event *event;
};

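/*
 * Callback for maps__for_each_map(): synthesize a PERF_RECORD_MMAP event
 * for each extra kernel map (on x86_64, the KPTI entry trampoline maps),
 * reusing the pre-allocated event buffer passed in via @data.
 */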
static int perf_event__synthesize_extra_kmaps_cb(struct map *map, void *data)
{
	struct perf_event__synthesize_extra_kmaps_cb_args *args = data;
	union perf_event *event = args->event;
	struct kmap *kmap;
	size_t size;

	if (!__map__is_extra_kernel_map(map))
		return 0;

	kmap = map__kmap(map);

	size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
		      PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
		      args->machine->id_hdr_size;

	memset(event, 0, size);

	event->mmap.header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(args->machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	event->mmap.header.size = size;

	event->mmap.start = map__start(map);
	event->mmap.len   = map__size(map);
	event->mmap.pgoff = map__pgoff(map);
	event->mmap.pid   = args->machine->pid;

	strlcpy(event->mmap.filename, kmap->name, PATH_MAX);

	if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
		return -1;

	return 0;
}

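/*
 * Allocate a single reusable mmap event buffer, then walk the kernel maps
 * and synthesize one event per extra kernel map via the callback above.
 */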
int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int rc;
	struct maps *kmaps = machine__kernel_maps(machine);
	struct perf_event__synthesize_extra_kmaps_cb_args args = {
		.tool = tool,
		.process = process,
		.machine = machine,
		.event = zalloc(sizeof(args.event->mmap) + machine->id_hdr_size),
	};

	if (!args.event) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for extra kernel maps\n");
		return -1;
	}

	rc = maps__for_each_map(kmaps, perf_event__synthesize_extra_kmaps_cb, &args);

	free(args.event);
	return rc;
}

#endif

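/*
 * With PERF_SAMPLE_WEIGHT the 64-bit value is the weight itself.  With
 * PERF_SAMPLE_WEIGHT_STRUCT it is split into a 32-bit weight (var1_dw),
 * a 16-bit instruction latency (var2_w) and a 16-bit retire latency
 * (var3_w).
 */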
void arch_perf_parse_sample_weight(struct perf_sample *data,
				   const __u64 *array, u64 type)
{
	union perf_sample_weight weight;

	weight.full = *array;
	if (type & PERF_SAMPLE_WEIGHT)
		data->weight = weight.full;
	else {
		data->weight = weight.var1_dw;
		data->ins_lat = weight.var2_w;
		data->retire_lat = weight.var3_w;
	}
}

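/*
 * Inverse of the above: re-pack weight, instruction latency and retire
 * latency into the single 64-bit PERF_SAMPLE_WEIGHT_STRUCT encoding.
 */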
void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
					__u64 *array, u64 type)
{
	*array = data->weight;

	if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
		*array &= 0xffffffff;
		*array |= ((u64)data->ins_lat << 32);
		*array |= ((u64)data->retire_lat << 48);
	}
}

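/*
 * Rename the generic pipeline stage cycle sort headers to the x86-specific
 * "Retire Latency" wording.
 */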
const char *arch_perf_header_entry(const char *se_header)
{
	if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
		return "Local Retire Latency";
	else if (!strcmp(se_header, "Pipeline Stage Cycle"))
		return "Retire Latency";

	return se_header;
}

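/*
 * x86 supports the p_stage_cyc and local_p_stage_cyc sort keys, which
 * report retire latency on this architecture.
 */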
int arch_support_sort_key(const char *sort_key)
{
	if (!strcmp(sort_key, "p_stage_cyc"))
		return 1;
	if (!strcmp(sort_key, "local_p_stage_cyc"))
		return 1;
	return 0;
}