path: root/tools/perf/perf.h
#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <asm/unistd.h>

#if defined(__i386__)
#define mb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"	/* /proc/cpuinfo field used to identify the CPU */
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#define mb()		asm volatile("mfence" ::: "memory")
#define wmb()		asm volatile("sfence" ::: "memory")
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb()		asm volatile ("sync" ::: "memory")
#define wmb()		asm volatile ("sync" ::: "memory")
#define rmb()		asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __s390__
#define mb()		asm volatile("bcr 15,0" ::: "memory")
#define wmb()		asm volatile("bcr 15,0" ::: "memory")
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb()		asm volatile("synco" ::: "memory")
# define wmb()		asm volatile("synco" ::: "memory")
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define mb()		asm volatile("" ::: "memory")
# define wmb()		asm volatile("" ::: "memory")
# define rmb()		asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC	"cpu type"
#endif

#ifdef __hppa__
#define mb()		asm volatile("" ::: "memory")
#define wmb()		asm volatile("" ::: "memory")
#define rmb()		asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __sparc__
#ifdef __LP64__
#define mb()		asm volatile("ba,pt %%xcc, 1f\n"	\
				     "membar #StoreLoad\n"	\
				     "1:\n":::"memory")
#else
#define mb()		asm volatile("":::"memory")
#endif
#define wmb()		asm volatile("":::"memory")
#define rmb()		asm volatile("":::"memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __alpha__
#define mb()		asm volatile("mb" ::: "memory")
#define wmb()		asm volatile("wmb" ::: "memory")
#define rmb()		asm volatile("mb" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif

#ifdef __ia64__
#define mb()		asm volatile ("mf" ::: "memory")
#define wmb()		asm volatile ("mf" ::: "memory")
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC	"model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define mb()		((void(*)(void))0xffff0fa0)()
#define wmb()		((void(*)(void))0xffff0fa0)()
#define rmb()		((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC	"Processor"
#endif

#ifdef __aarch64__
#define mb()		asm volatile("dmb ish" ::: "memory")
#define wmb()		asm volatile("dmb ishst" ::: "memory")
#define rmb()		asm volatile("dmb ishld" ::: "memory")
#define cpu_relax()	asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define mb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
#define wmb()	mb()
#define rmb()	mb()
#define CPUINFO_PROC	"cpu model"
#endif

#ifdef __arc__
#define mb()		asm volatile("" ::: "memory")
#define wmb()		asm volatile("" ::: "memory")
#define rmb()		asm volatile("" ::: "memory")
#define CPUINFO_PROC	"Processor"
#endif

#ifdef __metag__
#define mb()		asm volatile("" ::: "memory")
#define wmb()		asm volatile("" ::: "memory")
#define rmb()		asm volatile("" ::: "memory")
#define CPUINFO_PROC	"CPU"
#endif

#define barrier() asm volatile ("" ::: "memory")

#ifndef cpu_relax
#define cpu_relax() barrier()
#endif

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
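
/*
 * Typical use of ACCESS_ONCE() (illustrative sketch only, "pc" standing
 * for a mapped struct perf_event_mmap_page): re-read a location the
 * kernel updates concurrently, so the compiler cannot cache it in a
 * register across iterations of a polling loop.
 *
 *	u64 head = ACCESS_ONCE(pc->data_head);
 *	rmb();
 *	... consume ring-buffer records up to head ...
 *	pc->data_tail = head;
 */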


#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE   31
#define PR_TASK_PERF_EVENTS_ENABLE    32
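
/*
 * Minimal sketch of how these could be used (the helper names below are
 * purely illustrative, not used elsewhere in perf): stop and restart
 * counting around a region of the current task that should not be
 * measured.
 */
static inline int perf_events_pause(void)
{
	return syscall(__NR_prctl, PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
}

static inline int perf_events_resume(void)
{
	return syscall(__NR_prctl, PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
}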

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif
#ifndef NSEC_PER_USEC
# define NSEC_PER_USEC			1000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
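
/*
 * Example (illustrative, run_workload() being a stand-in): wall-clock a
 * region in nanoseconds.
 *
 *	u64 start = rdclock();
 *	run_workload();
 *	fprintf(stderr, "%llu ns\n", rdclock() - start);
 */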

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x)	__builtin_expect(!!(x), 0)
/*
 * min() of two values, with a cheap type check: comparing the addresses
 * of the two temporaries makes the compiler warn if x and y have
 * incompatible pointer types.
 */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags);

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		      pid_t pid, int cpu, int group_fd,
		      unsigned long flags)
{
	int fd;

	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
		     group_fd, flags);

	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);

	return fd;
}
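
/*
 * Illustrative sketch of the call above (not used by the tool itself;
 * the helper name is made up): count CPU cycles for the calling thread
 * on any CPU. pid = 0 means "this thread", cpu = -1 means "any CPU",
 * group_fd = -1 starts a new event group.
 */
static inline int open_cycles_counter_example(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.disabled	= 1,
		.exclude_kernel	= 1,
	};

	return sys_perf_event_open(&attr, 0, -1, -1, 0);
}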

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

/* Per-branch flags reported with PERF_SAMPLE_BRANCH_STACK */
struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 in_tx:1;
	u64 abort:1;
	u64 reserved:60;
};

struct branch_entry {
	u64				from;
	u64				to;
	struct branch_flags flags;
};

struct branch_stack {
	u64				nr;
	struct branch_entry	entries[0];
};
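
/*
 * Both structures above are variable-length records as delivered by the
 * kernel: "nr" gives the number of trailing elements in the zero-length
 * array. Illustrative walk of a branch stack ("bs" being a pointer into
 * sample data):
 *
 *	for (i = 0; i < bs->nr; i++)
 *		printf("%#" PRIx64 " -> %#" PRIx64 "%s\n",
 *		       bs->entries[i].from, bs->entries[i].to,
 *		       bs->entries[i].flags.mispred ? " (mispredicted)" : "");
 */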

extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF
};

struct perf_record_opts {
	struct target target;
	int	     call_graph;
	bool	     group;
	bool	     inherit_stat;
	bool	     no_delay;
	bool	     no_inherit;
	bool	     no_inherit_set;
	bool	     no_samples;
	bool	     raw_samples;
	bool	     sample_address;
	bool	     sample_weight;
	bool	     sample_time;
	bool	     period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64          branch_stack;
	u64	     default_interval;
	u64	     user_interval;
	u16	     stack_dump_size;
	bool	     sample_transaction;
};

#endif