arch/mips/tools/loongson3-llsc-check.c
// SPDX-License-Identifier: GPL-2.0-only
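/*
 * Scan a MIPS64 vmlinux image for LL/SC loops and check that the
 * Loongson 3 workaround is in place: every LL/LLD must be preceded by
 * a sync instruction, and any branch inside an LL/SC loop that targets
 * code outside the loop must land on a sync instruction.
 */
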
#include <byteswap.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#ifdef be32toh
/* If libc provides le{16,32,64}toh() then we'll use them */
#elif BYTE_ORDER == LITTLE_ENDIAN
# define le16toh(x)	(x)
# define le32toh(x)	(x)
# define le64toh(x)	(x)
#elif BYTE_ORDER == BIG_ENDIAN
# define le16toh(x)	bswap_16(x)
# define le32toh(x)	bswap_32(x)
# define le64toh(x)	bswap_64(x)
#endif

/* MIPS opcodes, in bits 31:26 of an instruction */
#define OP_SPECIAL	0x00
#define OP_REGIMM	0x01
#define OP_BEQ		0x04
#define OP_BNE		0x05
#define OP_BLEZ		0x06
#define OP_BGTZ		0x07
#define OP_BEQL		0x14
#define OP_BNEL		0x15
#define OP_BLEZL	0x16
#define OP_BGTZL	0x17
#define OP_LL		0x30
#define OP_LLD		0x34
#define OP_SC		0x38
#define OP_SCD		0x3c

/* Bits 20:16 of OP_REGIMM instructions */
#define REGIMM_BLTZ	0x00
#define REGIMM_BGEZ	0x01
#define REGIMM_BLTZL	0x02
#define REGIMM_BGEZL	0x03
#define REGIMM_BLTZAL	0x10
#define REGIMM_BGEZAL	0x11
#define REGIMM_BLTZALL	0x12
#define REGIMM_BGEZALL	0x13

/* Bits 5:0 of OP_SPECIAL instructions */
#define SPECIAL_SYNC	0x0f

static void usage(FILE *f)
{
	fprintf(f, "Usage: loongson3-llsc-check /path/to/vmlinux\n");
}

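/* Sign-extend a 16-bit immediate field to a host int. */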
static int se16(uint16_t x)
{
	return (int16_t)x;
}

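/* True if insn is a load-linked (LL or LLD) instruction. */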
static bool is_ll(uint32_t insn)
{
	switch (insn >> 26) {
	case OP_LL:
	case OP_LLD:
		return true;

	default:
		return false;
	}
}

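/* True if insn is a store-conditional (SC or SCD) instruction. */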
static bool is_sc(uint32_t insn)
{
	switch (insn >> 26) {
	case OP_SC:
	case OP_SCD:
		return true;

	default:
		return false;
	}
}

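/* True if insn is a SYNC instruction, with any stype value. */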
static bool is_sync(uint32_t insn)
{
	/* Bits 31:11 should all be zeroes */
	if (insn >> 11)
		return false;

	/* Bits 5:0 specify the SYNC special encoding */
	if ((insn & 0x3f) != SPECIAL_SYNC)
		return false;

	return true;
}

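/*
 * True if insn is a conditional branch. On a match, *off is set to the
 * branch target's offset from the branch itself, in instructions; the
 * +1 accounts for MIPS branch offsets being encoded relative to the
 * delay slot.
 */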
static bool is_branch(uint32_t insn, int *off)
{
	switch (insn >> 26) {
	case OP_BEQ:
	case OP_BEQL:
	case OP_BNE:
	case OP_BNEL:
	case OP_BGTZ:
	case OP_BGTZL:
	case OP_BLEZ:
	case OP_BLEZL:
		*off = se16(insn) + 1;
		return true;

	case OP_REGIMM:
		switch ((insn >> 16) & 0x1f) {
		case REGIMM_BGEZ:
		case REGIMM_BGEZL:
		case REGIMM_BGEZAL:
		case REGIMM_BGEZALL:
		case REGIMM_BLTZ:
		case REGIMM_BLTZL:
		case REGIMM_BLTZAL:
		case REGIMM_BLTZALL:
			*off = se16(insn) + 1;
			return true;

		default:
			return false;
		}

	default:
		return false;
	}
}

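/*
 * Check a single LL/SC sequence. pc is the address of the LL, code
 * points at it, and sz is the number of bytes from the LL to the end
 * of the section.
 */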
static int check_ll(uint64_t pc, uint32_t *code, size_t sz)
{
	ssize_t i, max, sc_pos;
	int off;

	/*
	 * Every LL must be preceded by a sync instruction in order to ensure
	 * that instruction reordering doesn't allow a prior memory access to
	 * execute after the LL & cause erroneous results.
	 */
	if (!is_sync(le32toh(code[-1]))) {
		fprintf(stderr, "%" PRIx64 ": LL not preceded by sync\n", pc);
		return -EINVAL;
	}

	/* Find the matching SC instruction */
	max = sz / 4;
	for (sc_pos = 0; sc_pos < max; sc_pos++) {
		if (is_sc(le32toh(code[sc_pos])))
			break;
	}
	if (sc_pos >= max) {
		fprintf(stderr, "%" PRIx64 ": LL has no matching SC\n", pc);
		return -EINVAL;
	}

	/*
	 * Check that any branch within the LL/SC loop whose target lies
	 * outside the loop targets a sync instruction, ensuring that
	 * speculative execution can't generate memory accesses due to
	 * instructions outside of the loop.
	 */
	for (i = 0; i < sc_pos; i++) {
		if (!is_branch(le32toh(code[i]), &off))
			continue;

		/*
		 * If the branch target is within the LL/SC loop then we don't
		 * need to worry about it.
		 */
		if ((off >= -i) && (off <= sc_pos))
			continue;

		/* If the branch targets a sync instruction we're all good... */
		if (is_sync(le32toh(code[i + off])))
			continue;

		/* ...but if not, we have a problem */
		fprintf(stderr, "%" PRIx64 ": Branch target not a sync\n",
			pc + (i * 4));
		return -EINVAL;
	}

	return 0;
}

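/*
 * Scan one executable section, starting at address pc, for LL
 * instructions and check each LL/SC sequence found.
 */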
static int check_code(uint64_t pc, uint32_t *code, size_t sz)
{
	int err = 0;

	if (sz % 4) {
		fprintf(stderr, "%" PRIx64 ": Section size not a multiple of 4\n",
			pc);
		err = -EINVAL;
		sz -= (sz % 4);
	}

	if (is_ll(le32toh(code[0]))) {
		fprintf(stderr, "%" PRIx64 ": First instruction in section is an LL\n",
			pc);
		err = -EINVAL;
	}

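/* Step forward one instruction within the section. */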
#define advance() (	\
	code++,		\
	pc += 4,	\
	sz -= 4		\
)

	/*
	 * Skip the first instruction, allowing check_ll to look backwards
	 * unconditionally.
	 */
	advance();

	/* Now scan through the code looking for LL instructions */
	for (; sz; advance()) {
		if (is_ll(le32toh(code[0])))
			err |= check_ll(pc, code, sz);
	}

	return err;
}

int main(int argc, char *argv[])
{
	int vmlinux_fd, status, err, i;
	const char *vmlinux_path;
	struct stat st;
	Elf64_Ehdr *eh;
	Elf64_Shdr *sh;
	void *vmlinux;

	status = EXIT_FAILURE;

	if (argc < 2) {
		usage(stderr);
		goto out_ret;
	}

	vmlinux_path = argv[1];
	vmlinux_fd = open(vmlinux_path, O_RDONLY);
	if (vmlinux_fd == -1) {
		perror("Unable to open vmlinux");
		goto out_ret;
	}

	err = fstat(vmlinux_fd, &st);
	if (err) {
		perror("Unable to stat vmlinux");
		goto out_close;
	}

	vmlinux = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, vmlinux_fd, 0);
	if (vmlinux == MAP_FAILED) {
		perror("Unable to mmap vmlinux");
		goto out_close;
	}

	eh = vmlinux;
	if (memcmp(eh->e_ident, ELFMAG, SELFMAG)) {
		fprintf(stderr, "vmlinux is not an ELF?\n");
		goto out_munmap;
	}

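	/* Only 64-bit little-endian vmlinux images are supported. */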
	if (eh->e_ident[EI_CLASS] != ELFCLASS64) {
		fprintf(stderr, "vmlinux is not 64b?\n");
		goto out_munmap;
	}

	if (eh->e_ident[EI_DATA] != ELFDATA2LSB) {
		fprintf(stderr, "vmlinux is not little endian?\n");
		goto out_munmap;
	}

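	/* Check every executable PROGBITS section for LL/SC issues. */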
	for (i = 0; i < le16toh(eh->e_shnum); i++) {
		sh = vmlinux + le64toh(eh->e_shoff) + (i * le16toh(eh->e_shentsize));

		if (le32toh(sh->sh_type) != SHT_PROGBITS)
			continue;
		if (!(le64toh(sh->sh_flags) & SHF_EXECINSTR))
			continue;

		err = check_code(le64toh(sh->sh_addr),
				 vmlinux + le64toh(sh->sh_offset),
				 le64toh(sh->sh_size));
		if (err)
			goto out_munmap;
	}

	status = EXIT_SUCCESS;
out_munmap:
	munmap(vmlinux, st.st_size);
out_close:
	close(vmlinux_fd);
out_ret:
	return status;
}