src/crypto/poly1305-avx2-x86_64.S

/*
 * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/linkage.h>

.section .rodata.cst32.ANMASK, "aM", @progbits, 32
.align 32
ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
	.octa 0x0000000003ffffff0000000003ffffff
.section .rodata.cst32.ORMASK, "aM", @progbits, 32
.align 32
ORMASK:	.octa 0x00000000010000000000000001000000
	.octa 0x00000000010000000000000001000000
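
# ANMASK keeps the low 26 bits of each 64-bit lane when splitting message
# blocks into limbs; ORMASK sets bit 24 of each lane, which is the 2^128
# padding bit of a full 16-byte block expressed in the fifth 26-bit limb
# (2^128 = 2^(4*26+24)).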

.text

#define h0 0x00(%rdi)
#define h1 0x04(%rdi)
#define h2 0x08(%rdi)
#define h3 0x0c(%rdi)
#define h4 0x10(%rdi)
#define r0 0x00(%rdx)
#define r1 0x04(%rdx)
#define r2 0x08(%rdx)
#define r3 0x0c(%rdx)
#define r4 0x10(%rdx)
#define u0 0x00(%r8)
#define u1 0x04(%r8)
#define u2 0x08(%r8)
#define u3 0x0c(%r8)
#define u4 0x10(%r8)
#define w0 0x14(%r8)
#define w1 0x18(%r8)
#define w2 0x1c(%r8)
#define w3 0x20(%r8)
#define w4 0x24(%r8)
#define y0 0x28(%r8)
#define y1 0x2c(%r8)
#define y2 0x30(%r8)
#define y3 0x34(%r8)
#define y4 0x38(%r8)
#define m %rsi
#define hc0 %ymm0
#define hc1 %ymm1
#define hc2 %ymm2
#define hc3 %ymm3
#define hc4 %ymm4
#define hc0x %xmm0
#define hc1x %xmm1
#define hc2x %xmm2
#define hc3x %xmm3
#define hc4x %xmm4
#define t1 %ymm5
#define t2 %ymm6
#define t1x %xmm5
#define t2x %xmm6
#define ruwy0 %ymm7
#define ruwy1 %ymm8
#define ruwy2 %ymm9
#define ruwy3 %ymm10
#define ruwy4 %ymm11
#define ruwy0x %xmm7
#define ruwy1x %xmm8
#define ruwy2x %xmm9
#define ruwy3x %xmm10
#define ruwy4x %xmm11
#define svxz1 %ymm12
#define svxz2 %ymm13
#define svxz3 %ymm14
#define svxz4 %ymm15
#define d0 %r9
#define d1 %r10
#define d2 %r11
#define d3 %r12
#define d4 %r13
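
# Naming scheme: each ruwy vector holds one limb of [y,w,u,r] =
# [r^4,r^3,r^2,r] in 64-bit lanes 0..3; svxz1..svxz4 hold the same limbs
# multiplied by 5; hc0..hc4 hold the accumulator-plus-message limbs of all
# four blocks; d0..d4 receive the horizontally summed 64-bit results.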

ENTRY(poly1305_asm_4block_avx2)
	# %rdi: Accumulator h[5]
	# %rsi: 64 byte input block m
	# %rdx: Poly1305 key r[5]
	# %rcx: Quadblock count
	# %r8:  Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5]

	# This four-block variant uses loop-unrolled block processing. It
	# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
	# h = (h + m) * r  =>  h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
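	#
	# A C-style sketch of one loop iteration (illustrative pseudocode
	# only, not assembled; r4 = r^4 etc., p = 2^130 - 5):
	#
	#	while (quadblocks--) {
	#		h = ((h + m[0]) * r4 + m[1] * r3 +
	#		      m[2] * r2 + m[3] * r) % p;
	#		m += 4;
	#	}
	#
	# The four lane products are computed in parallel with AVX2 and
	# summed horizontally before the scalar carry propagation below.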

	vzeroupper
	push		%rbx
	push		%r12
	push		%r13

	# combine r0,u0,w0,y0
	vmovd		y0,ruwy0x
	vmovd		w0,t1x
	vpunpcklqdq	t1,ruwy0,ruwy0
	vmovd		u0,t1x
	vmovd		r0,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy0,ruwy0

	# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
	vmovd		y1,ruwy1x
	vmovd		w1,t1x
	vpunpcklqdq	t1,ruwy1,ruwy1
	vmovd		u1,t1x
	vmovd		r1,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy1,ruwy1
	vpslld		$2,ruwy1,svxz1
	vpaddd		ruwy1,svxz1,svxz1
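	# Multiply by 5 via shift-and-add: svxz = (ruwy << 2) + ruwy. Because
	# 2^130 == 5 (mod 2^130 - 5), limb products that cross the 130-bit
	# boundary are folded back by using these precomputed 5x values.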

	# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
	vmovd		y2,ruwy2x
	vmovd		w2,t1x
	vpunpcklqdq	t1,ruwy2,ruwy2
	vmovd		u2,t1x
	vmovd		r2,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy2,ruwy2
	vpslld		$2,ruwy2,svxz2
	vpaddd		ruwy2,svxz2,svxz2

	# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
	vmovd		y3,ruwy3x
	vmovd		w3,t1x
	vpunpcklqdq	t1,ruwy3,ruwy3
	vmovd		u3,t1x
	vmovd		r3,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy3,ruwy3
	vpslld		$2,ruwy3,svxz3
	vpaddd		ruwy3,svxz3,svxz3

	# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
	vmovd		y4,ruwy4x
	vmovd		w4,t1x
	vpunpcklqdq	t1,ruwy4,ruwy4
	vmovd		u4,t1x
	vmovd		r4,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy4,ruwy4
	vpslld		$2,ruwy4,svxz4
	vpaddd		ruwy4,svxz4,svxz4

.Ldoblock4:
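	# Each iteration loads four 16-byte blocks and splits every block
	# into five 26-bit limbs by reading overlapping dwords at byte
	# offsets 0,3,6,9,12 and shifting right by 0,2,4,6,8 bits. The
	# running accumulator h is added into lane 0 only, the lane that is
	# multiplied by r^4 below.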
	# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
	#	 m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
	vmovd		0x00(m),hc0x
	vmovd		0x10(m),t1x
	vpunpcklqdq	t1,hc0,hc0
	vmovd		0x20(m),t1x
	vmovd		0x30(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc0,hc0
	vpand		ANMASK(%rip),hc0,hc0
	vmovd		h0,t1x
	vpaddd		t1,hc0,hc0
	# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
	#	 (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
	vmovd		0x03(m),hc1x
	vmovd		0x13(m),t1x
	vpunpcklqdq	t1,hc1,hc1
	vmovd		0x23(m),t1x
	vmovd		0x33(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc1,hc1
	vpsrld		$2,hc1,hc1
	vpand		ANMASK(%rip),hc1,hc1
	vmovd		h1,t1x
	vpaddd		t1,hc1,hc1
	# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
	#	 (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
	vmovd		0x06(m),hc2x
	vmovd		0x16(m),t1x
	vpunpcklqdq	t1,hc2,hc2
	vmovd		0x26(m),t1x
	vmovd		0x36(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc2,hc2
	vpsrld		$4,hc2,hc2
	vpand		ANMASK(%rip),hc2,hc2
	vmovd		h2,t1x
	vpaddd		t1,hc2,hc2
	# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
	#	 (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
	vmovd		0x09(m),hc3x
	vmovd		0x19(m),t1x
	vpunpcklqdq	t1,hc3,hc3
	vmovd		0x29(m),t1x
	vmovd		0x39(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc3,hc3
	vpsrld		$6,hc3,hc3
	vpand		ANMASK(%rip),hc3,hc3
	vmovd		h3,t1x
	vpaddd		t1,hc3,hc3
	# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
	#	 (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
	vmovd		0x0c(m),hc4x
	vmovd		0x1c(m),t1x
	vpunpcklqdq	t1,hc4,hc4
	vmovd		0x2c(m),t1x
	vmovd		0x3c(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc4,hc4
	vpsrld		$8,hc4,hc4
	vpor		ORMASK(%rip),hc4,hc4
	vmovd		h4,t1x
	vpaddd		t1,hc4,hc4

	# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
	vpmuludq	hc0,ruwy0,t1
	# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
	vpmuludq	hc1,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
	vpmuludq	hc2,svxz3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
	vpmuludq	hc3,svxz2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
	vpmuludq	hc4,svxz1,t2
	vpaddq		t2,t1,t1
	# d0 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d0
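	# Horizontal sum: vpermq $0xee replicates the upper 128 bits into
	# both halves, the first vpaddq folds lanes [2,3] into [0,1],
	# vpsrldq $8 lines up the remaining qword, and the final vpaddq
	# leaves t1[0]+t1[1]+t1[2]+t1[3] in the low qword for vmovq.
	# The same pattern repeats for d1..d4 below.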

	# t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
	vpmuludq	hc0,ruwy1,t1
	# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
	vpmuludq	hc1,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
	vpmuludq	hc2,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
	vpmuludq	hc3,svxz3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
	vpmuludq	hc4,svxz2,t2
	vpaddq		t2,t1,t1
	# d1 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d1

	# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
	vpmuludq	hc0,ruwy2,t1
	# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
	vpmuludq	hc1,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
	vpmuludq	hc2,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
	vpmuludq	hc3,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
	vpmuludq	hc4,svxz3,t2
	vpaddq		t2,t1,t1
	# d2 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d2

	# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
	vpmuludq	hc0,ruwy3,t1
	# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
	vpmuludq	hc1,ruwy2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
	vpmuludq	hc2,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
	vpmuludq	hc3,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
	vpmuludq	hc4,svxz4,t2
	vpaddq		t2,t1,t1
	# d3 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d3

	# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
	vpmuludq	hc0,ruwy4,t1
	# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
	vpmuludq	hc1,ruwy3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
	vpmuludq	hc2,ruwy2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
	vpmuludq	hc3,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
	vpmuludq	hc4,ruwy0,t2
	vpaddq		t2,t1,t1
	# d4 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d4
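	# Each limb product fits in at most about 55 bits and each d is a
	# sum of 20 such products, so d0..d4 stay well below 2^64 and the
	# carry chain below can run in 64-bit scalar registers.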

	# d1 += d0 >> 26
	mov		d0,%rax
	shr		$26,%rax
	add		%rax,d1
	# h0 = d0 & 0x3ffffff
	mov		d0,%rbx
	and		$0x3ffffff,%ebx

	# d2 += d1 >> 26
	mov		d1,%rax
	shr		$26,%rax
	add		%rax,d2
	# h1 = d1 & 0x3ffffff
	mov		d1,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h1

	# d3 += d2 >> 26
	mov		d2,%rax
	shr		$26,%rax
	add		%rax,d3
	# h2 = d2 & 0x3ffffff
	mov		d2,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h2

	# d4 += d3 >> 26
	mov		d3,%rax
	shr		$26,%rax
	add		%rax,d4
	# h3 = d3 & 0x3ffffff
	mov		d3,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h3

	# h0 += (d4 >> 26) * 5
	mov		d4,%rax
	shr		$26,%rax
	lea		(%eax,%eax,4),%eax
	add		%eax,%ebx
	# h4 = d4 & 0x3ffffff
	mov		d4,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h4

	# h1 += h0 >> 26
	mov		%ebx,%eax
	shr		$26,%eax
	add		%eax,h1
	# h0 = h0 & 0x3ffffff
	andl		$0x3ffffff,%ebx
	mov		%ebx,h0
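	# h is stored only partially reduced (a limb may slightly exceed 26
	# bits); the final reduction modulo 2^130 - 5 is deferred to the
	# caller's scalar finalization code that emits the tag.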

	add		$0x40,m
	dec		%rcx
	jnz		.Ldoblock4

	vzeroupper
	pop		%r13
	pop		%r12
	pop		%rbx
	ret
ENDPROC(poly1305_asm_4block_avx2)