// arch/arm64/crypto/crct10dif-ce-core.S
//
// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//

//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
//     Erdinc Ozturk <erdinc.ozturk@intel.com>
//     Vinodh Gopal <vinodh.gopal@intel.com>
//     James Guilford <james.guilford@intel.com>
//     Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses.  You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the
//   distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//       Function API (both entry points share this signature):
//       UINT16 crc_t10dif_pmull_p64 / crc_t10dif_pmull_p8(
//               UINT16 init_crc, //initial CRC value, 16 bits
//               const unsigned char *buf, //buffer pointer to calculate CRC on
//               UINT64 len //buffer length in bytes (64-bit data)
//       );
//
//       Reference paper titled "Fast CRC Computation for Generic
//       Polynomials Using PCLMULQDQ Instruction"
//       URL: http://www.intel.com/content/dam/www/public/us/en/documents
//       /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//
//
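//       For illustration only, a sketch (not part of the original file)
//       of how a C-level caller might select between the two entry
//       points defined at the bottom of this file; the feature test
//       'have_pmull' is an assumed placeholder, and kernel-mode NEON
//       must be held around the call:
//
//               u16 crc = init_crc;
//               kernel_neon_begin();
//               if (have_pmull)         /* assumed CPU feature check */
//                       crc = crc_t10dif_pmull_p64(crc, buf, len);
//               else                    /* plain NEON fallback */
//                       crc = crc_t10dif_pmull_p8(crc, buf, len);
//               kernel_neon_end();
//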

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.cpu		generic+crypto

	arg1_low32	.req	w19
	arg2		.req	x20
	arg3		.req	x21

	vzr		.req	v13

	ad		.req	v14
	bd		.req	v10

	k00_16		.req	v15
	k32_48		.req	v16

	t3		.req	v17
	t4		.req	v18
	t5		.req	v19
	t6		.req	v20
	t7		.req	v21
	t8		.req	v22
	t9		.req	v23

	perm1		.req	v24
	perm2		.req	v25
	perm3		.req	v26
	perm4		.req	v27

	bd1		.req	v28
	bd2		.req	v29
	bd3		.req	v30
	bd4		.req	v31

	.macro		__pmull_init_p64
	.endm

	.macro		__pmull_pre_p64, bd
	.endm
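	// the p64 init/pre hooks are intentionally empty: with the Crypto
	// Extensions, pmull consumes its operands directly and needs no
	// precomputation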

	.macro		__pmull_init_p8
	// k00_16 := 0x0000000000000000_000000000000ffff
	// k32_48 := 0x00000000ffffffff_0000ffffffffffff
	movi		k32_48.2d, #0xffffffff
	mov		k32_48.h[2], k32_48.h[0]
	ushr		k00_16.2d, k32_48.2d, #32

	// prepare the permutation vectors
	mov_q		x5, 0x080f0e0d0c0b0a09
	movi		perm4.8b, #8
	dup		perm1.2d, x5
	eor		perm1.16b, perm1.16b, perm4.16b
	ushr		perm2.2d, perm1.2d, #8
	ushr		perm3.2d, perm1.2d, #16
	ushr		perm4.2d, perm1.2d, #24
	sli		perm2.2d, perm1.2d, #56
	sli		perm3.2d, perm1.2d, #48
	sli		perm4.2d, perm1.2d, #40
	.endm
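
	// for reference, the sequence above produces the tbl index vectors
	// below (a sketch, derived by stepping through the instructions):
	//   perm1 = { 1, 2, 3, 4, 5, 6, 7, 0,  9, 10, 11, 12, 13, 14, 15,  8 }
	//   perm2 = { 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15,  8,  9 }
	//   perm3 = { 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15,  8,  9, 10 }
	//   perm4 = { 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,  8,  9, 10, 11 }
	// i.e. rotations of each 64-bit half by 1..4 bytes, the 16-byte
	// counterpart of the 'ext ... #1..#3' rotations applied to 8-byte
	// operands in __pmull_p8_core below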

	.macro		__pmull_pre_p8, bd
	tbl		bd1.16b, {\bd\().16b}, perm1.16b
	tbl		bd2.16b, {\bd\().16b}, perm2.16b
	tbl		bd3.16b, {\bd\().16b}, perm3.16b
	tbl		bd4.16b, {\bd\().16b}, perm4.16b
	.endm

__pmull_p8_core:
.L__pmull_p8_core:
	ext		t4.8b, ad.8b, ad.8b, #1			// A1
	ext		t5.8b, ad.8b, ad.8b, #2			// A2
	ext		t6.8b, ad.8b, ad.8b, #3			// A3

	pmull		t4.8h, t4.8b, bd.8b			// F = A1*B
	pmull		t8.8h, ad.8b, bd1.8b			// E = A*B1
	pmull		t5.8h, t5.8b, bd.8b			// H = A2*B
	pmull		t7.8h, ad.8b, bd2.8b			// G = A*B2
	pmull		t6.8h, t6.8b, bd.8b			// J = A3*B
	pmull		t9.8h, ad.8b, bd3.8b			// I = A*B3
	pmull		t3.8h, ad.8b, bd4.8b			// K = A*B4
	b		0f

.L__pmull_p8_core2:
	tbl		t4.16b, {ad.16b}, perm1.16b		// A1
	tbl		t5.16b, {ad.16b}, perm2.16b		// A2
	tbl		t6.16b, {ad.16b}, perm3.16b		// A3

	pmull2		t4.8h, t4.16b, bd.16b			// F = A1*B
	pmull2		t8.8h, ad.16b, bd1.16b			// E = A*B1
	pmull2		t5.8h, t5.16b, bd.16b			// H = A2*B
	pmull2		t7.8h, ad.16b, bd2.16b			// G = A*B2
	pmull2		t6.8h, t6.16b, bd.16b			// J = A3*B
	pmull2		t9.8h, ad.16b, bd3.16b			// I = A*B3
	pmull2		t3.8h, ad.16b, bd4.16b			// K = A*B4

0:	eor		t4.16b, t4.16b, t8.16b			// L = E + F
	eor		t5.16b, t5.16b, t7.16b			// M = G + H
	eor		t6.16b, t6.16b, t9.16b			// N = I + J

	uzp1		t8.2d, t4.2d, t5.2d
	uzp2		t4.2d, t4.2d, t5.2d
	uzp1		t7.2d, t6.2d, t3.2d
	uzp2		t6.2d, t6.2d, t3.2d

	// t4 = (L) (P0 + P1) << 8
	// t5 = (M) (P2 + P3) << 16
	eor		t8.16b, t8.16b, t4.16b
	and		t4.16b, t4.16b, k32_48.16b

	// t6 = (N) (P4 + P5) << 24
	// t7 = (K) (P6 + P7) << 32
	eor		t7.16b, t7.16b, t6.16b
	and		t6.16b, t6.16b, k00_16.16b

	eor		t8.16b, t8.16b, t4.16b
	eor		t7.16b, t7.16b, t6.16b

	zip2		t5.2d, t8.2d, t4.2d
	zip1		t4.2d, t8.2d, t4.2d
	zip2		t3.2d, t7.2d, t6.2d
	zip1		t6.2d, t7.2d, t6.2d

	ext		t4.16b, t4.16b, t4.16b, #15
	ext		t5.16b, t5.16b, t5.16b, #14
	ext		t6.16b, t6.16b, t6.16b, #13
	ext		t3.16b, t3.16b, t3.16b, #12

	eor		t4.16b, t4.16b, t5.16b
	eor		t6.16b, t6.16b, t3.16b
	ret
ENDPROC(__pmull_p8_core)
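
// A rough sketch of the idea behind __pmull_p8_core (informal, not from
// the original sources): with only the 8x8 bit pmull available, the
// 64x64 bit carryless multiply is assembled from partial products of
// byte-rotated copies of the operands (A1-A3, bd1-bd4). Each pmull of a
// rotated pair yields, in its 16-bit lanes, the products A[i]*B[j] for
// one fixed rotation distance between i and j. The lanes are XORed
// pairwise (L, M, N above), masked with k00_16/k32_48 to separate the
// halves that wrapped around the 8-byte boundary, repositioned with
// uzp/zip/ext according to their byte weight, and accumulated into t4
// and t6, which the __pmull_p8 macro finally XORs into the direct
// product D = A*B.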

	.macro		__pmull_p8, rq, ad, bd, i
	.ifnc		\bd, v10
	.err
	.endif
	mov		ad.16b, \ad\().16b
	.ifb		\i
	pmull		\rq\().8h, \ad\().8b, bd.8b		// D = A*B
	.else
	pmull2		\rq\().8h, \ad\().16b, bd.16b		// D = A*B
	.endif

	bl		.L__pmull_p8_core\i

	eor		\rq\().16b, \rq\().16b, t4.16b
	eor		\rq\().16b, \rq\().16b, t6.16b
	.endm

	.macro		fold64, p, reg1, reg2
	ldp		q11, q12, [arg2], #0x20

	__pmull_\p	v8, \reg1, v10, 2
	__pmull_\p	\reg1, \reg1, v10

CPU_LE(	rev64		v11.16b, v11.16b		)
CPU_LE(	rev64		v12.16b, v12.16b		)

	__pmull_\p	v9, \reg2, v10, 2
	__pmull_\p	\reg2, \reg2, v10

CPU_LE(	ext		v11.16b, v11.16b, v11.16b, #8	)
CPU_LE(	ext		v12.16b, v12.16b, v12.16b, #8	)

	eor		\reg1\().16b, \reg1\().16b, v8.16b
	eor		\reg2\().16b, \reg2\().16b, v9.16b
	eor		\reg1\().16b, \reg1\().16b, v11.16b
	eor		\reg2\().16b, \reg2\().16b, v12.16b
	.endm
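
	// The fold steps rely on the standard CRC folding identity
	// (informal sketch): folding a 128-bit word W forward by N bytes
	// replaces it with W * x^(8N) mod Q, evaluated as
	// (W_lo x c1) ^ (W_hi x c2) with a precomputed constant pair c1/c2
	// held in v10 (rk3/rk4 for the 128-byte stride of fold64). fold16
	// below applies the same identity to fold one register into the
	// running remainder in v7, optionally preloading the next constant
	// pair \rk into v10.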

	.macro		fold16, p, reg, rk
	__pmull_\p	v8, \reg, v10
	__pmull_\p	\reg, \reg, v10, 2
	.ifnb		\rk
	ldr_l		q10, \rk, x8
	__pmull_pre_\p	v10
	.endif
	eor		v7.16b, v7.16b, v8.16b
	eor		v7.16b, v7.16b, \reg\().16b
	.endm

	.macro		__pmull_p64, rd, rn, rm, n
	.ifb		\n
	pmull		\rd\().1q, \rn\().1d, \rm\().1d
	.else
	pmull2		\rd\().1q, \rn\().2d, \rm\().2d
	.endif
	.endm
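
	// with the Crypto Extensions, the 64x64 bit carryless multiply is a
	// single pmull/pmull2 instruction; the __pmull_p8 path above
	// emulates the same operation for cores that implement only plain
	// NEON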

	.macro		crc_t10dif_pmull, p
	frame_push	3, 128

	mov		arg1_low32, w0
	mov		arg2, x1
	mov		arg3, x2

	movi		vzr.16b, #0		// init zero register

	__pmull_init_\p

	// adjust the 16-bit initial_crc value, scale it to 32 bits
	lsl		arg1_low32, arg1_low32, #16

	// check if smaller than 256
	cmp		arg3, #256

	// for sizes less than 256, we can't fold 128 bytes at a time...
	b.lt		.L_less_than_128_\@

	// load the initial crc value
	// the crc value does not need to be byte-reflected, but it needs
	// to be moved to the high part of the register, because the data
	// will be byte-reflected and will then align with the initial crc
	// in the correct place.
	movi		v10.16b, #0
	mov		v10.s[3], arg1_low32		// initial crc

	// receive the initial 64B data, xor the initial crc value
	ldp		q0, q1, [arg2]
	ldp		q2, q3, [arg2, #0x20]
	ldp		q4, q5, [arg2, #0x40]
	ldp		q6, q7, [arg2, #0x60]
	add		arg2, arg2, #0x80

CPU_LE(	rev64		v0.16b, v0.16b			)
CPU_LE(	rev64		v1.16b, v1.16b			)
CPU_LE(	rev64		v2.16b, v2.16b			)
CPU_LE(	rev64		v3.16b, v3.16b			)
CPU_LE(	rev64		v4.16b, v4.16b			)
CPU_LE(	rev64		v5.16b, v5.16b			)
CPU_LE(	rev64		v6.16b, v6.16b			)
CPU_LE(	rev64		v7.16b, v7.16b			)

CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)
CPU_LE(	ext		v1.16b, v1.16b, v1.16b, #8	)
CPU_LE(	ext		v2.16b, v2.16b, v2.16b, #8	)
CPU_LE(	ext		v3.16b, v3.16b, v3.16b, #8	)
CPU_LE(	ext		v4.16b, v4.16b, v4.16b, #8	)
CPU_LE(	ext		v5.16b, v5.16b, v5.16b, #8	)
CPU_LE(	ext		v6.16b, v6.16b, v6.16b, #8	)
CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)

	// XOR the initial_crc value
	eor		v0.16b, v0.16b, v10.16b

	ldr_l		q10, rk3, x8	// v10 has rk3 and rk4
					// type of pmull instruction
					// will determine which constant to use
	__pmull_pre_\p	v10

	//
	// we subtract 256 instead of 128 to save one instruction from the loop
	//
	sub		arg3, arg3, #256

	// at this point there are 128*x+y (0 <= y < 128) bytes of buffer
	// left. The _fold_64_B_loop will fold 128 bytes at a time until
	// we have 128+y bytes of buffer

	// fold 128 bytes at a time. This section of the code folds 8 vector
	// registers in parallel
.L_fold_64_B_loop_\@:

	fold64		\p, v0, v1
	fold64		\p, v2, v3
	fold64		\p, v4, v5
	fold64		\p, v6, v7

	subs		arg3, arg3, #128

	// check if there is another 128B in the buffer to be able to fold
	b.lt		.L_fold_64_B_end_\@

	if_will_cond_yield_neon
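	// a reschedule is pending: spill the 128 bytes of fold state to the
	// stack frame, yield the NEON unit back to the kernel, then restore
	// the state and reload the clobbered constants before resuming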
	stp		q0, q1, [sp, #.Lframe_local_offset]
	stp		q2, q3, [sp, #.Lframe_local_offset + 32]
	stp		q4, q5, [sp, #.Lframe_local_offset + 64]
	stp		q6, q7, [sp, #.Lframe_local_offset + 96]
	do_cond_yield_neon
	ldp		q0, q1, [sp, #.Lframe_local_offset]
	ldp		q2, q3, [sp, #.Lframe_local_offset + 32]
	ldp		q4, q5, [sp, #.Lframe_local_offset + 64]
	ldp		q6, q7, [sp, #.Lframe_local_offset + 96]
	ldr_l		q10, rk3, x8
	movi		vzr.16b, #0		// init zero register
	__pmull_init_\p
	__pmull_pre_\p	v10
	endif_yield_neon

	b		.L_fold_64_B_loop_\@

.L_fold_64_B_end_\@:
	// at this point, the buffer pointer is pointing at the last y bytes
	// of the buffer, and the 128 bytes of folded data are in 8 of the
	// vector registers: v0-v7

	// fold the 8 vector registers to 1 vector register with different
	// constants

	ldr_l		q10, rk9, x8
	__pmull_pre_\p	v10

	fold16		\p, v0, rk11
	fold16		\p, v1, rk13
	fold16		\p, v2, rk15
	fold16		\p, v3, rk17
	fold16		\p, v4, rk19
	fold16		\p, v5, rk1
	fold16		\p, v6

	// instead of 128, we add 112 (128-16) to the loop counter to save
	// one instruction from the loop. Instead of a cmp instruction, we
	// use the negative flag with the b.lt instruction
	adds		arg3, arg3, #(128-16)
	b.lt		.L_final_reduction_for_128_\@

	// now we have 16+y bytes left to reduce. 16 bytes are in register v7
	// and the rest is in memory. We can fold 16 bytes at a time if y >= 16,
	// so continue folding 16 bytes at a time

.L_16B_reduction_loop_\@:
	__pmull_\p	v8, v7, v10
	__pmull_\p	v7, v7, v10, 2
	eor		v7.16b, v7.16b, v8.16b

	ldr		q0, [arg2], #16
CPU_LE(	rev64		v0.16b, v0.16b			)
CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)
	eor		v7.16b, v7.16b, v0.16b
	subs		arg3, arg3, #16

	// instead of a cmp instruction, we utilize the flags together with
	// the b.ge instruction; this is the equivalent of: cmp arg3, 16-16
	// check if there is any more 16B in the buffer to be able to fold
	b.ge		.L_16B_reduction_loop_\@

	// now we have 16+z bytes left to reduce, where 0 <= z < 16.
	// first, we reduce the data in the v7 register

.L_final_reduction_for_128_\@:
	// check if any more data to fold. If not, compute the CRC of
	// the final 128 bits
	adds		arg3, arg3, #16
	b.eq		.L_128_done_\@

	// here we are getting data that is less than 16 bytes.
	// since we know that there was data before the pointer, we can
	// offset the input pointer back before the actual point, to receive
	// exactly 16 bytes. After that, the registers need to be adjusted.
.L_get_last_two_regs_\@:
	add		arg2, arg2, arg3
	ldr		q1, [arg2, #-16]
CPU_LE(	rev64		v1.16b, v1.16b			)
CPU_LE(	ext		v1.16b, v1.16b, v1.16b, #8	)

	// get rid of the extra data that was loaded before
	// load the shift constant
	adr_l		x4, tbl_shf_table + 16
	sub		x4, x4, arg3
	ld1		{v0.16b}, [x4]

	// shift v7 to the left by arg3 bytes (result in v2)
	tbl		v2.16b, {v7.16b}, v0.16b

	// shift v7 to the right by 16-arg3 bytes
	movi		v9.16b, #0x80
	eor		v0.16b, v0.16b, v9.16b
	tbl		v7.16b, {v7.16b}, v0.16b

	// blend
	sshr		v0.16b, v0.16b, #7	// convert to 8-bit mask
	bsl		v0.16b, v2.16b, v1.16b

	// fold 16 Bytes
	__pmull_\p	v8, v7, v10
	__pmull_\p	v7, v7, v10, 2
	eor		v7.16b, v7.16b, v8.16b
	eor		v7.16b, v7.16b, v0.16b

.L_128_done_\@:
	// compute crc of a 128-bit value
	ldr_l		q10, rk5, x8		// rk5 and rk6 in v10
	__pmull_pre_\p	v10

	// 64b fold
	ext		v0.16b, vzr.16b, v7.16b, #8
	mov		v7.d[0], v7.d[1]
	__pmull_\p	v7, v7, v10
	eor		v7.16b, v7.16b, v0.16b

	// 32b fold
	ext		v0.16b, v7.16b, vzr.16b, #4
	mov		v7.s[3], vzr.s[0]
	__pmull_\p	v0, v0, v10, 2
	eor		v7.16b, v7.16b, v0.16b

	// barrett reduction
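	// in outline (informal sketch): the quotient of the remainder by Q
	// is estimated by multiplying its upper half with the precomputed
	// floor(2^64/Q) (rk7), the estimate is multiplied back by Q (rk8),
	// and the XOR of the two cancels everything above the final
	// 32-bit-scaled CRC, which ends up in v7.s[1]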
	ldr_l		q10, rk7, x8
	__pmull_pre_\p	v10
	mov		v0.d[0], v7.d[1]

	__pmull_\p	v0, v0, v10
	ext		v0.16b, vzr.16b, v0.16b, #12
	__pmull_\p	v0, v0, v10, 2
	ext		v0.16b, vzr.16b, v0.16b, #12
	eor		v7.16b, v7.16b, v0.16b
	mov		w0, v7.s[1]

.L_cleanup_\@:
	// scale the result back to 16 bits
	lsr		x0, x0, #16
	frame_pop
	ret

.L_less_than_128_\@:
	cbz		arg3, .L_cleanup_\@

	movi		v0.16b, #0
	mov		v0.s[3], arg1_low32	// get the initial crc value

	ldr		q7, [arg2], #0x10
CPU_LE(	rev64		v7.16b, v7.16b			)
CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)
	eor		v7.16b, v7.16b, v0.16b	// xor the initial crc value

	cmp		arg3, #16
	b.eq		.L_128_done_\@		// exactly 16 left
	b.lt		.L_less_than_16_left_\@

	ldr_l		q10, rk1, x8		// rk1 and rk2 in v10
	__pmull_pre_\p	v10

	// update the counter. subtract 32 instead of 16 to save one
	// instruction from the loop
	subs		arg3, arg3, #32
	b.ge		.L_16B_reduction_loop_\@

	add		arg3, arg3, #16
	b		.L_get_last_two_regs_\@

.L_less_than_16_left_\@:
	// shift v7 to the right by 16-arg3 bytes, dropping the bytes that
	// were loaded from past the end of the message
	adr_l		x0, tbl_shf_table + 16
	sub		x0, x0, arg3
	ld1		{v0.16b}, [x0]
	movi		v9.16b, #0x80
	eor		v0.16b, v0.16b, v9.16b
	tbl		v7.16b, {v7.16b}, v0.16b
	b		.L_128_done_\@
	.endm

ENTRY(crc_t10dif_pmull_p8)
	crc_t10dif_pmull	p8
ENDPROC(crc_t10dif_pmull_p8)

	.align		5
ENTRY(crc_t10dif_pmull_p64)
	crc_t10dif_pmull	p64
ENDPROC(crc_t10dif_pmull_p64)

// precomputed constants
// these constants are precomputed from the poly:
// 0x8bb70000 (0x8bb7 scaled to 32 bits)
	.section	".rodata", "a"
	.align		4
// Q = 0x18BB70000
// rk1 = 2^(32*3) mod Q << 32
// rk2 = 2^(32*5) mod Q << 32
// rk3 = 2^(32*15) mod Q << 32
// rk4 = 2^(32*17) mod Q << 32
// rk5 = 2^(32*3) mod Q << 32
// rk6 = 2^(32*2) mod Q << 32
// rk7 = floor(2^64/Q)
// rk8 = Q
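//
// Each .octa below packs two 64-bit constants: e.g. the low quadword at
// the rk1 label holds rk1 and the high quadword holds rk2; likewise for
// rk3/rk4, rk5/rk6, rk7/rk8 and the rk9..rk20 pairs consumed by fold16.
// The fold constants are powers of x reduced mod Q, pre-shifted left by
// 32 bits to match the 32-bit-scaled CRC convention used in the code.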

rk1:	.octa		0x06df0000000000002d56000000000000
rk3:	.octa		0x7cf50000000000009d9d000000000000
rk5:	.octa		0x13680000000000002d56000000000000
rk7:	.octa		0x000000018bb7000000000001f65a57f8
rk9:	.octa		0xbfd6000000000000ceae000000000000
rk11:	.octa		0x713c0000000000001e16000000000000
rk13:	.octa		0x80a6000000000000f7f9000000000000
rk15:	.octa		0xe658000000000000044c000000000000
rk17:	.octa		0xa497000000000000ad18000000000000
rk19:	.octa		0xe7b50000000000006ee3000000000000

tbl_shf_table:
// use these values as shift constants with the tbl/tbx instruction;
// different alignments result in values as shown:
//	DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
//	DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2
//	DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3
//	DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
//	DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
//	DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
//	DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9  (16-7) / shr7
//	DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8  (16-8) / shr8
//	DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7  (16-9) / shr9
//	DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6  (16-10) / shr10
//	DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5  (16-11) / shr11
//	DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4  (16-12) / shr12
//	DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3  (16-13) / shr13
//	DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2  (16-14) / shr14
//	DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1  (16-15) / shr15

	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0
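
// Worked example (derived from the table above): for arg3 = 3, the 16
// bytes loaded from tbl_shf_table + 16 - 3 are
//	0x8d, 0x8e, 0x8f, 0x00, 0x01, ..., 0x0c
// Used directly as tbl indices, the 0x8x entries are out of range and
// produce zero bytes, so the result is v7 shifted left by 3 bytes; after
// EORing the index vector with 0x80 the roles flip, giving v7 shifted
// right by 16 - 3 = 13 bytes.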