author    | Jason A. Donenfeld <Jason@zx2c4.com> | 2018-09-18 23:46:46 +0200
committer | Jason A. Donenfeld <Jason@zx2c4.com> | 2018-09-19 00:07:21 +0200
commit    | 5d8b334d276ccf2514fc94b20f14d4ee709427f1 (patch)
tree      | d9ffa567a04cfae873ee51560b0281c59fdcd8d0
parent    | Add neon and remove 64-bit implementations (diff)
download  | kbench9000-5d8b334d276ccf2514fc94b20f14d4ee709427f1.tar.xz
          | kbench9000-5d8b334d276ccf2514fc94b20f14d4ee709427f1.zip
Convert to chacha bencher
-rw-r--r-- | Makefile             |    2
-rw-r--r-- | ard-glue.c           |   51
-rw-r--r-- | ard.S                |  521
-rw-r--r-- | curve25519-donna32.c |  861
-rw-r--r-- | curve25519-fiat32.c  |  838
-rw-r--r-- | curve25519-neon.S    | 2110
-rw-r--r-- | generic.c            |   96
-rw-r--r-- | main.c               |  124
-rw-r--r-- | openssl.S            | 1471
-rw-r--r-- | test_vectors.h       |   48
10 files changed, 2197 insertions, 3925 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 ifneq ($(KERNELRELEASE),)
-kbench9000-y := main.o curve25519-neon.o curve25519-donna32.o curve25519-fiat32.o
+kbench9000-y := main.o generic.o openssl.o ard.o ard-glue.o
 obj-m := kbench9000.o
 ccflags-y += -O3
 ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
diff --git a/ard-glue.c b/ard-glue.c
new file mode 100644
index 0000000..9e7d585
--- /dev/null
+++ b/ard-glue.c
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+asmlinkage void chacha20_ard_block_xor_neon(u32 *state, u8 *dst, const u8 *src);
+asmlinkage void chacha20_ard_4block_xor_neon(u32 *state, u8 *dst, const u8 *src);
+
+enum {
+	CHACHA20_IV_SIZE = 16,
+	CHACHA20_KEY_SIZE = 32,
+	CHACHA20_BLOCK_SIZE = 64,
+	CHACHA20_BLOCK_WORDS = CHACHA20_BLOCK_SIZE / sizeof(u32)
+};
+
+#define EXPAND_32_BYTE_K 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U
+
+void chacha20_ard_neon(u8 *dst, const u8 *src, u32 len, const u32 key[8], const u32 counter[4])
+{
+	u32 state[] = {
+		EXPAND_32_BYTE_K,
+		key[0], key[1], key[2], key[3],
+		key[4], key[5], key[6], key[7],
+		counter[0], counter[1], counter[2], counter[3]
+	};
+	u8 buf[CHACHA20_BLOCK_SIZE];
+	unsigned int bytes = len;
+
+	while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
+		chacha20_ard_4block_xor_neon(state, dst, src);
+		bytes -= CHACHA20_BLOCK_SIZE * 4;
+		src += CHACHA20_BLOCK_SIZE * 4;
+		dst += CHACHA20_BLOCK_SIZE * 4;
+		state[12] += 4;
+	}
+	while (bytes >= CHACHA20_BLOCK_SIZE) {
+		chacha20_ard_block_xor_neon(state, dst, src);
+		bytes -= CHACHA20_BLOCK_SIZE;
+		src += CHACHA20_BLOCK_SIZE;
+		dst += CHACHA20_BLOCK_SIZE;
+		state[12]++;
+	}
+	if (bytes) {
+		memcpy(buf, src, bytes);
+		chacha20_ard_block_xor_neon(state, buf, buf);
+		memcpy(dst, buf, bytes);
+	}
+}
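The glue routine drives the assembly in three stages: four blocks at a time, then single blocks, then a memcpy-buffered partial tail, bumping the block counter in state[12] as it goes. A hypothetical caller (not part of this commit; the helper name is made up for illustration) would build the key and counter words from raw RFC 7539 inputs, since state word 12 is the 32-bit block counter and words 13-15 are the 96-bit nonce:

	#include <asm/unaligned.h>

	static void chacha20_encrypt_example(u8 *dst, const u8 *src, u32 len,
					     const u8 raw_key[32], const u8 nonce[12])
	{
		u32 key[8], counter[4];
		int i;

		/* Key and nonce words are little-endian per RFC 7539. */
		for (i = 0; i < 8; ++i)
			key[i] = get_unaligned_le32(raw_key + i * 4);
		counter[0] = 0; /* initial block counter */
		for (i = 0; i < 3; ++i)
			counter[i + 1] = get_unaligned_le32(nonce + i * 4);

		chacha20_ard_neon(dst, src, len, key, counter);
	}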
diff --git a/ard.S b/ard.S
new file mode 100644
--- /dev/null
+++ b/ard.S
@@ -0,0 +1,521 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, ARM NEON functions
+ *
+ * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on:
+ * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSE3 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+	.text
+	.fpu	neon
+	.align	5
+
+ENTRY(chacha20_ard_block_xor_neon)
+	// r0: Input state matrix, s
+	// r1: 1 data block output, o
+	// r2: 1 data block input, i
+
+	//
+	// This function encrypts one ChaCha20 block by loading the state matrix
+	// in four NEON registers. It performs matrix operations on four words in
+	// parallel, but requires shuffling to rearrange the words after each
+	// round.
+	//
+
+	// x0..3 = s0..3
+	add	ip, r0, #0x20
+	vld1.32	{q0-q1}, [r0]
+	vld1.32	{q2-q3}, [ip]
+
+	vmov	q8, q0
+	vmov	q9, q1
+	vmov	q10, q2
+	vmov	q11, q3
+
+	mov	r3, #10
+
+.Ldoubleround:
+	// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+	vadd.i32	q0, q0, q1
+	veor	q3, q3, q0
+	vrev32.16	q3, q3
+
+	// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+	vadd.i32	q2, q2, q3
+	veor	q4, q1, q2
+	vshl.u32	q1, q4, #12
+	vsri.u32	q1, q4, #20
+
+	// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+	vadd.i32	q0, q0, q1
+	veor	q4, q3, q0
+	vshl.u32	q3, q4, #8
+	vsri.u32	q3, q4, #24
+
+	// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+	vadd.i32	q2, q2, q3
+	veor	q4, q1, q2
+	vshl.u32	q1, q4, #7
+	vsri.u32	q1, q4, #25
+
+	// x1 = shuffle32(x1, MASK(0, 3, 2, 1))
+	vext.8	q1, q1, q1, #4
+	// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+	vext.8	q2, q2, q2, #8
+	// x3 = shuffle32(x3, MASK(2, 1, 0, 3))
+	vext.8	q3, q3, q3, #12
+
+	// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+	vadd.i32	q0, q0, q1
+	veor	q3, q3, q0
+	vrev32.16	q3, q3
+
+	// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+	vadd.i32	q2, q2, q3
+	veor	q4, q1, q2
+	vshl.u32	q1, q4, #12
+	vsri.u32	q1, q4, #20
+
+	// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+	vadd.i32	q0, q0, q1
+	veor	q4, q3, q0
+	vshl.u32	q3, q4, #8
+	vsri.u32	q3, q4, #24
+
+	// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+	vadd.i32	q2, q2, q3
+	veor	q4, q1, q2
+	vshl.u32	q1, q4, #7
+	vsri.u32	q1, q4, #25
+
+	// x1 = shuffle32(x1, MASK(2, 1, 0, 3))
+	vext.8	q1, q1, q1, #12
+	// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+	vext.8	q2, q2, q2, #8
+	// x3 = shuffle32(x3, MASK(0, 3, 2, 1))
+	vext.8	q3, q3, q3, #4
+
+	subs	r3, r3, #1
+	bne	.Ldoubleround
+
+	add	ip, r2, #0x20
+	vld1.8	{q4-q5}, [r2]
+	vld1.8	{q6-q7}, [ip]
+
+	// o0 = i0 ^ (x0 + s0)
+	vadd.i32	q0, q0, q8
+	veor	q0, q0, q4
+
+	// o1 = i1 ^ (x1 + s1)
+	vadd.i32	q1, q1, q9
+	veor	q1, q1, q5
+
+	// o2 = i2 ^ (x2 + s2)
+	vadd.i32	q2, q2, q10
+	veor	q2, q2, q6
+
+	// o3 = i3 ^ (x3 + s3)
+	vadd.i32	q3, q3, q11
+	veor	q3, q3, q7
+
+	add	ip, r1, #0x20
+	vst1.8	{q0-q1}, [r1]
+	vst1.8	{q2-q3}, [ip]
+
+	bx	lr
ENDPROC(chacha20_ard_block_xor_neon)
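The vext.8 shuffles are what turn the column round into the diagonal round. A scalar C sketch of the same double round (an illustration only, not part of this commit), with the state held as four 4-word rows so the rotate_row() calls play the role of the #4/#8/#12 shuffles:

	#include <stdint.h>
	#include <string.h>

	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

	/* One ChaCha20 quarter-round over four words. */
	static void qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
	{
		*a += *b; *d = ROTL32(*d ^ *a, 16);
		*c += *d; *b = ROTL32(*b ^ *c, 12);
		*a += *b; *d = ROTL32(*d ^ *a, 8);
		*c += *d; *b = ROTL32(*b ^ *c, 7);
	}

	/* Rotate a 4-word row left by n positions (what vext.8 #4n does). */
	static void rotate_row(uint32_t r[4], int n)
	{
		uint32_t t[4];
		int i;

		for (i = 0; i < 4; ++i)
			t[i] = r[(i + n) & 3];
		memcpy(r, t, sizeof(t));
	}

	/* One double round on the 4x4 state x[row][column]. */
	static void double_round(uint32_t x[4][4])
	{
		int i;

		for (i = 0; i < 4; ++i)		/* column round */
			qr(&x[0][i], &x[1][i], &x[2][i], &x[3][i]);
		rotate_row(x[1], 1);		/* line the diagonals up as columns */
		rotate_row(x[2], 2);
		rotate_row(x[3], 3);
		for (i = 0; i < 4; ++i)		/* diagonal round */
			qr(&x[0][i], &x[1][i], &x[2][i], &x[3][i]);
		rotate_row(x[1], 3);		/* undo the rotation */
		rotate_row(x[2], 2);
		rotate_row(x[3], 1);
	}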
+	.align	5
+ENTRY(chacha20_ard_4block_xor_neon)
+	push	{r4-r6, lr}
+	mov	ip, sp			// preserve the stack pointer
+	sub	r3, sp, #0x20		// allocate a 32 byte buffer
+	bic	r3, r3, #0x1f		// aligned to 32 bytes
+	mov	sp, r3
+
+	// r0: Input state matrix, s
+	// r1: 4 data blocks output, o
+	// r2: 4 data blocks input, i
+
+	//
+	// This function encrypts four consecutive ChaCha20 blocks by loading
+	// the state matrix in NEON registers four times. The algorithm performs
+	// each operation on the corresponding word of each state matrix, hence
+	// requires no word shuffling. For the final XORing step we transpose the
+	// matrix by interleaving 32- and then 64-bit words, which allows us to
+	// do XOR in NEON registers.
+	//
+
+	// x0..15[0-3] = s0..3[0..3]
+	add	r3, r0, #0x20
+	vld1.32	{q0-q1}, [r0]
+	vld1.32	{q2-q3}, [r3]
+
+	adr	r3, CTRINC
+	vdup.32	q15, d7[1]
+	vdup.32	q14, d7[0]
+	vld1.32	{q11}, [r3, :128]
+	vdup.32	q13, d6[1]
+	vdup.32	q12, d6[0]
+	vadd.i32	q12, q12, q11		// x12 += counter values 0-3
+	vdup.32	q11, d5[1]
+	vdup.32	q10, d5[0]
+	vdup.32	q9, d4[1]
+	vdup.32	q8, d4[0]
+	vdup.32	q7, d3[1]
+	vdup.32	q6, d3[0]
+	vdup.32	q5, d2[1]
+	vdup.32	q4, d2[0]
+	vdup.32	q3, d1[1]
+	vdup.32	q2, d1[0]
+	vdup.32	q1, d0[1]
+	vdup.32	q0, d0[0]
+
+	mov	r3, #10
+
+.Ldoubleround4:
+	// x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+	// x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+	// x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+	// x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+	vadd.i32	q0, q0, q4
+	vadd.i32	q1, q1, q5
+	vadd.i32	q2, q2, q6
+	vadd.i32	q3, q3, q7
+
+	veor	q12, q12, q0
+	veor	q13, q13, q1
+	veor	q14, q14, q2
+	veor	q15, q15, q3
+
+	vrev32.16	q12, q12
+	vrev32.16	q13, q13
+	vrev32.16	q14, q14
+	vrev32.16	q15, q15
+
+	// x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+	// x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+	// x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+	// x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+	vadd.i32	q8, q8, q12
+	vadd.i32	q9, q9, q13
+	vadd.i32	q10, q10, q14
+	vadd.i32	q11, q11, q15
+
+	vst1.32	{q8-q9}, [sp, :256]
+
+	veor	q8, q4, q8
+	veor	q9, q5, q9
+	vshl.u32	q4, q8, #12
+	vshl.u32	q5, q9, #12
+	vsri.u32	q4, q8, #20
+	vsri.u32	q5, q9, #20
+
+	veor	q8, q6, q10
+	veor	q9, q7, q11
+	vshl.u32	q6, q8, #12
+	vshl.u32	q7, q9, #12
+	vsri.u32	q6, q8, #20
+	vsri.u32	q7, q9, #20
+
+	// x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+	// x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+	// x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+	// x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+	vadd.i32	q0, q0, q4
+	vadd.i32	q1, q1, q5
+	vadd.i32	q2, q2, q6
+	vadd.i32	q3, q3, q7
+
+	veor	q8, q12, q0
+	veor	q9, q13, q1
+	vshl.u32	q12, q8, #8
+	vshl.u32	q13, q9, #8
+	vsri.u32	q12, q8, #24
+	vsri.u32	q13, q9, #24
+
+	veor	q8, q14, q2
+	veor	q9, q15, q3
+	vshl.u32	q14, q8, #8
+	vshl.u32	q15, q9, #8
+	vsri.u32	q14, q8, #24
+	vsri.u32	q15, q9, #24
+
+	vld1.32	{q8-q9}, [sp, :256]
+
+	// x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+	// x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+	// x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+	// x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+	vadd.i32	q8, q8, q12
+	vadd.i32	q9, q9, q13
+	vadd.i32	q10, q10, q14
+	vadd.i32	q11, q11, q15
+
+	vst1.32	{q8-q9}, [sp, :256]
+
+	veor	q8, q4, q8
+	veor	q9, q5, q9
+	vshl.u32	q4, q8, #7
+	vshl.u32	q5, q9, #7
+	vsri.u32	q4, q8, #25
+	vsri.u32	q5, q9, #25
+
+	veor	q8, q6, q10
+	veor	q9, q7, q11
+	vshl.u32	q6, q8, #7
+	vshl.u32	q7, q9, #7
+	vsri.u32	q6, q8, #25
+	vsri.u32	q7, q9, #25
+
+	vld1.32	{q8-q9}, [sp, :256]
+
+	// x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+	// x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+	// x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+	// x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+	vadd.i32	q0, q0, q5
+	vadd.i32	q1, q1, q6
+	vadd.i32	q2, q2, q7
+	vadd.i32	q3, q3, q4
+
+	veor	q15, q15, q0
+	veor	q12, q12, q1
+	veor	q13, q13, q2
+	veor	q14, q14, q3
+
+	vrev32.16	q15, q15
+	vrev32.16	q12, q12
+	vrev32.16	q13, q13
+	vrev32.16	q14, q14
+
+	// x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+	// x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+	// x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+	// x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+	vadd.i32	q10, q10, q15
+	vadd.i32	q11, q11, q12
+	vadd.i32	q8, q8, q13
+	vadd.i32	q9, q9, q14
+
+	vst1.32	{q8-q9}, [sp, :256]
+
+	veor	q8, q7, q8
+	veor	q9, q4, q9
+	vshl.u32	q7, q8, #12
+	vshl.u32	q4, q9, #12
+	vsri.u32	q7, q8, #20
+	vsri.u32	q4, q9, #20
+
+	veor	q8, q5, q10
+	veor	q9, q6, q11
+	vshl.u32	q5, q8, #12
+	vshl.u32	q6, q9, #12
+	vsri.u32	q5, q8, #20
+	vsri.u32	q6, q9, #20
+
+	// x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+	// x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+	// x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+	// x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+	vadd.i32	q0, q0, q5
+	vadd.i32	q1, q1, q6
+	vadd.i32	q2, q2, q7
+	vadd.i32	q3, q3, q4
+
+	veor	q8, q15, q0
+	veor	q9, q12, q1
+	vshl.u32	q15, q8, #8
+	vshl.u32	q12, q9, #8
+	vsri.u32	q15, q8, #24
+	vsri.u32	q12, q9, #24
+
+	veor	q8, q13, q2
+	veor	q9, q14, q3
+	vshl.u32	q13, q8, #8
+	vshl.u32	q14, q9, #8
+	vsri.u32	q13, q8, #24
+	vsri.u32	q14, q9, #24
+
+	vld1.32	{q8-q9}, [sp, :256]
+
+	// x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+	// x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+	// x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+	// x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+	vadd.i32	q10, q10, q15
+	vadd.i32	q11, q11, q12
+	vadd.i32	q8, q8, q13
+	vadd.i32	q9, q9, q14
+
+	vst1.32	{q8-q9}, [sp, :256]
+
+	veor	q8, q7, q8
+	veor	q9, q4, q9
+	vshl.u32	q7, q8, #7
+	vshl.u32	q4, q9, #7
+	vsri.u32	q7, q8, #25
+	vsri.u32	q4, q9, #25
+
+	veor	q8, q5, q10
+	veor	q9, q6, q11
+	vshl.u32	q5, q8, #7
+	vshl.u32	q6, q9, #7
+	vsri.u32	q5, q8, #25
+	vsri.u32	q6, q9, #25
+
+	subs	r3, r3, #1
+	beq	0f
+
+	vld1.32	{q8-q9}, [sp, :256]
+	b	.Ldoubleround4
+
+	// x0[0-3] += s0[0]
+	// x1[0-3] += s0[1]
+	// x2[0-3] += s0[2]
+	// x3[0-3] += s0[3]
+0:	ldmia	r0!, {r3-r6}
+	vdup.32	q8, r3
+	vdup.32	q9, r4
+	vadd.i32	q0, q0, q8
+	vadd.i32	q1, q1, q9
+	vdup.32	q8, r5
+	vdup.32	q9, r6
+	vadd.i32	q2, q2, q8
+	vadd.i32	q3, q3, q9
+
+	// x4[0-3] += s1[0]
+	// x5[0-3] += s1[1]
+	// x6[0-3] += s1[2]
+	// x7[0-3] += s1[3]
+	ldmia	r0!, {r3-r6}
+	vdup.32	q8, r3
+	vdup.32	q9, r4
+	vadd.i32	q4, q4, q8
+	vadd.i32	q5, q5, q9
+	vdup.32	q8, r5
+	vdup.32	q9, r6
+	vadd.i32	q6, q6, q8
+	vadd.i32	q7, q7, q9
+
+	// interleave 32-bit words in state n, n+1
+	vzip.32	q0, q1
+	vzip.32	q2, q3
+	vzip.32	q4, q5
+	vzip.32	q6, q7
+
+	// interleave 64-bit words in state n, n+2
+	vswp	d1, d4
+	vswp	d3, d6
+	vswp	d9, d12
+	vswp	d11, d14
+
+	// xor with corresponding input, write to output
+	vld1.8	{q8-q9}, [r2]!
+	veor	q8, q8, q0
+	veor	q9, q9, q4
+	vst1.8	{q8-q9}, [r1]!
+
+	vld1.32	{q8-q9}, [sp, :256]
+
+	// x8[0-3] += s2[0]
+	// x9[0-3] += s2[1]
+	// x10[0-3] += s2[2]
+	// x11[0-3] += s2[3]
+	ldmia	r0!, {r3-r6}
+	vdup.32	q0, r3
+	vdup.32	q4, r4
+	vadd.i32	q8, q8, q0
+	vadd.i32	q9, q9, q4
+	vdup.32	q0, r5
+	vdup.32	q4, r6
+	vadd.i32	q10, q10, q0
+	vadd.i32	q11, q11, q4
+
+	// x12[0-3] += s3[0]
+	// x13[0-3] += s3[1]
+	// x14[0-3] += s3[2]
+	// x15[0-3] += s3[3]
+	ldmia	r0!, {r3-r6}
+	vdup.32	q0, r3
+	vdup.32	q4, r4
+	adr	r3, CTRINC
+	vadd.i32	q12, q12, q0
+	vld1.32	{q0}, [r3, :128]
+	vadd.i32	q13, q13, q4
+	vadd.i32	q12, q12, q0		// x12 += counter values 0-3
+
+	vdup.32	q0, r5
+	vdup.32	q4, r6
+	vadd.i32	q14, q14, q0
+	vadd.i32	q15, q15, q4
+
+	// interleave 32-bit words in state n, n+1
+	vzip.32	q8, q9
+	vzip.32	q10, q11
+	vzip.32	q12, q13
+	vzip.32	q14, q15
+
+	// interleave 64-bit words in state n, n+2
+	vswp	d17, d20
+	vswp	d19, d22
+	vswp	d25, d28
+	vswp	d27, d30
+
+	vmov	q4, q1
+
+	vld1.8	{q0-q1}, [r2]!
+	veor	q0, q0, q8
+	veor	q1, q1, q12
+	vst1.8	{q0-q1}, [r1]!
+
+	vld1.8	{q0-q1}, [r2]!
+	veor	q0, q0, q2
+	veor	q1, q1, q6
+	vst1.8	{q0-q1}, [r1]!
+
+	vld1.8	{q0-q1}, [r2]!
+	veor	q0, q0, q10
+	veor	q1, q1, q14
+	vst1.8	{q0-q1}, [r1]!
+
+	vld1.8	{q0-q1}, [r2]!
+	veor	q0, q0, q4
+	veor	q1, q1, q5
+	vst1.8	{q0-q1}, [r1]!
+
+	vld1.8	{q0-q1}, [r2]!
+	veor	q0, q0, q9
+	veor	q1, q1, q13
+	vst1.8	{q0-q1}, [r1]!
+
+	vld1.8	{q0-q1}, [r2]!
+	veor	q0, q0, q3
+	veor	q1, q1, q7
+	vst1.8	{q0-q1}, [r1]!
+
+	vld1.8	{q0-q1}, [r2]
+	veor	q0, q0, q11
+	veor	q1, q1, q15
+	vst1.8	{q0-q1}, [r1]
+
+	mov	sp, ip
+	pop	{r4-r6, pc}
+ENDPROC(chacha20_ard_4block_xor_neon)
+
+	.align	4
+CTRINC:	.word	0, 1, 2, 3
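The vzip.32/vswp pairs at the end implement a 4x4 transpose of 32-bit words, turning "one register per state word across four blocks" back into "one register group per block" for the XOR. A small C illustration of the same trick (hypothetical helper names, not part of this commit):

	#include <stdint.h>
	#include <string.h>

	/* vzip.32: interleave the 32-bit words of two 4-word vectors. */
	static void zip32(uint32_t a[4], uint32_t b[4])
	{
		uint32_t lo[4] = { a[0], b[0], a[1], b[1] };
		uint32_t hi[4] = { a[2], b[2], a[3], b[3] };

		memcpy(a, lo, sizeof(lo));
		memcpy(b, hi, sizeof(hi));
	}

	/* vswp on d registers: exchange two 64-bit (2-word) halves. */
	static void swap64(uint32_t a[2], uint32_t b[2])
	{
		uint32_t t[2];

		memcpy(t, a, sizeof(t));
		memcpy(a, b, sizeof(t));
		memcpy(b, t, sizeof(t));
	}

	/* rows[i] holds state word i of blocks 0..3; afterwards rows 0, 2, 1, 3
	 * hold all four words of blocks 0, 1, 2, 3 respectively, which matches
	 * the store order used by the assembly above. */
	static void transpose_4x4(uint32_t rows[4][4])
	{
		zip32(rows[0], rows[1]);		/* vzip.32 q0, q1 */
		zip32(rows[2], rows[3]);		/* vzip.32 q2, q3 */
		swap64(&rows[0][2], &rows[2][0]);	/* vswp d1, d4 */
		swap64(&rows[1][2], &rows[3][0]);	/* vswp d3, d6 */
	}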
diff --git a/curve25519-donna32.c b/curve25519-donna32.c
deleted file mode 100644
index 4721864..0000000
--- a/curve25519-donna32.c
+++ /dev/null
@@ -1,861 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2008 Google Inc. All Rights Reserved.
- * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * Original author: Adam Langley <agl@imperialviolet.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-enum { CURVE25519_POINT_SIZE = 32 };
-
-static __always_inline void normalize_secret(u8 secret[CURVE25519_POINT_SIZE])
-{
-	secret[0] &= 248;
-	secret[31] &= 127;
-	secret[31] |= 64;
-}
-
-typedef s64 limb;
-
-/* Field element representation:
- *
- * Field elements are written as an array of signed, 64-bit limbs, least
- * significant first. The value of the field element is:
- *   x[0] + 2^26·x[1] + 2^51·x[2] + 2^77·x[3] + 2^102·x[4] + ...
- *
- * i.e. the limbs are 26, 25, 26, 25, ... bits wide.
- */
-
-/* Sum two numbers: output += in */
-static void fsum(limb *output, const limb *in)
-{
-	unsigned int i;
-
-	for (i = 0; i < 10; i += 2) {
-		output[0 + i] = output[0 + i] + in[0 + i];
-		output[1 + i] = output[1 + i] + in[1 + i];
-	}
-}
-
-/* Find the difference of two numbers: output = in - output
- * (note the order of the arguments!).
- */
-static void fdifference(limb *output, const limb *in)
-{
-	unsigned int i;
-
-	for (i = 0; i < 10; ++i)
-		output[i] = in[i] - output[i];
-}
-
-/* Multiply a number by a scalar: output = in * scalar */
-static void fscalar_product(limb *output, const limb *in, const limb scalar)
-{
-	unsigned int i;
-
-	for (i = 0; i < 10; ++i)
-		output[i] = in[i] * scalar;
-}
-
-/* Multiply two numbers: output = in2 * in
- *
- * output must be distinct to both inputs. The inputs are reduced coefficient
- * form, the output is not.
- *
- * output[x] <= 14 * the largest product of the input limbs.
- */
-static void fproduct(limb *output, const limb *in2, const limb *in)
-{
-	output[0] = ((limb) ((s32) in2[0])) * ((s32) in[0]);
-	output[1] = ((limb) ((s32) in2[0])) * ((s32) in[1]) +
-		    ((limb) ((s32) in2[1])) * ((s32) in[0]);
-	output[2] = 2 * ((limb) ((s32) in2[1])) * ((s32) in[1]) +
-		    ((limb) ((s32) in2[0])) * ((s32) in[2]) +
-		    ((limb) ((s32) in2[2])) * ((s32) in[0]);
-	output[3] = ((limb) ((s32) in2[1])) * ((s32) in[2]) +
-		    ((limb) ((s32) in2[2])) * ((s32) in[1]) +
-		    ((limb) ((s32) in2[0])) * ((s32) in[3]) +
-		    ((limb) ((s32) in2[3])) * ((s32) in[0]);
-	output[4] = ((limb) ((s32) in2[2])) * ((s32) in[2]) +
-		    2 * (((limb) ((s32) in2[1])) * ((s32) in[3]) +
-			 ((limb) ((s32) in2[3])) * ((s32) in[1])) +
-		    ((limb) ((s32) in2[0])) * ((s32) in[4]) +
-		    ((limb) ((s32) in2[4])) * ((s32) in[0]);
-	output[5] = ((limb) ((s32) in2[2])) * ((s32) in[3]) +
-		    ((limb) ((s32) in2[3])) * ((s32) in[2]) +
-		    ((limb) ((s32) in2[1])) * ((s32) in[4]) +
-		    ((limb) ((s32) in2[4])) * ((s32) in[1]) +
-		    ((limb) ((s32) in2[0])) * ((s32) in[5]) +
-		    ((limb) ((s32) in2[5])) * ((s32) in[0]);
-	output[6] = 2 * (((limb) ((s32) in2[3])) * ((s32) in[3]) +
-			 ((limb) ((s32) in2[1])) * ((s32) in[5]) +
-			 ((limb) ((s32) in2[5])) * ((s32) in[1])) +
-		    ((limb) ((s32) in2[2])) * ((s32) in[4]) +
-		    ((limb) ((s32) in2[4])) * ((s32) in[2]) +
-		    ((limb) ((s32) in2[0])) * ((s32) in[6]) +
-		    ((limb) ((s32) in2[6])) * ((s32) in[0]);
-	output[7] = ((limb) ((s32) in2[3])) * ((s32) in[4]) +
-		    ((limb) ((s32) in2[4])) * ((s32) in[3]) +
-		    ((limb) ((s32) in2[2])) * ((s32) in[5]) +
-		    ((limb) ((s32) in2[5])) * ((s32) in[2]) +
-		    ((limb) ((s32) in2[1])) * ((s32) in[6]) +
-		    ((limb) ((s32) in2[6])) * ((s32) in[1]) +
-		    ((limb) ((s32) in2[0])) * ((s32) in[7]) +
-		    ((limb) ((s32) in2[7])) * ((s32) in[0]);
-	output[8] = ((limb) ((s32) in2[4])) * ((s32) in[4]) +
-		    2 * (((limb) ((s32) in2[3])) * ((s32) in[5]) +
-			 ((limb) ((s32) in2[5])) * ((s32) in[3]) +
-			 ((limb) ((s32) in2[1])) * ((s32) in[7]) +
-			 ((limb) ((s32) in2[7])) * ((s32) in[1])) +
-		    ((limb) ((s32) in2[2])) * ((s32) in[6]) +
-		    ((limb) ((s32) in2[6])) * ((s32) in[2]) +
-		    ((limb) ((s32) in2[0])) * ((s32) in[8]) +
-		    ((limb) ((s32) in2[8])) * ((s32) in[0]);
-	output[9] = ((limb) ((s32) in2[4])) * ((s32) in[5]) +
-		    ((limb) ((s32) in2[5])) * ((s32) in[4]) +
-		    ((limb) ((s32) in2[3])) * ((s32) in[6]) +
-		    ((limb) ((s32) in2[6])) * ((s32) in[3]) +
-		    ((limb) ((s32) in2[2])) * ((s32) in[7]) +
-		    ((limb) ((s32) in2[7])) * ((s32) in[2]) +
-		    ((limb) ((s32) in2[1])) * ((s32) in[8]) +
-		    ((limb) ((s32) in2[8])) * ((s32) in[1]) +
-		    ((limb) ((s32) in2[0])) * ((s32) in[9]) +
-		    ((limb) ((s32) in2[9])) * ((s32) in[0]);
-	output[10] = 2 * (((limb) ((s32) in2[5])) * ((s32) in[5]) +
-			  ((limb) ((s32) in2[3])) * ((s32) in[7]) +
-			  ((limb) ((s32) in2[7])) * ((s32) in[3]) +
-			  ((limb) ((s32) in2[1])) * ((s32) in[9]) +
-			  ((limb) ((s32) in2[9])) * ((s32) in[1])) +
-		     ((limb) ((s32) in2[4])) * ((s32) in[6]) +
-		     ((limb) ((s32) in2[6])) * ((s32) in[4]) +
-		     ((limb) ((s32) in2[2])) * ((s32) in[8]) +
-		     ((limb) ((s32) in2[8])) * ((s32) in[2]);
-	output[11] = ((limb) ((s32) in2[5])) * ((s32) in[6]) +
-		     ((limb) ((s32) in2[6])) * ((s32) in[5]) +
-		     ((limb) ((s32) in2[4])) * ((s32) in[7]) +
-		     ((limb) ((s32) in2[7])) * ((s32) in[4]) +
-		     ((limb) ((s32) in2[3])) * ((s32) in[8]) +
-		     ((limb) ((s32) in2[8])) * ((s32) in[3]) +
-		     ((limb) ((s32) in2[2])) * ((s32) in[9]) +
-		     ((limb) ((s32) in2[9])) * ((s32) in[2]);
-	output[12] = ((limb) ((s32) in2[6])) * ((s32) in[6]) +
-		     2 * (((limb) ((s32) in2[5])) * ((s32) in[7]) +
-			  ((limb) ((s32) in2[7])) * ((s32) in[5]) +
-			  ((limb) ((s32) in2[3])) * ((s32) in[9]) +
-			  ((limb) ((s32) in2[9])) * ((s32) in[3])) +
-		     ((limb) ((s32) in2[4])) * ((s32) in[8]) +
-		     ((limb) ((s32) in2[8])) * ((s32) in[4]);
-	output[13] = ((limb) ((s32) in2[6])) * ((s32) in[7]) +
-		     ((limb) ((s32) in2[7])) * ((s32) in[6]) +
-		     ((limb) ((s32) in2[5])) * ((s32) in[8]) +
-		     ((limb) ((s32) in2[8])) * ((s32) in[5]) +
-		     ((limb) ((s32) in2[4])) * ((s32) in[9]) +
-		     ((limb) ((s32) in2[9])) * ((s32) in[4]);
-	output[14] = 2 * (((limb) ((s32) in2[7])) * ((s32) in[7]) +
-			  ((limb) ((s32) in2[5])) * ((s32) in[9]) +
-			  ((limb) ((s32) in2[9])) * ((s32) in[5])) +
-		     ((limb) ((s32) in2[6])) * ((s32) in[8]) +
-		     ((limb) ((s32) in2[8])) * ((s32) in[6]);
-	output[15] = ((limb) ((s32) in2[7])) * ((s32) in[8]) +
-		     ((limb) ((s32) in2[8])) * ((s32) in[7]) +
-		     ((limb) ((s32) in2[6])) * ((s32) in[9]) +
-		     ((limb) ((s32) in2[9])) * ((s32) in[6]);
-	output[16] = ((limb) ((s32) in2[8])) * ((s32) in[8]) +
-		     2 * (((limb) ((s32) in2[7])) * ((s32) in[9]) +
-			  ((limb) ((s32) in2[9])) * ((s32) in[7]));
-	output[17] = ((limb) ((s32) in2[8])) * ((s32) in[9]) +
-		     ((limb) ((s32) in2[9])) * ((s32) in[8]);
-	output[18] = 2 * ((limb) ((s32) in2[9])) * ((s32) in[9]);
-}
-
-/* Reduce a long form to a short form by taking the input mod 2^255 - 19.
- *
- * On entry: |output[i]| < 14*2^54
- * On exit: |output[0..8]| < 280*2^54
- */
-static void freduce_degree(limb *output)
-{
-	/* Each of these shifts and adds ends up multiplying the value by 19.
-	 *
-	 * For output[0..8], the absolute entry value is < 14*2^54 and we add, at
-	 * most, 19*14*2^54 thus, on exit, |output[0..8]| < 280*2^54.
-	 */
-	output[8] += output[18] << 4;
-	output[8] += output[18] << 1;
-	output[8] += output[18];
-	output[7] += output[17] << 4;
-	output[7] += output[17] << 1;
-	output[7] += output[17];
-	output[6] += output[16] << 4;
-	output[6] += output[16] << 1;
-	output[6] += output[16];
-	output[5] += output[15] << 4;
-	output[5] += output[15] << 1;
-	output[5] += output[15];
-	output[4] += output[14] << 4;
-	output[4] += output[14] << 1;
-	output[4] += output[14];
-	output[3] += output[13] << 4;
-	output[3] += output[13] << 1;
-	output[3] += output[13];
-	output[2] += output[12] << 4;
-	output[2] += output[12] << 1;
-	output[2] += output[12];
-	output[1] += output[11] << 4;
-	output[1] += output[11] << 1;
-	output[1] += output[11];
-	output[0] += output[10] << 4;
-	output[0] += output[10] << 1;
-	output[0] += output[10];
-}
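A quick sanity check on the shift-and-add triples above: for any limb value v, (v << 4) + (v << 1) + v = 16v + 2v + v = 19v, so each triple multiplies the folded-down limb by 19 without a multiplication. The factor 19 appears because the discarded limbs output[10..18] carry weight 2^255 and above, and 2^255 ≡ 19 (mod 2^255 - 19).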
-
-/* return v / 2^26, using only shifts and adds.
- *
- * On entry: v can take any value.
- */
-static inline limb div_by_2_26(const limb v)
-{
-	/* High word of v; no shift needed. */
-	const u32 highword = (u32) (((u64) v) >> 32);
-	/* Set to all 1s if v was negative; else set to 0s. */
-	const s32 sign = ((s32) highword) >> 31;
-	/* Set to 0x3ffffff if v was negative; else set to 0. */
-	const s32 roundoff = ((u32) sign) >> 6;
-	/* Should return v / (1<<26) */
-	return (v + roundoff) >> 26;
-}
-
-/* return v / (2^25), using only shifts and adds.
- *
- * On entry: v can take any value.
- */
-static inline limb div_by_2_25(const limb v)
-{
-	/* High word of v; no shift needed. */
-	const u32 highword = (u32) (((u64) v) >> 32);
-	/* Set to all 1s if v was negative; else set to 0s. */
-	const s32 sign = ((s32) highword) >> 31;
-	/* Set to 0x1ffffff if v was negative; else set to 0. */
-	const s32 roundoff = ((u32) sign) >> 7;
-	/* Should return v / (1<<25) */
-	return (v + roundoff) >> 25;
-}
-
-/* Reduce all coefficients of the short form input so that |x| < 2^26.
- *
- * On entry: |output[i]| < 280*2^54
- */
-static void freduce_coefficients(limb *output)
-{
-	unsigned int i;
-
-	output[10] = 0;
-
-	for (i = 0; i < 10; i += 2) {
-		limb over = div_by_2_26(output[i]);
-		/* The entry condition (that |output[i]| < 280*2^54) means that over is, at
-		 * most, 280*2^28 in the first iteration of this loop. This is added to the
-		 * next limb and we can approximate the resulting bound of that limb by
-		 * 281*2^54.
-		 */
-		output[i] -= over << 26;
-		output[i+1] += over;
-
-		/* For the first iteration, |output[i+1]| < 281*2^54, thus |over| <
-		 * 281*2^29. When this is added to the next limb, the resulting bound can
-		 * be approximated as 281*2^54.
-		 *
-		 * For subsequent iterations of the loop, 281*2^54 remains a conservative
-		 * bound and no overflow occurs.
-		 */
-		over = div_by_2_25(output[i+1]);
-		output[i+1] -= over << 25;
-		output[i+2] += over;
-	}
-	/* Now |output[10]| < 281*2^29 and all other coefficients are reduced. */
-	output[0] += output[10] << 4;
-	output[0] += output[10] << 1;
-	output[0] += output[10];
-
-	output[10] = 0;
-
-	/* Now output[1..9] are reduced, and |output[0]| < 2^26 + 19*281*2^29,
-	 * so |over| will be no more than 2^16.
-	 */
-	{
-		limb over = div_by_2_26(output[0]);
-
-		output[0] -= over << 26;
-		output[1] += over;
-	}
-
-	/* Now output[0,2..9] are reduced, and |output[1]| < 2^25 + 2^16 < 2^26. The
-	 * bound on |output[1]| is sufficient to meet our needs.
-	 */
-}
-
-/* A helpful wrapper around fproduct: output = in * in2.
- *
- * On entry: |in[i]| < 2^27 and |in2[i]| < 2^27.
- *
- * output must be distinct to both inputs. The output is reduced degree
- * (indeed, one need only provide storage for 10 limbs) and |output[i]| < 2^26.
- */
-static void fmul(limb *output, const limb *in, const limb *in2)
-{
-	limb t[19];
-
-	fproduct(t, in, in2);
-	/* |t[i]| < 14*2^54 */
-	freduce_degree(t);
-	freduce_coefficients(t);
-	/* |t[i]| < 2^26 */
-	memcpy(output, t, sizeof(limb) * 10);
-}
-
-/* Square a number: output = in**2
- *
- * output must be distinct from the input. The inputs are reduced coefficient
- * form, the output is not.
- *
- * output[x] <= 14 * the largest product of the input limbs.
- */
-static void fsquare_inner(limb *output, const limb *in)
-{
-	output[0] = ((limb) ((s32) in[0])) * ((s32) in[0]);
-	output[1] = 2 * ((limb) ((s32) in[0])) * ((s32) in[1]);
-	output[2] = 2 * (((limb) ((s32) in[1])) * ((s32) in[1]) +
-			 ((limb) ((s32) in[0])) * ((s32) in[2]));
-	output[3] = 2 * (((limb) ((s32) in[1])) * ((s32) in[2]) +
-			 ((limb) ((s32) in[0])) * ((s32) in[3]));
-	output[4] = ((limb) ((s32) in[2])) * ((s32) in[2]) +
-		    4 * ((limb) ((s32) in[1])) * ((s32) in[3]) +
-		    2 * ((limb) ((s32) in[0])) * ((s32) in[4]);
-	output[5] = 2 * (((limb) ((s32) in[2])) * ((s32) in[3]) +
-			 ((limb) ((s32) in[1])) * ((s32) in[4]) +
-			 ((limb) ((s32) in[0])) * ((s32) in[5]));
-	output[6] = 2 * (((limb) ((s32) in[3])) * ((s32) in[3]) +
-			 ((limb) ((s32) in[2])) * ((s32) in[4]) +
-			 ((limb) ((s32) in[0])) * ((s32) in[6]) +
-			 2 * ((limb) ((s32) in[1])) * ((s32) in[5]));
-	output[7] = 2 * (((limb) ((s32) in[3])) * ((s32) in[4]) +
-			 ((limb) ((s32) in[2])) * ((s32) in[5]) +
-			 ((limb) ((s32) in[1])) * ((s32) in[6]) +
-			 ((limb) ((s32) in[0])) * ((s32) in[7]));
-	output[8] = ((limb) ((s32) in[4])) * ((s32) in[4]) +
-		    2 * (((limb) ((s32) in[2])) * ((s32) in[6]) +
-			 ((limb) ((s32) in[0])) * ((s32) in[8]) +
-			 2 * (((limb) ((s32) in[1])) * ((s32) in[7]) +
-			      ((limb) ((s32) in[3])) * ((s32) in[5])));
-	output[9] = 2 * (((limb) ((s32) in[4])) * ((s32) in[5]) +
-			 ((limb) ((s32) in[3])) * ((s32) in[6]) +
-			 ((limb) ((s32) in[2])) * ((s32) in[7]) +
-			 ((limb) ((s32) in[1])) * ((s32) in[8]) +
-			 ((limb) ((s32) in[0])) * ((s32) in[9]));
-	output[10] = 2 * (((limb) ((s32) in[5])) * ((s32) in[5]) +
-			  ((limb) ((s32) in[4])) * ((s32) in[6]) +
-			  ((limb) ((s32) in[2])) * ((s32) in[8]) +
-			  2 * (((limb) ((s32) in[3])) * ((s32) in[7]) +
-			       ((limb) ((s32) in[1])) * ((s32) in[9])));
-	output[11] = 2 * (((limb) ((s32) in[5])) * ((s32) in[6]) +
-			  ((limb) ((s32) in[4])) * ((s32) in[7]) +
-			  ((limb) ((s32) in[3])) * ((s32) in[8]) +
-			  ((limb) ((s32) in[2])) * ((s32) in[9]));
-	output[12] = ((limb) ((s32) in[6])) * ((s32) in[6]) +
-		     2 * (((limb) ((s32) in[4])) * ((s32) in[8]) +
-			  2 * (((limb) ((s32) in[5])) * ((s32) in[7]) +
-			       ((limb) ((s32) in[3])) * ((s32) in[9])));
-	output[13] = 2 * (((limb) ((s32) in[6])) * ((s32) in[7]) +
-			  ((limb) ((s32) in[5])) * ((s32) in[8]) +
-			  ((limb) ((s32) in[4])) * ((s32) in[9]));
-	output[14] = 2 * (((limb) ((s32) in[7])) * ((s32) in[7]) +
-			  ((limb) ((s32) in[6])) * ((s32) in[8]) +
-			  2 * ((limb) ((s32) in[5])) * ((s32) in[9]));
-	output[15] = 2 * (((limb) ((s32) in[7])) * ((s32) in[8]) +
-			  ((limb) ((s32) in[6])) * ((s32) in[9]));
-	output[16] = ((limb) ((s32) in[8])) * ((s32) in[8]) +
-		     4 * ((limb) ((s32) in[7])) * ((s32) in[9]);
-	output[17] = 2 * ((limb) ((s32) in[8])) * ((s32) in[9]);
-	output[18] = 2 * ((limb) ((s32) in[9])) * ((s32) in[9]);
-}
-
-/* fsquare sets output = in^2.
- *
- * On entry: The |in| argument is in reduced coefficients form and |in[i]| <
- * 2^27.
- *
- * On exit: The |output| argument is in reduced coefficients form (indeed, one
- * need only provide storage for 10 limbs) and |out[i]| < 2^26.
- */
-static void fsquare(limb *output, const limb *in)
-{
-	limb t[19];
-
-	fsquare_inner(t, in);
-	/* |t[i]| < 14*2^54 because the largest product of two limbs will be <
-	 * 2^(27+27) and fsquare_inner adds together, at most, 14 of those
-	 * products.
-	 */
-	freduce_degree(t);
-	freduce_coefficients(t);
-	/* |t[i]| < 2^26 */
-	memcpy(output, t, sizeof(limb) * 10);
-}
-
-/* Take a little-endian, 32-byte number and expand it into polynomial form */
-static inline void fexpand(limb *output, const u8 *input)
-{
-#define F(n, start, shift, mask) \
-	output[n] = ((((limb) input[start + 0]) | \
-		      ((limb) input[start + 1]) << 8 | \
-		      ((limb) input[start + 2]) << 16 | \
-		      ((limb) input[start + 3]) << 24) >> shift) & mask;
-	F(0, 0, 0, 0x3ffffff);
-	F(1, 3, 2, 0x1ffffff);
-	F(2, 6, 3, 0x3ffffff);
-	F(3, 9, 5, 0x1ffffff);
-	F(4, 12, 6, 0x3ffffff);
-	F(5, 16, 0, 0x1ffffff);
-	F(6, 19, 1, 0x3ffffff);
-	F(7, 22, 3, 0x1ffffff);
-	F(8, 25, 4, 0x3ffffff);
-	F(9, 28, 6, 0x1ffffff);
-#undef F
-}
-
-/* s32_eq returns 0xffffffff iff a == b and zero otherwise. */
-static s32 s32_eq(s32 a, s32 b)
-{
-	a = ~(a ^ b);
-	a &= a << 16;
-	a &= a << 8;
-	a &= a << 4;
-	a &= a << 2;
-	a &= a << 1;
-	return a >> 31;
-}
-
-/* s32_gte returns 0xffffffff if a >= b and zero otherwise, where a and b are
- * both non-negative.
- */
-static s32 s32_gte(s32 a, s32 b)
-{
-	a -= b;
-	/* a >= 0 iff a >= b. */
-	return ~(a >> 31);
-}
-
-/* Take a fully reduced polynomial form number and contract it into a
- * little-endian, 32-byte array.
- *
- * On entry: |input_limbs[i]| < 2^26
- */
-static void fcontract(u8 *output, limb *input_limbs)
-{
-	int i;
-	int j;
-	s32 input[10];
-	s32 mask;
-
-	/* |input_limbs[i]| < 2^26, so it's valid to convert to an s32. */
-	for (i = 0; i < 10; i++) {
-		input[i] = input_limbs[i];
-	}
-
-	for (j = 0; j < 2; ++j) {
-		for (i = 0; i < 9; ++i) {
-			if ((i & 1) == 1) {
-				/* This calculation is a time-invariant way to make input[i]
-				 * non-negative by borrowing from the next-larger limb.
-				 */
-				const s32 mask = input[i] >> 31;
-				const s32 carry = -((input[i] & mask) >> 25);
-
-				input[i] = input[i] + (carry << 25);
-				input[i+1] = input[i+1] - carry;
-			} else {
-				const s32 mask = input[i] >> 31;
-				const s32 carry = -((input[i] & mask) >> 26);
-
-				input[i] = input[i] + (carry << 26);
-				input[i+1] = input[i+1] - carry;
-			}
-		}
-
-		/* There's no greater limb for input[9] to borrow from, but we can multiply
-		 * by 19 and borrow from input[0], which is valid mod 2^255-19.
-		 */
-		{
-			const s32 mask = input[9] >> 31;
-			const s32 carry = -((input[9] & mask) >> 25);
-
-			input[9] = input[9] + (carry << 25);
-			input[0] = input[0] - (carry * 19);
-		}
-
-		/* After the first iteration, input[1..9] are non-negative and fit within
-		 * 25 or 26 bits, depending on position. However, input[0] may be
-		 * negative.
-		 */
-	}
-
-	/* The first borrow-propagation pass above ended with every limb except
-	 * (possibly) input[0] non-negative.
-	 *
-	 * If input[0] was negative after the first pass, then it was because of a
-	 * carry from input[9]. On entry, input[9] < 2^26 so the carry was, at most,
-	 * one, since (2**26-1) >> 25 = 1. Thus input[0] >= -19.
-	 *
-	 * In the second pass, each limb is decreased by at most one. Thus the second
-	 * borrow-propagation pass could only have wrapped around to decrease
-	 * input[0] again if the first pass left input[0] negative *and* input[1]
-	 * through input[9] were all zero. In that case, input[1] is now 2^25 - 1,
-	 * and this last borrow-propagation step will leave input[1] non-negative.
-	 */
-	{
-		const s32 mask = input[0] >> 31;
-		const s32 carry = -((input[0] & mask) >> 26);
-
-		input[0] = input[0] + (carry << 26);
-		input[1] = input[1] - carry;
-	}
-
-	/* All input[i] are now non-negative. However, there might be values between
-	 * 2^25 and 2^26 in a limb which is, nominally, 25 bits wide.
-	 */
-	for (j = 0; j < 2; j++) {
-		for (i = 0; i < 9; i++) {
-			if ((i & 1) == 1) {
-				const s32 carry = input[i] >> 25;
-
-				input[i] &= 0x1ffffff;
-				input[i+1] += carry;
-			} else {
-				const s32 carry = input[i] >> 26;
-
-				input[i] &= 0x3ffffff;
-				input[i+1] += carry;
-			}
-		}
-
-		{
-			const s32 carry = input[9] >> 25;
-
-			input[9] &= 0x1ffffff;
-			input[0] += 19*carry;
-		}
-	}
-
-	/* If the first carry-chain pass, just above, ended up with a carry from
-	 * input[9], and that caused input[0] to be out-of-bounds, then input[0] was
-	 * < 2^26 + 2*19, because the carry was, at most, two.
-	 *
-	 * If the second pass carried from input[9] again then input[0] is < 2*19 and
-	 * the input[9] -> input[0] carry didn't push input[0] out of bounds.
-	 */
-
-	/* It still remains the case that input might be between 2^255-19 and 2^255.
-	 * In this case, input[1..9] must take their maximum value and input[0] must
-	 * be >= (2^255-19) & 0x3ffffff, which is 0x3ffffed.
-	 */
-	mask = s32_gte(input[0], 0x3ffffed);
-	for (i = 1; i < 10; i++) {
-		if ((i & 1) == 1) {
-			mask &= s32_eq(input[i], 0x1ffffff);
-		} else {
-			mask &= s32_eq(input[i], 0x3ffffff);
-		}
-	}
-
-	/* mask is either 0xffffffff (if input >= 2^255-19) or zero otherwise. Thus
-	 * this conditionally subtracts 2^255-19.
-	 */
-	input[0] -= mask & 0x3ffffed;
-
-	for (i = 1; i < 10; i++) {
-		if ((i & 1) == 1) {
-			input[i] -= mask & 0x1ffffff;
-		} else {
-			input[i] -= mask & 0x3ffffff;
-		}
-	}
-
-	input[1] <<= 2;
-	input[2] <<= 3;
-	input[3] <<= 5;
-	input[4] <<= 6;
-	input[6] <<= 1;
-	input[7] <<= 3;
-	input[8] <<= 4;
-	input[9] <<= 6;
-#define F(i, s) \
-	output[s+0] |= input[i] & 0xff; \
-	output[s+1] = (input[i] >> 8) & 0xff; \
-	output[s+2] = (input[i] >> 16) & 0xff; \
-	output[s+3] = (input[i] >> 24) & 0xff;
-	output[0] = 0;
-	output[16] = 0;
-	F(0, 0);
-	F(1, 3);
-	F(2, 6);
-	F(3, 9);
-	F(4, 12);
-	F(5, 16);
-	F(6, 19);
-	F(7, 22);
-	F(8, 25);
-	F(9, 28);
-#undef F
-}
-
-/* Conditionally swap two reduced-form limb arrays if 'iswap' is 1, but leave
- * them unchanged if 'iswap' is 0. Runs in data-invariant time to avoid
- * side-channel attacks.
- *
- * NOTE that this function requires that 'iswap' be 1 or 0; other values give
- * wrong results. Also, the two limb arrays must be in reduced-coefficient,
- * reduced-degree form: the values in a[10..19] or b[10..19] aren't swapped,
- * and all values in a[0..9],b[0..9] must have magnitude less than
- * INT32_MAX.
- */
-static void swap_conditional(limb a[19], limb b[19], limb iswap)
-{
-	unsigned int i;
-	const s32 swap = (s32) -iswap;
-
-	for (i = 0; i < 10; ++i) {
-		const s32 x = swap & (((s32)a[i]) ^ ((s32)b[i]));
-
-		a[i] = ((s32)a[i]) ^ x;
-		b[i] = ((s32)b[i]) ^ x;
-	}
-}
-
-static void crecip(limb *out, const limb *z)
-{
-	limb z2[10];
-	limb z9[10];
-	limb z11[10];
-	limb z2_5_0[10];
-	limb z2_10_0[10];
-	limb z2_20_0[10];
-	limb z2_50_0[10];
-	limb z2_100_0[10];
-	limb t0[10];
-	limb t1[10];
-	int i;
-
-	/* 2 */ fsquare(z2, z);
-	/* 4 */ fsquare(t1, z2);
-	/* 8 */ fsquare(t0, t1);
-	/* 9 */ fmul(z9, t0, z);
-	/* 11 */ fmul(z11, z9, z2);
-	/* 22 */ fsquare(t0, z11);
-	/* 2^5 - 2^0 = 31 */ fmul(z2_5_0, t0, z9);
-
-	/* 2^6 - 2^1 */ fsquare(t0, z2_5_0);
-	/* 2^7 - 2^2 */ fsquare(t1, t0);
-	/* 2^8 - 2^3 */ fsquare(t0, t1);
-	/* 2^9 - 2^4 */ fsquare(t1, t0);
-	/* 2^10 - 2^5 */ fsquare(t0, t1);
-	/* 2^10 - 2^0 */ fmul(z2_10_0, t0, z2_5_0);
-
-	/* 2^11 - 2^1 */ fsquare(t0, z2_10_0);
-	/* 2^12 - 2^2 */ fsquare(t1, t0);
-	/* 2^20 - 2^10 */ for (i = 2; i < 10; i += 2) { fsquare(t0, t1); fsquare(t1, t0); }
-	/* 2^20 - 2^0 */ fmul(z2_20_0, t1, z2_10_0);
-
-	/* 2^21 - 2^1 */ fsquare(t0, z2_20_0);
-	/* 2^22 - 2^2 */ fsquare(t1, t0);
-	/* 2^40 - 2^20 */ for (i = 2; i < 20; i += 2) { fsquare(t0, t1); fsquare(t1, t0); }
-	/* 2^40 - 2^0 */ fmul(t0, t1, z2_20_0);
-
-	/* 2^41 - 2^1 */ fsquare(t1, t0);
-	/* 2^42 - 2^2 */ fsquare(t0, t1);
-	/* 2^50 - 2^10 */ for (i = 2; i < 10; i += 2) { fsquare(t1, t0); fsquare(t0, t1); }
-	/* 2^50 - 2^0 */ fmul(z2_50_0, t0, z2_10_0);
-
-	/* 2^51 - 2^1 */ fsquare(t0, z2_50_0);
-	/* 2^52 - 2^2 */ fsquare(t1, t0);
-	/* 2^100 - 2^50 */ for (i = 2; i < 50; i += 2) { fsquare(t0, t1); fsquare(t1, t0); }
-	/* 2^100 - 2^0 */ fmul(z2_100_0, t1, z2_50_0);
-
-	/* 2^101 - 2^1 */ fsquare(t1, z2_100_0);
-	/* 2^102 - 2^2 */ fsquare(t0, t1);
-	/* 2^200 - 2^100 */ for (i = 2; i < 100; i += 2) { fsquare(t1, t0); fsquare(t0, t1); }
-	/* 2^200 - 2^0 */ fmul(t1, t0, z2_100_0);
-
-	/* 2^201 - 2^1 */ fsquare(t0, t1);
-	/* 2^202 - 2^2 */ fsquare(t1, t0);
-	/* 2^250 - 2^50 */ for (i = 2; i < 50; i += 2) { fsquare(t0, t1); fsquare(t1, t0); }
-	/* 2^250 - 2^0 */ fmul(t0, t1, z2_50_0);
-
-	/* 2^251 - 2^1 */ fsquare(t1, t0);
-	/* 2^252 - 2^2 */ fsquare(t0, t1);
-	/* 2^253 - 2^3 */ fsquare(t1, t0);
-	/* 2^254 - 2^4 */ fsquare(t0, t1);
-	/* 2^255 - 2^5 */ fsquare(t1, t0);
-	/* 2^255 - 21 */ fmul(out, t1, z11);
-}
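The chain above is Fermat inversion: because p = 2^255 - 19 is prime, z^(p-2) ≡ 1/z (mod p), and p - 2 = 2^255 - 21. The final multiplication closes the chain exactly: the running exponent 2^255 - 2^5 = 2^255 - 32, plus the exponent 11 contributed by z11, gives 2^255 - 21, matching the final annotation.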
-
-/* Input: Q, Q', Q-Q'
- * Output: 2Q, Q+Q'
- *
- *   x2 z2: long form
- *   x3 z3: long form
- *   x z: short form, destroyed
- *   xprime zprime: short form, destroyed
- *   qmqp: short form, preserved
- *
- * On entry and exit, the absolute value of the limbs of all inputs and outputs
- * are < 2^26.
- */
-static void fmonty(limb *x2, limb *z2, /* output 2Q */
-		   limb *x3, limb *z3, /* output Q + Q' */
-		   limb *x, limb *z, /* input Q */
-		   limb *xprime, limb *zprime, /* input Q' */
-		   const limb *qmqp /* input Q - Q' */)
-{
-	limb origx[10], origxprime[10], zzz[19], xx[19], zz[19], xxprime[19],
-	     zzprime[19], zzzprime[19], xxxprime[19];
-
-	memcpy(origx, x, 10 * sizeof(limb));
-	fsum(x, z);
-	/* |x[i]| < 2^27 */
-	fdifference(z, origx); /* does x - z */
-	/* |z[i]| < 2^27 */
-
-	memcpy(origxprime, xprime, sizeof(limb) * 10);
-	fsum(xprime, zprime);
-	/* |xprime[i]| < 2^27 */
-	fdifference(zprime, origxprime);
-	/* |zprime[i]| < 2^27 */
-	fproduct(xxprime, xprime, z);
-	/* |xxprime[i]| < 14*2^54: the largest product of two limbs will be <
-	 * 2^(27+27) and fproduct adds together, at most, 14 of those products.
-	 * (Approximating that to 2^58 doesn't work out.)
-	 */
-	fproduct(zzprime, x, zprime);
-	/* |zzprime[i]| < 14*2^54 */
-	freduce_degree(xxprime);
-	freduce_coefficients(xxprime);
-	/* |xxprime[i]| < 2^26 */
-	freduce_degree(zzprime);
-	freduce_coefficients(zzprime);
-	/* |zzprime[i]| < 2^26 */
-	memcpy(origxprime, xxprime, sizeof(limb) * 10);
-	fsum(xxprime, zzprime);
-	/* |xxprime[i]| < 2^27 */
-	fdifference(zzprime, origxprime);
-	/* |zzprime[i]| < 2^27 */
-	fsquare(xxxprime, xxprime);
-	/* |xxxprime[i]| < 2^26 */
-	fsquare(zzzprime, zzprime);
-	/* |zzzprime[i]| < 2^26 */
-	fproduct(zzprime, zzzprime, qmqp);
-	/* |zzprime[i]| < 14*2^52 */
-	freduce_degree(zzprime);
-	freduce_coefficients(zzprime);
-	/* |zzprime[i]| < 2^26 */
-	memcpy(x3, xxxprime, sizeof(limb) * 10);
-	memcpy(z3, zzprime, sizeof(limb) * 10);
-
-	fsquare(xx, x);
-	/* |xx[i]| < 2^26 */
-	fsquare(zz, z);
-	/* |zz[i]| < 2^26 */
-	fproduct(x2, xx, zz);
-	/* |x2[i]| < 14*2^52 */
-	freduce_degree(x2);
-	freduce_coefficients(x2);
-	/* |x2[i]| < 2^26 */
-	fdifference(zz, xx); // does zz = xx - zz
-	/* |zz[i]| < 2^27 */
-	memset(zzz + 10, 0, sizeof(limb) * 9);
-	fscalar_product(zzz, zz, 121665);
-	/* |zzz[i]| < 2^(27+17) */
-	/* No need to call freduce_degree here: fscalar_product doesn't increase
-	 * the degree of its input.
-	 */
-	freduce_coefficients(zzz);
-	/* |zzz[i]| < 2^26 */
-	fsum(zzz, xx);
-	/* |zzz[i]| < 2^27 */
-	fproduct(z2, zz, zzz);
-	/* |z2[i]| < 14*2^(26+27) */
-	freduce_degree(z2);
-	freduce_coefficients(z2);
-	/* |z2[i]| < 2^26 */
-}
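In projective terms this is the standard x-only Montgomery ladder step. Writing Q = (x : z), Q' = (x' : z'), and letting q be the x-coordinate of Q - Q', the code computes

	x_2Q    = (x+z)^2 (x-z)^2
	z_2Q    = ((x+z)^2 - (x-z)^2) ((x+z)^2 + 121665 ((x+z)^2 - (x-z)^2))
	x_(Q+Q') = ((x+z)(x'-z') + (x-z)(x'+z'))^2
	z_(Q+Q') = q ((x+z)(x'-z') - (x-z)(x'+z'))^2

where 121665 = (486662 - 2)/4 comes from the curve coefficient A = 486662 of y^2 = x^3 + 486662 x^2 + x.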
-
-/* Calculates nQ where Q is the x-coordinate of a point on the curve
- *
- * resultx/resultz: the x coordinate of the resulting curve point (short form)
- * n: a little endian, 32-byte number
- * q: a point of the curve (short form)
- */
-static void cmult(limb *resultx, limb *resultz, const u8 *n, const limb *q)
-{
-	limb a[19] = {0}, b[19] = {1}, c[19] = {1}, d[19] = {0};
-	limb *nqpqx = a, *nqpqz = b, *nqx = c, *nqz = d, *t;
-	limb e[19] = {0}, f[19] = {1}, g[19] = {0}, h[19] = {1};
-	limb *nqpqx2 = e, *nqpqz2 = f, *nqx2 = g, *nqz2 = h;
-
-	unsigned int i, j;
-
-	memcpy(nqpqx, q, sizeof(limb) * 10);
-
-	for (i = 0; i < 32; ++i) {
-		u8 byte = n[31 - i];
-
-		for (j = 0; j < 8; ++j) {
-			const limb bit = byte >> 7;
-
-			swap_conditional(nqx, nqpqx, bit);
-			swap_conditional(nqz, nqpqz, bit);
-			fmonty(nqx2, nqz2,
-			       nqpqx2, nqpqz2,
-			       nqx, nqz,
-			       nqpqx, nqpqz,
-			       q);
-			swap_conditional(nqx2, nqpqx2, bit);
-			swap_conditional(nqz2, nqpqz2, bit);
-
-			t = nqx;
-			nqx = nqx2;
-			nqx2 = t;
-			t = nqz;
-			nqz = nqz2;
-			nqz2 = t;
-			t = nqpqx;
-			nqpqx = nqpqx2;
-			nqpqx2 = t;
-			t = nqpqz;
-			nqpqz = nqpqz2;
-			nqpqz2 = t;
-
-			byte <<= 1;
-		}
-	}
-
-	memcpy(resultx, nqx, sizeof(limb) * 10);
-	memcpy(resultz, nqz, sizeof(limb) * 10);
-}
-
-bool curve25519_donna32(u8 mypublic[CURVE25519_POINT_SIZE], const u8 secret[CURVE25519_POINT_SIZE], const u8 basepoint[CURVE25519_POINT_SIZE])
-{
-	limb bp[10], x[10], z[11], zmone[10];
-	u8 e[32];
-
-	memcpy(e, secret, 32);
-	normalize_secret(e);
-
-	fexpand(bp, basepoint);
-	cmult(x, z, e, bp);
-	crecip(zmone, z);
-	fmul(z, x, zmone);
-	fcontract(mypublic, z);
-
-	return true;
-}
diff --git a/curve25519-fiat32.c b/curve25519-fiat32.c
deleted file mode 100644
index 5912097..0000000
--- a/curve25519-fiat32.c
+++ /dev/null
@@ -1,838 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2015-2016 The fiat-crypto Authors.
- * Copyright (C) 2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This is a machine-generated formally verified implementation of curve25519 DH from:
- * https://github.com/mit-plv/fiat-crypto
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-enum { CURVE25519_POINT_SIZE = 32 };
-
-static __always_inline void normalize_secret(u8 secret[CURVE25519_POINT_SIZE])
-{
-	secret[0] &= 248;
-	secret[31] &= 127;
-	secret[31] |= 64;
-}
-
-/* fe means field element. Here the field is \Z/(2^255-19). An element t,
- * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
- * t[3]+2^102 t[4]+...+2^230 t[9].
- * fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc.
- * Multiplication and carrying produce fe from fe_loose.
- */
-typedef struct fe { u32 v[10]; } fe;
-
-/* fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc.
- * Addition and subtraction produce fe_loose from (fe, fe).
- */
-typedef struct fe_loose { u32 v[10]; } fe_loose;
-
-static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s)
-{
-	/* Ignores top bit of s. */
-	u32 a0 = le32_to_cpup((__force __le32 *)(s));
-	u32 a1 = le32_to_cpup((__force __le32 *)(s+4));
-	u32 a2 = le32_to_cpup((__force __le32 *)(s+8));
-	u32 a3 = le32_to_cpup((__force __le32 *)(s+12));
-	u32 a4 = le32_to_cpup((__force __le32 *)(s+16));
-	u32 a5 = le32_to_cpup((__force __le32 *)(s+20));
-	u32 a6 = le32_to_cpup((__force __le32 *)(s+24));
-	u32 a7 = le32_to_cpup((__force __le32 *)(s+28));
-	h[0] = a0&((1<<26)-1);                    /* 26 used, 32-26 left.   26 */
-	h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 =  6+19 = 25 */
-	h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */
-	h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) +  6 = 19+ 6 = 25 */
-	h[4] = (a3>> 6);                          /* (32- 6)              = 26 */
-	h[5] = a4&((1<<25)-1);                    /*                        25 */
-	h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 =  7+19 = 26 */
-	h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */
-	h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) +  6 = 20+ 6 = 26 */
-	h[9] = (a7>> 6)&((1<<25)-1);              /*                        25 */
-}
-
-static __always_inline void fe_frombytes(fe *h, const u8 *s)
-{
-	fe_frombytes_impl(h->v, s);
-}
-
-static __always_inline u8 /*bool*/ addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
-{
-	/* This function extracts 25 bits of result and 1 bit of carry (26 total), so
-	 * a 32-bit intermediate is sufficient.
-	 */
-	u32 x = a + b + c;
-	*low = x & ((1 << 25) - 1);
-	return (x >> 25) & 1;
-}
-
-static __always_inline u8 /*bool*/ addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
-{
-	/* This function extracts 26 bits of result and 1 bit of carry (27 total), so
-	 * a 32-bit intermediate is sufficient.
-	 */
-	u32 x = a + b + c;
-	*low = x & ((1 << 26) - 1);
-	return (x >> 26) & 1;
-}
-
-static __always_inline u8 /*bool*/ subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
-{
-	/* This function extracts 25 bits of result and 1 bit of borrow (26 total), so
-	 * a 32-bit intermediate is sufficient.
-	 */
-	u32 x = a - b - c;
-	*low = x & ((1 << 25) - 1);
-	return x >> 31;
-}
-
-static __always_inline u8 /*bool*/ subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
-{
-	/* This function extracts 26 bits of result and 1 bit of borrow (27 total), so
-	 * a 32-bit intermediate is sufficient.
-	 */
-	u32 x = a - b - c;
-	*low = x & ((1 << 26) - 1);
-	return x >> 31;
-}
-
-static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz)
-{
-	t = -!!t; /* all set if nonzero, 0 if 0 */
-	return (t&nz) | ((~t)&z);
-}
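A short worked note on the branch-free select above: !!t maps any nonzero t to 1 and zero to 0, so -!!t is either 0xffffffff or 0x00000000. With t = 5, for example, the mask becomes all ones and (t&nz) | (~t&z) returns nz; with t = 0 it returns z, with no data-dependent branch either way.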
-
-static __always_inline void fe_freeze(u32 out[10], const u32 in1[10])
-{
-	{ const u32 x17 = in1[9];
-	{ const u32 x18 = in1[8];
-	{ const u32 x16 = in1[7];
-	{ const u32 x14 = in1[6];
-	{ const u32 x12 = in1[5];
-	{ const u32 x10 = in1[4];
-	{ const u32 x8 = in1[3];
-	{ const u32 x6 = in1[2];
-	{ const u32 x4 = in1[1];
-	{ const u32 x2 = in1[0];
-	{ u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20);
-	{ u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23);
-	{ u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26);
-	{ u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29);
-	{ u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32);
-	{ u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35);
-	{ u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38);
-	{ u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41);
-	{ u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44);
-	{ u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47);
-	{ u32 x49 = cmovznz32(x48, 0x0, 0xffffffff);
-	{ u32 x50 = (x49 & 0x3ffffed);
-	{ u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52);
-	{ u32 x54 = (x49 & 0x1ffffff);
-	{ u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56);
-	{ u32 x58 = (x49 & 0x3ffffff);
-	{ u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60);
-	{ u32 x62 = (x49 & 0x1ffffff);
-	{ u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64);
-	{ u32 x66 = (x49 & 0x3ffffff);
-	{ u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68);
-	{ u32 x70 = (x49 & 0x1ffffff);
-	{ u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72);
-	{ u32 x74 = (x49 & 0x3ffffff);
-	{ u32 x76; u8/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76);
-	{ u32 x78 = (x49 & 0x1ffffff);
-	{ u32 x80; u8/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80);
-	{ u32 x82 = (x49 & 0x3ffffff);
-	{ u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84);
-	{ u32 x86 = (x49 & 0x1ffffff);
-	{ u32 x88; addcarryx_u25(x85, x47, x86, &x88);
-	out[0] = x52;
-	out[1] = x56;
-	out[2] = x60;
-	out[3] = x64;
-	out[4] = x68;
-	out[5] = x72;
-	out[6] = x76;
-	out[7] = x80;
-	out[8] = x84;
-	out[9] = x88;
-	}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
-}
-
-static __always_inline void fe_tobytes(u8 s[32], const fe *f)
-{
-	u32 h[10];
-	fe_freeze(h, f->v);
-	s[0] = h[0] >> 0;
-	s[1] = h[0] >> 8;
-	s[2] = h[0] >> 16;
-	s[3] = (h[0] >> 24) | (h[1] << 2);
-	s[4] = h[1] >> 6;
-	s[5] = h[1] >> 14;
-	s[6] = (h[1] >> 22) | (h[2] << 3);
-	s[7] = h[2] >> 5;
-	s[8] = h[2] >> 13;
-	s[9] = (h[2] >> 21) | (h[3] << 5);
-	s[10] = h[3] >> 3;
-	s[11] = h[3] >> 11;
-	s[12] = (h[3] >> 19) | (h[4] << 6);
-	s[13] = h[4] >> 2;
-	s[14] = h[4] >> 10;
-	s[15] = h[4] >> 18;
-	s[16] = h[5] >> 0;
-	s[17] = h[5] >> 8;
-	s[18] = h[5] >> 16;
-	s[19] = (h[5] >> 24) | (h[6] << 1);
-	s[20] = h[6] >> 7;
-	s[21] = h[6] >> 15;
-	s[22] = (h[6] >> 23) | (h[7] << 3);
-	s[23] = h[7] >> 5;
-	s[24] = h[7] >> 13;
-	s[25] = (h[7] >> 21) | (h[8] << 4);
-	s[26] = h[8] >> 4;
-	s[27] = h[8] >> 12;
-	s[28] = (h[8] >> 20) | (h[9] << 6);
-	s[29] = h[9] >> 2;
-	s[30] = h[9] >> 10;
-	s[31] = h[9] >> 18;
-}
-
-/* h = f */
-static __always_inline void fe_copy(fe *h, const fe *f)
-{
-	memmove(h, f, sizeof(u32) * 10);
-}
-
-static __always_inline void fe_copy_lt(fe_loose *h, const fe *f)
-{
-	memmove(h, f, sizeof(u32) * 10);
-}
-
-/* h = 0 */
-static __always_inline void fe_0(fe *h)
-{
-	memset(h, 0, sizeof(u32) * 10);
-}
-
-/* h = 1 */
-static __always_inline void fe_1(fe *h)
-{
-	memset(h, 0, sizeof(u32) * 10);
-	h->v[0] = 1;
-}
-
-static void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
-{
-	{ const u32 x20 = in1[9];
-	{ const u32 x21 = in1[8];
-	{ const u32 x19 = in1[7];
-	{ const u32 x17 = in1[6];
-	{ const u32 x15 = in1[5];
-	{ const u32 x13 = in1[4];
-	{ const u32 x11 = in1[3];
-	{ const u32 x9 = in1[2];
-	{ const u32 x7 = in1[1];
-	{ const u32 x5 = in1[0];
-	{ const u32 x38 = in2[9];
-	{ const u32 x39 = in2[8];
-	{ const u32 x37 = in2[7];
-	{ const u32 x35 = in2[6];
-	{ const u32 x33 = in2[5];
-	{ const u32 x31 = in2[4];
-	{ const u32 x29 = in2[3];
-	{ const u32 x27 = in2[2];
-	{ const u32 x25 = in2[1];
-	{ const u32 x23 = in2[0];
-	out[0] = (x5 + x23);
-	out[1] = (x7 + x25);
-	out[2] = (x9 + x27);
-	out[3] = (x11 + x29);
-	out[4] = (x13 + x31);
-	out[5] = (x15 + x33);
-	out[6] = (x17 + x35);
-	out[7] = (x19 + x37);
-	out[8] = (x21 + x39);
-	out[9] = (x20 + x38);
-	}}}}}}}}}}}}}}}}}}}}
-}
-
-/* h = f + g
- * Can overlap h with f or g.
- */
-static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g)
-{
-	fe_add_impl(h->v, f->v, g->v);
-}
-
-static void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
-{
-	{ const u32 x20 = in1[9];
-	{ const u32 x21 = in1[8];
-	{ const u32 x19 = in1[7];
-	{ const u32 x17 = in1[6];
-	{ const u32 x15 = in1[5];
-	{ const u32 x13 = in1[4];
-	{ const u32 x11 = in1[3];
-	{ const u32 x9 = in1[2];
-	{ const u32 x7 = in1[1];
-	{ const u32 x5 = in1[0];
-	{ const u32 x38 = in2[9];
-	{ const u32 x39 = in2[8];
-	{ const u32 x37 = in2[7];
-	{ const u32 x35 = in2[6];
-	{ const u32 x33 = in2[5];
-	{ const u32 x31 = in2[4];
-	{ const u32 x29 = in2[3];
-	{ const u32 x27 = in2[2];
-	{ const u32 x25 = in2[1];
-	{ const u32 x23 = in2[0];
-	out[0] = ((0x7ffffda + x5) - x23);
-	out[1] = ((0x3fffffe + x7) - x25);
-	out[2] = ((0x7fffffe + x9) - x27);
-	out[3] = ((0x3fffffe + x11) - x29);
-	out[4] = ((0x7fffffe + x13) - x31);
-	out[5] = ((0x3fffffe + x15) - x33);
-	out[6] = ((0x7fffffe + x17) - x35);
-	out[7] = ((0x3fffffe + x19) - x37);
-	out[8] = ((0x7fffffe + x21) - x39);
-	out[9] = ((0x3fffffe + x20) - x38);
-	}}}}}}}}}}}}}}}}}}}}
-}
-
-/* h = f - g
- * Can overlap h with f or g.
- */
-static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g)
-{
-	fe_sub_impl(h->v, f->v, g->v);
-}
-
-static void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
-{
-	{ const u32 x20 = in1[9];
-	{ const u32 x21 = in1[8];
-	{ const u32 x19 = in1[7];
-	{ const u32 x17 = in1[6];
-	{ const u32 x15 = in1[5];
-	{ const u32 x13 = in1[4];
-	{ const u32 x11 = in1[3];
-	{ const u32 x9 = in1[2];
-	{ const u32 x7 = in1[1];
-	{ const u32 x5 = in1[0];
-	{ const u32 x38 = in2[9];
-	{ const u32 x39 = in2[8];
-	{ const u32 x37 = in2[7];
-	{ const u32 x35 = in2[6];
-	{ const u32 x33 = in2[5];
-	{ const u32 x31 = in2[4];
-	{ const u32 x29 = in2[3];
-	{ const u32 x27 = in2[2];
-	{ const u32 x25 = in2[1];
-	{ const u32 x23 = in2[0];
-	{ u64 x40 = ((u64)x23 * x5);
-	{ u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
-	{ u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
-	{ u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
-	{ u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
-	{ u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
-	{ u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
-	{ u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
-	{ u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
-	{ u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
-	{ u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
-	{ u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
-	{ u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
-	{ u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
-	{ u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
-	{ u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
-	{ u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
-	{ u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
-	{ u64 x58 = ((u64)(0x2 * x38) * x20);
-	{ u64 x59 = (x48 + (x58 << 0x4));
-	{ u64 x60 = (x59 + (x58 << 0x1));
-	{ u64 x61 = (x60 + x58);
-	{ u64 x62 = (x47 + (x57 << 0x4));
-	{ u64 x63 = (x62 + (x57 << 0x1));
-	{ u64 x64 = (x63 + x57);
-	{ u64 x65 = (x46 + (x56 << 0x4));
-	{ u64 x66 = (x65 + (x56 << 0x1));
-	{ u64 x67 = (x66 + x56);
-	{ u64 x68 = (x45 + (x55 << 0x4));
-	{ u64 x69 = (x68 + (x55 << 0x1));
-	{ u64 x70 = (x69 + x55);
-	{ u64 x71 = (x44 + (x54 << 0x4));
-	{ u64 x72 = (x71 + (x54 << 0x1));
-	{ u64 x73 = (x72 + x54);
-	{ u64 x74 = (x43 + (x53 << 0x4));
-	{ u64 x75 = (x74 + (x53 << 0x1));
-	{ u64 x76 = (x75 + x53);
-	{ u64 x77 = (x42 + (x52 << 0x4));
-	{ u64 x78 = (x77 + (x52 << 0x1));
-	{ u64 x79 = (x78 + x52);
-	{ u64 x80 = (x41 + (x51 << 0x4));
-	{ u64 x81 = (x80 + (x51 << 0x1));
-	{ u64 x82 = (x81 + x51);
-	{ u64 x83 = (x40 + (x50 << 0x4));
-	{ u64 x84 = (x83 + (x50 << 0x1));
-	{ u64 x85 = (x84 + x50);
-	{ u64 x86 = (x85 >> 0x1a);
-	{ u32 x87 = ((u32)x85 & 0x3ffffff);
-	{ u64 x88 = (x86 + x82);
-	{ u64 x89 = (x88 >> 0x19);
-	{ u32 x90 = ((u32)x88 & 0x1ffffff);
-	{ u64 x91 = (x89 + x79);
-	{ u64 x92 = (x91 >> 0x1a);
-	{ u32 x93 = ((u32)x91 & 0x3ffffff);
-	{ u64 x94 = (x92 + x76);
-	{ u64 x95 = (x94 >> 0x19);
-	{ u32 x96 = ((u32)x94 & 0x1ffffff);
-	{ u64 x97 = (x95 + x73);
-	{ u64 x98 = (x97 >> 0x1a);
-	{ u32 x99 = ((u32)x97 & 0x3ffffff);
-	{ u64 x100 = (x98 + x70);
-	{ u64 x101 = (x100 >> 0x19);
-	{ u32 x102 = ((u32)x100 & 0x1ffffff);
-	{ u64 x103 = (x101 + x67);
-	{ u64 x104 = (x103 >> 0x1a);
-	{ u32 x105 = ((u32)x103 & 0x3ffffff);
-	{ u64 x106 = (x104 + x64);
-	{ u64 x107 = (x106 >> 0x19);
-	{ u32 x108 = ((u32)x106 & 0x1ffffff);
-	{ u64 x109 = (x107 + x61);
-	{ u64 x110 = (x109 >> 0x1a);
-	{ u32 x111 = ((u32)x109 & 0x3ffffff);
-	{ u64 x112 = (x110 + x49);
-	{ u64 x113 = (x112 >> 0x19);
-	{ u32 x114 = ((u32)x112 & 0x1ffffff);
-	{ u64 x115 = (x87 + (0x13 * x113));
-	{ u32 x116 = (u32) (x115 >> 0x1a);
-	{ u32 x117 = ((u32)x115 & 0x3ffffff);
-	{ u32 x118 = (x116 + x90);
-	{ u32 x119 = (x118 >> 0x19);
-	{ u32 x120 = (x118 & 0x1ffffff);
-	out[0] = x117;
-	out[1] = x120;
-	out[2] = (x119 + x93);
-	out[3] = x96;
-	out[4] = x99;
-	out[5] = x102;
-	out[6] = x105;
-	out[7] = x108;
-	out[8] = x111;
-	out[9] = x114;
-	}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
-}
-
-static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g)
-{
-	fe_mul_impl(h->v, f->v, g->v);
-}
-
-static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g)
-{
-	fe_mul_impl(h->v, f->v, g->v);
-}
-
-static __always_inline void fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g)
-{
-	fe_mul_impl(h->v, f->v, g->v);
-}
-
-static void fe_sqr_impl(u32 out[10], const u32 in1[10])
-{
-	{ const u32 x17 = in1[9];
-	{ const u32 x18 = in1[8];
-	{ const u32 x16 = in1[7];
-	{ const u32 x14 = in1[6];
-	{ const u32 x12 = in1[5];
-	{ const u32 x10 = in1[4];
-	{ const u32 x8 = in1[3];
-	{ const u32 x6 = in1[2];
-	{ const u32 x4 = in1[1];
-	{ const u32 x2 = in1[0];
-	{ u64 x19 = ((u64)x2 * x2);
-	{ u64 x20 = ((u64)(0x2 * x2) * x4);
-	{ u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * x6)));
-	{ u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8)));
-	{ u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10));
-	{ u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12)));
-	{ u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12)));
-	{ u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16)));
-	{ u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12))))));
-	{ u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17)));
-	{ u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17)))));
-	{ u64 x30 = (0x2 * (((((u64)x12 * x14) + ((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17)));
-	{ u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17))))));
-	{ u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17)));
-	{ u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17)));
-	{ u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17)));
-	{ u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17));
-	{ u64 x36 = ((u64)(0x2 * x18) * x17);
-	{ u64 x37 = ((u64)(0x2 * x17) * x17);
-	{ u64 x38 = (x27 + (x37 << 0x4));
-	{ u64 x39 = (x38 + (x37 << 0x1));
-	{ u64 x40 = (x39 + x37);
-	{ u64 x41 = (x26 + (x36 << 0x4));
-	{ u64 x42 = (x41 + (x36 << 0x1));
-	{ u64 x43 = (x42 + x36);
-	{ u64 x44 = (x25 + (x35 << 0x4));
-	{ u64 x45 = (x44 + (x35 << 0x1));
-	{ u64 x46 = (x45 + x35);
-	{ u64 x47 = (x24 + (x34 << 0x4));
-	{ u64 x48 = (x47 + (x34 << 0x1));
-	{ u64 x49 = (x48 + x34);
-	{ u64 x50 = (x23 + (x33 << 0x4));
-	{ u64 x51 = (x50 + (x33 << 0x1));
-	{ u64 x52 = (x51 + x33);
-	{ u64 x53 = (x22 + (x32 << 0x4));
-	{ u64 x54 = (x53 + (x32 << 0x1));
-	{ u64 x55 = (x54 + x32);
-	{ u64 x56 = (x21 + (x31 << 0x4));
-	{ u64 x57 = (x56 + (x31 << 0x1));
-	{ u64 x58 = (x57 + x31);
-	{ u64 x59 = (x20 + (x30 << 0x4));
-	{ u64 x60 = (x59 + (x30 << 0x1));
-	{ u64 x61 = (x60 + x30);
-	{ u64 x62 = (x19 + (x29 << 0x4));
-	{ u64 x63 = (x62 + (x29 << 0x1));
-	{ u64 x64 = (x63 + x29);
-	{ u64 x65 = (x64 >> 0x1a);
-	{ u32 x66 = ((u32)x64 & 0x3ffffff);
-	{ u64 x67 = (x65 + x61);
-	{ u64 x68 = (x67 >> 0x19);
-	{ u32 x69 = ((u32)x67 & 0x1ffffff);
-	{ u64 x70 = (x68 + x58);
-	{ u64 x71 = (x70 >> 0x1a);
-	{ u32 x72 = ((u32)x70 & 0x3ffffff);
-	{ u64 x73 = (x71 + x55);
-	{ u64 x74 = (x73 >> 0x19);
-	{ u32 x75 = ((u32)x73 & 0x1ffffff);
-	{ u64 x76 = (x74 + x52);
-	{ u64 x77 = (x76 >> 0x1a);
-	{ u32 x78 = ((u32)x76 & 0x3ffffff);
-	{ u64 x79 = (x77 + x49);
-	{ u64 x80 = (x79 >> 0x19);
-	{ u32 x81 = ((u32)x79 & 0x1ffffff);
-	{ u64 x82 = (x80 + x46);
-	{ u64 x83 = (x82 >> 0x1a);
-	{ u32 x84 = ((u32)x82 & 0x3ffffff);
-	{ u64 x85 = (x83 + x43);
-	{ u64 x86 = (x85 >> 0x19);
-	{ u32 x87 = ((u32)x85 & 0x1ffffff);
-	{ u64 x88 = (x86 + x40);
-	{ u64 x89 = (x88 >> 0x1a);
-	{ u32 x90 = ((u32)x88 & 0x3ffffff);
-	{ u64 x91 = (x89 + x28);
-	{ u64 x92 = (x91 >> 0x19);
-	{ u32 x93 = ((u32)x91 & 0x1ffffff);
-	{ u64 x94 = (x66 + (0x13 * x92));
-	{ u32 x95 = (u32) (x94 >> 0x1a);
-	{ u32 x96 = ((u32)x94 & 0x3ffffff);
-	{ u32 x97 = (x95 + x69);
-	{ u32 x98 = (x97 >> 0x19);
-	{ u32 x99 = (x97 & 0x1ffffff);
-	out[0] = x96;
-	out[1] = x99;
-	out[2] = (x98 + x72);
-	out[3] = x75;
-	out[4] = x78;
-	out[5] = x81;
-	out[6] = x84;
-	out[7] = x87;
-	out[8] = x90;
-	out[9] = x93;
-	}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
-}
-
-static __always_inline void fe_sq_tl(fe *h, const fe_loose *f)
-{
-	fe_sqr_impl(h->v, f->v);
-}
-
-static __always_inline void fe_sq_tt(fe *h, const fe *f)
-{
-	fe_sqr_impl(h->v, f->v);
-}
-
-static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)
-{
-	fe t0;
-	fe t1;
-	fe t2;
-	fe t3;
-	int i;
-
-	fe_sq_tl(&t0, z);
-	fe_sq_tt(&t1, &t0);
-	for (i = 1; i < 2; ++i)
-		fe_sq_tt(&t1, &t1);
-	fe_mul_tlt(&t1, z, &t1);
-	fe_mul_ttt(&t0, &t0, &t1);
-	fe_sq_tt(&t2, &t0);
-	fe_mul_ttt(&t1, &t1, &t2);
-	fe_sq_tt(&t2, &t1);
-	for (i = 1; i < 5; ++i)
-		fe_sq_tt(&t2, &t2);
-	fe_mul_ttt(&t1, &t2, &t1);
-	fe_sq_tt(&t2, &t1);
-	for (i = 1; i < 10; ++i)
-		fe_sq_tt(&t2, &t2);
-	fe_mul_ttt(&t2, &t2, &t1);
-	fe_sq_tt(&t3, &t2);
-	for (i = 
1; i < 20; ++i) - fe_sq_tt(&t3, &t3); - fe_mul_ttt(&t2, &t3, &t2); - fe_sq_tt(&t2, &t2); - for (i = 1; i < 10; ++i) - fe_sq_tt(&t2, &t2); - fe_mul_ttt(&t1, &t2, &t1); - fe_sq_tt(&t2, &t1); - for (i = 1; i < 50; ++i) - fe_sq_tt(&t2, &t2); - fe_mul_ttt(&t2, &t2, &t1); - fe_sq_tt(&t3, &t2); - for (i = 1; i < 100; ++i) - fe_sq_tt(&t3, &t3); - fe_mul_ttt(&t2, &t3, &t2); - fe_sq_tt(&t2, &t2); - for (i = 1; i < 50; ++i) - fe_sq_tt(&t2, &t2); - fe_mul_ttt(&t1, &t2, &t1); - fe_sq_tt(&t1, &t1); - for (i = 1; i < 5; ++i) - fe_sq_tt(&t1, &t1); - fe_mul_ttt(out, &t1, &t0); -} - -static __always_inline void fe_invert(fe *out, const fe *z) -{ - fe_loose l; - fe_copy_lt(&l, z); - fe_loose_invert(out, &l); -} - -/* Replace (f,g) with (g,f) if b == 1; - * replace (f,g) with (f,g) if b == 0. - * - * Preconditions: b in {0,1} - */ -static __always_inline void fe_cswap(fe *f, fe *g, unsigned int b) -{ - unsigned i; - b = 0-b; - for (i = 0; i < 10; i++) { - u32 x = f->v[i] ^ g->v[i]; - x &= b; - f->v[i] ^= x; - g->v[i] ^= x; - } -} - -/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0.*/ -static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10]) -{ - { const u32 x20 = in1[9]; - { const u32 x21 = in1[8]; - { const u32 x19 = in1[7]; - { const u32 x17 = in1[6]; - { const u32 x15 = in1[5]; - { const u32 x13 = in1[4]; - { const u32 x11 = in1[3]; - { const u32 x9 = in1[2]; - { const u32 x7 = in1[1]; - { const u32 x5 = in1[0]; - { const u32 x38 = 0; - { const u32 x39 = 0; - { const u32 x37 = 0; - { const u32 x35 = 0; - { const u32 x33 = 0; - { const u32 x31 = 0; - { const u32 x29 = 0; - { const u32 x27 = 0; - { const u32 x25 = 0; - { const u32 x23 = 121666; - { u64 x40 = ((u64)x23 * x5); - { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); - { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); - { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); - { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); - { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); - { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); - { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); - { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5)); - { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); - { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); - { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); - { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); - { u64 x53 = (((((((u64)x35 * x19) + 
((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); - { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); - { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); - { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); - { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21)); - { u64 x58 = ((u64)(0x2 * x38) * x20); - { u64 x59 = (x48 + (x58 << 0x4)); - { u64 x60 = (x59 + (x58 << 0x1)); - { u64 x61 = (x60 + x58); - { u64 x62 = (x47 + (x57 << 0x4)); - { u64 x63 = (x62 + (x57 << 0x1)); - { u64 x64 = (x63 + x57); - { u64 x65 = (x46 + (x56 << 0x4)); - { u64 x66 = (x65 + (x56 << 0x1)); - { u64 x67 = (x66 + x56); - { u64 x68 = (x45 + (x55 << 0x4)); - { u64 x69 = (x68 + (x55 << 0x1)); - { u64 x70 = (x69 + x55); - { u64 x71 = (x44 + (x54 << 0x4)); - { u64 x72 = (x71 + (x54 << 0x1)); - { u64 x73 = (x72 + x54); - { u64 x74 = (x43 + (x53 << 0x4)); - { u64 x75 = (x74 + (x53 << 0x1)); - { u64 x76 = (x75 + x53); - { u64 x77 = (x42 + (x52 << 0x4)); - { u64 x78 = (x77 + (x52 << 0x1)); - { u64 x79 = (x78 + x52); - { u64 x80 = (x41 + (x51 << 0x4)); - { u64 x81 = (x80 + (x51 << 0x1)); - { u64 x82 = (x81 + x51); - { u64 x83 = (x40 + (x50 << 0x4)); - { u64 x84 = (x83 + (x50 << 0x1)); - { u64 x85 = (x84 + x50); - { u64 x86 = (x85 >> 0x1a); - { u32 x87 = ((u32)x85 & 0x3ffffff); - { u64 x88 = (x86 + x82); - { u64 x89 = (x88 >> 0x19); - { u32 x90 = ((u32)x88 & 0x1ffffff); - { u64 x91 = (x89 + x79); - { u64 x92 = (x91 >> 0x1a); - { u32 x93 = ((u32)x91 & 0x3ffffff); - { u64 x94 = (x92 + x76); - { u64 x95 = (x94 >> 0x19); - { u32 x96 = ((u32)x94 & 0x1ffffff); - { u64 x97 = (x95 + x73); - { u64 x98 = (x97 >> 0x1a); - { u32 x99 = ((u32)x97 & 0x3ffffff); - { u64 x100 = (x98 + x70); - { u64 x101 = (x100 >> 0x19); - { u32 x102 = ((u32)x100 & 0x1ffffff); - { u64 x103 = (x101 + x67); - { u64 x104 = (x103 >> 0x1a); - { u32 x105 = ((u32)x103 & 0x3ffffff); - { u64 x106 = (x104 + x64); - { u64 x107 = (x106 >> 0x19); - { u32 x108 = ((u32)x106 & 0x1ffffff); - { u64 x109 = (x107 + x61); - { u64 x110 = (x109 >> 0x1a); - { u32 x111 = ((u32)x109 & 0x3ffffff); - { u64 x112 = (x110 + x49); - { u64 x113 = (x112 >> 0x19); - { u32 x114 = ((u32)x112 & 0x1ffffff); - { u64 x115 = (x87 + (0x13 * x113)); - { u32 x116 = (u32) (x115 >> 0x1a); - { u32 x117 = ((u32)x115 & 0x3ffffff); - { u32 x118 = (x116 + x90); - { u32 x119 = (x118 >> 0x19); - { u32 x120 = (x118 & 0x1ffffff); - out[0] = x117; - out[1] = x120; - out[2] = (x119 + x93); - out[3] = x96; - out[4] = x99; - out[5] = x102; - out[6] = x105; - out[7] = x108; - out[8] = x111; - out[9] = x114; - }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} -} - -static __always_inline void fe_mul121666(fe *h, const fe_loose *f) -{ - fe_mul_121666_impl(h->v, f->v); -} - -bool curve25519_fiat32(u8 out[CURVE25519_POINT_SIZE], const u8 scalar[CURVE25519_POINT_SIZE], const u8 point[CURVE25519_POINT_SIZE]) -{ - fe x1, x2, z2, x3, z3, tmp0, tmp1; - fe_loose x2l, z2l, x3l, tmp0l, tmp1l; - unsigned swap = 0; - int pos; - u8 e[32]; - - memcpy(e, scalar, 32); - normalize_secret(e); - - /* The following implementation was transcribed to Coq and proven to - * correspond to unary scalar multiplication in affine coordinates given that - * x1 != 0 is the x coordinate of some point on the curve. 
It was also checked - * in Coq that doing a ladderstep with x1 = x3 = 0 gives z2' = z3' = 0, and z2 - * = z3 = 0 gives z2' = z3' = 0. The statement was quantified over the - * underlying field, so it applies to Curve25519 itself and the quadratic - * twist of Curve25519. It was not proven in Coq that prime-field arithmetic - * correctly simulates extension-field arithmetic on prime-field values. - * The decoding of the byte array representation of e was not considered. - * Specification of Montgomery curves in affine coordinates: - * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27> - * Proof that these form a group that is isomorphic to a Weierstrass curve: - * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35> - * Coq transcription and correctness proof of the loop (where scalarbits=255): - * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118> - * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278> - * preconditions: 0 <= e < 2^255 (not necessarily e < order), fe_invert(0) = 0 - */ - fe_frombytes(&x1, point); - fe_1(&x2); - fe_0(&z2); - fe_copy(&x3, &x1); - fe_1(&z3); - - for (pos = 254; pos >= 0; --pos) { - /* loop invariant as of right before the test, for the case where x1 != 0: - * pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3 is nonzero - * let r := e >> (pos+1) in the following equalities of projective points: - * to_xz (r*P) === if swap then (x3, z3) else (x2, z2) - * to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3) - * x1 is the nonzero x coordinate of the nonzero point (r*P-(r+1)*P) - */ - unsigned b = 1 & (e[pos / 8] >> (pos & 7)); - swap ^= b; - fe_cswap(&x2, &x3, swap); - fe_cswap(&z2, &z3, swap); - swap = b; - /* Coq transcription of ladderstep formula (called from transcribed loop): - * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89> - * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131> - * x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217> - * x1 = 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147> - */ - fe_sub(&tmp0l, &x3, &z3); - fe_sub(&tmp1l, &x2, &z2); - fe_add(&x2l, &x2, &z2); - fe_add(&z2l, &x3, &z3); - fe_mul_tll(&z3, &tmp0l, &x2l); - fe_mul_tll(&z2, &z2l, &tmp1l); - fe_sq_tl(&tmp0, &tmp1l); - fe_sq_tl(&tmp1, &x2l); - fe_add(&x3l, &z3, &z2); - fe_sub(&z2l, &z3, &z2); - fe_mul_ttt(&x2, &tmp1, &tmp0); - fe_sub(&tmp1l, &tmp1, &tmp0); - fe_sq_tl(&z2, &z2l); - fe_mul121666(&z3, &tmp1l); - fe_sq_tl(&x3, &x3l); - fe_add(&tmp0l, &tmp0, &z3); - fe_mul_ttt(&z3, &x1, &z2); - fe_mul_tll(&z2, &tmp1l, &tmp0l); - } - /* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3) else (x2, z2) */ - fe_cswap(&x2, &x3, swap); - fe_cswap(&z2, &z3, swap); - - fe_invert(&z2, &z2); - fe_mul_ttt(&x2, &x2, &z2); - fe_tobytes(out, &x2); - - return true; -} diff --git a/curve25519-neon.S b/curve25519-neon.S deleted file mode 100644 index b8e2c1d..0000000 --- a/curve25519-neon.S +++ /dev/null @@ -1,2110 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * - * Copyright (C) 2015-2018 Jason A. 
Donenfeld <Jason@zx2c4.com>. All Rights Reserved. - * - * Based on algorithms from Daniel J. Bernstein and Peter Schwabe. - */ - -#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) - -#include <linux/linkage.h> - - .text - .fpu neon - .align 4 - -ENTRY(curve25519_neon) - vpush {q4,q5,q6,q7} - mov r12,sp - sub r3,sp,#736 - and r3,r3,#0xffffffe0 - mov sp,r3 - strd r4,[sp,#0] - strd r6,[sp,#8] - strd r8,[sp,#16] - strd r10,[sp,#24] - str r12,[sp,#480] - str r14,[sp,#484] - mov r0,r0 - mov r1,r1 - mov r2,r2 - add r3,sp,#32 - ldr r4,=0 - ldr r5,=254 - vmov.i32 q0,#1 - vshr.u64 q1,q0,#7 - vshr.u64 q0,q0,#8 - vmov.i32 d4,#19 - vmov.i32 d5,#38 - add r6,sp,#512 - vst1.8 {d2-d3},[r6,: 128] - add r6,sp,#528 - vst1.8 {d0-d1},[r6,: 128] - add r6,sp,#544 - vst1.8 {d4-d5},[r6,: 128] - add r6,r3,#0 - vmov.i32 q2,#0 - vst1.8 {d4-d5},[r6,: 128]! - vst1.8 {d4-d5},[r6,: 128]! - vst1.8 d4,[r6,: 64] - add r6,r3,#0 - ldr r7,=960 - sub r7,r7,#2 - neg r7,r7 - sub r7,r7,r7,LSL #7 - str r7,[r6] - add r6,sp,#704 - vld1.8 {d4-d5},[r1]! - vld1.8 {d6-d7},[r1] - vst1.8 {d4-d5},[r6,: 128]! - vst1.8 {d6-d7},[r6,: 128] - sub r1,r6,#16 - ldrb r6,[r1] - and r6,r6,#248 - strb r6,[r1] - ldrb r6,[r1,#31] - and r6,r6,#127 - orr r6,r6,#64 - strb r6,[r1,#31] - vmov.i64 q2,#0xffffffff - vshr.u64 q3,q2,#7 - vshr.u64 q2,q2,#6 - vld1.8 {d8},[r2] - vld1.8 {d10},[r2] - add r2,r2,#6 - vld1.8 {d12},[r2] - vld1.8 {d14},[r2] - add r2,r2,#6 - vld1.8 {d16},[r2] - add r2,r2,#4 - vld1.8 {d18},[r2] - vld1.8 {d20},[r2] - add r2,r2,#6 - vld1.8 {d22},[r2] - add r2,r2,#2 - vld1.8 {d24},[r2] - vld1.8 {d26},[r2] - vshr.u64 q5,q5,#26 - vshr.u64 q6,q6,#3 - vshr.u64 q7,q7,#29 - vshr.u64 q8,q8,#6 - vshr.u64 q10,q10,#25 - vshr.u64 q11,q11,#3 - vshr.u64 q12,q12,#12 - vshr.u64 q13,q13,#38 - vand q4,q4,q2 - vand q6,q6,q2 - vand q8,q8,q2 - vand q10,q10,q2 - vand q2,q12,q2 - vand q5,q5,q3 - vand q7,q7,q3 - vand q9,q9,q3 - vand q11,q11,q3 - vand q3,q13,q3 - add r2,r3,#48 - vadd.i64 q12,q4,q1 - vadd.i64 q13,q10,q1 - vshr.s64 q12,q12,#26 - vshr.s64 q13,q13,#26 - vadd.i64 q5,q5,q12 - vshl.i64 q12,q12,#26 - vadd.i64 q14,q5,q0 - vadd.i64 q11,q11,q13 - vshl.i64 q13,q13,#26 - vadd.i64 q15,q11,q0 - vsub.i64 q4,q4,q12 - vshr.s64 q12,q14,#25 - vsub.i64 q10,q10,q13 - vshr.s64 q13,q15,#25 - vadd.i64 q6,q6,q12 - vshl.i64 q12,q12,#25 - vadd.i64 q14,q6,q1 - vadd.i64 q2,q2,q13 - vsub.i64 q5,q5,q12 - vshr.s64 q12,q14,#26 - vshl.i64 q13,q13,#25 - vadd.i64 q14,q2,q1 - vadd.i64 q7,q7,q12 - vshl.i64 q12,q12,#26 - vadd.i64 q15,q7,q0 - vsub.i64 q11,q11,q13 - vshr.s64 q13,q14,#26 - vsub.i64 q6,q6,q12 - vshr.s64 q12,q15,#25 - vadd.i64 q3,q3,q13 - vshl.i64 q13,q13,#26 - vadd.i64 q14,q3,q0 - vadd.i64 q8,q8,q12 - vshl.i64 q12,q12,#25 - vadd.i64 q15,q8,q1 - add r2,r2,#8 - vsub.i64 q2,q2,q13 - vshr.s64 q13,q14,#25 - vsub.i64 q7,q7,q12 - vshr.s64 q12,q15,#26 - vadd.i64 q14,q13,q13 - vadd.i64 q9,q9,q12 - vtrn.32 d12,d14 - vshl.i64 q12,q12,#26 - vtrn.32 d13,d15 - vadd.i64 q0,q9,q0 - vadd.i64 q4,q4,q14 - vst1.8 d12,[r2,: 64]! 
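The and r6,r6,#248 / and r6,r6,#127 / orr r6,r6,#64 sequence above is the standard X25519 secret-key clamp, the same step the deleted C code performs via normalize_secret(). A minimal C sketch of that operation (helper name hypothetical, illustrative only):

static void clamp_secret_sketch(u8 e[32])
{
	e[0] &= 248;	/* clear the 3 low bits: scalar becomes a multiple of the cofactor 8 */
	e[31] &= 127;	/* clear bit 255 */
	e[31] |= 64;	/* set bit 254, so the ladder always runs over a fixed top bit */
}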
- vshl.i64 q6,q13,#4 - vsub.i64 q7,q8,q12 - vshr.s64 q0,q0,#25 - vadd.i64 q4,q4,q6 - vadd.i64 q6,q10,q0 - vshl.i64 q0,q0,#25 - vadd.i64 q8,q6,q1 - vadd.i64 q4,q4,q13 - vshl.i64 q10,q13,#25 - vadd.i64 q1,q4,q1 - vsub.i64 q0,q9,q0 - vshr.s64 q8,q8,#26 - vsub.i64 q3,q3,q10 - vtrn.32 d14,d0 - vshr.s64 q1,q1,#26 - vtrn.32 d15,d1 - vadd.i64 q0,q11,q8 - vst1.8 d14,[r2,: 64] - vshl.i64 q7,q8,#26 - vadd.i64 q5,q5,q1 - vtrn.32 d4,d6 - vshl.i64 q1,q1,#26 - vtrn.32 d5,d7 - vsub.i64 q3,q6,q7 - add r2,r2,#16 - vsub.i64 q1,q4,q1 - vst1.8 d4,[r2,: 64] - vtrn.32 d6,d0 - vtrn.32 d7,d1 - sub r2,r2,#8 - vtrn.32 d2,d10 - vtrn.32 d3,d11 - vst1.8 d6,[r2,: 64] - sub r2,r2,#24 - vst1.8 d2,[r2,: 64] - add r2,r3,#96 - vmov.i32 q0,#0 - vmov.i64 d2,#0xff - vmov.i64 d3,#0 - vshr.u32 q1,q1,#7 - vst1.8 {d2-d3},[r2,: 128]! - vst1.8 {d0-d1},[r2,: 128]! - vst1.8 d0,[r2,: 64] - add r2,r3,#144 - vmov.i32 q0,#0 - vst1.8 {d0-d1},[r2,: 128]! - vst1.8 {d0-d1},[r2,: 128]! - vst1.8 d0,[r2,: 64] - add r2,r3,#240 - vmov.i32 q0,#0 - vmov.i64 d2,#0xff - vmov.i64 d3,#0 - vshr.u32 q1,q1,#7 - vst1.8 {d2-d3},[r2,: 128]! - vst1.8 {d0-d1},[r2,: 128]! - vst1.8 d0,[r2,: 64] - add r2,r3,#48 - add r6,r3,#192 - vld1.8 {d0-d1},[r2,: 128]! - vld1.8 {d2-d3},[r2,: 128]! - vld1.8 {d4},[r2,: 64] - vst1.8 {d0-d1},[r6,: 128]! - vst1.8 {d2-d3},[r6,: 128]! - vst1.8 d4,[r6,: 64] - .Lmainloop: - mov r2,r5,LSR #3 - and r6,r5,#7 - ldrb r2,[r1,r2] - mov r2,r2,LSR r6 - and r2,r2,#1 - str r5,[sp,#488] - eor r4,r4,r2 - str r2,[sp,#492] - neg r2,r4 - add r4,r3,#96 - add r5,r3,#192 - add r6,r3,#144 - vld1.8 {d8-d9},[r4,: 128]! - add r7,r3,#240 - vld1.8 {d10-d11},[r5,: 128]! - veor q6,q4,q5 - vld1.8 {d14-d15},[r6,: 128]! - vdup.i32 q8,r2 - vld1.8 {d18-d19},[r7,: 128]! - veor q10,q7,q9 - vld1.8 {d22-d23},[r4,: 128]! - vand q6,q6,q8 - vld1.8 {d24-d25},[r5,: 128]! - vand q10,q10,q8 - vld1.8 {d26-d27},[r6,: 128]! - veor q4,q4,q6 - vld1.8 {d28-d29},[r7,: 128]! - veor q5,q5,q6 - vld1.8 {d0},[r4,: 64] - veor q6,q7,q10 - vld1.8 {d2},[r5,: 64] - veor q7,q9,q10 - vld1.8 {d4},[r6,: 64] - veor q9,q11,q12 - vld1.8 {d6},[r7,: 64] - veor q10,q0,q1 - sub r2,r4,#32 - vand q9,q9,q8 - sub r4,r5,#32 - vand q10,q10,q8 - sub r5,r6,#32 - veor q11,q11,q9 - sub r6,r7,#32 - veor q0,q0,q10 - veor q9,q12,q9 - veor q1,q1,q10 - veor q10,q13,q14 - veor q12,q2,q3 - vand q10,q10,q8 - vand q8,q12,q8 - veor q12,q13,q10 - veor q2,q2,q8 - veor q10,q14,q10 - veor q3,q3,q8 - vadd.i32 q8,q4,q6 - vsub.i32 q4,q4,q6 - vst1.8 {d16-d17},[r2,: 128]! - vadd.i32 q6,q11,q12 - vst1.8 {d8-d9},[r5,: 128]! - vsub.i32 q4,q11,q12 - vst1.8 {d12-d13},[r2,: 128]! - vadd.i32 q6,q0,q2 - vst1.8 {d8-d9},[r5,: 128]! - vsub.i32 q0,q0,q2 - vst1.8 d12,[r2,: 64] - vadd.i32 q2,q5,q7 - vst1.8 d0,[r5,: 64] - vsub.i32 q0,q5,q7 - vst1.8 {d4-d5},[r4,: 128]! - vadd.i32 q2,q9,q10 - vst1.8 {d0-d1},[r6,: 128]! - vsub.i32 q0,q9,q10 - vst1.8 {d4-d5},[r4,: 128]! - vadd.i32 q2,q1,q3 - vst1.8 {d0-d1},[r6,: 128]! - vsub.i32 q0,q1,q3 - vst1.8 d4,[r4,: 64] - vst1.8 d0,[r6,: 64] - add r2,sp,#544 - add r4,r3,#96 - add r5,r3,#144 - vld1.8 {d0-d1},[r2,: 128] - vld1.8 {d2-d3},[r4,: 128]! - vld1.8 {d4-d5},[r5,: 128]! - vzip.i32 q1,q2 - vld1.8 {d6-d7},[r4,: 128]! - vld1.8 {d8-d9},[r5,: 128]! 
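At the top of .Lmainloop above, one secret bit is extracted (ldrb/LSR/and #1), folded into a running swap flag (eor r4,r4,r2), and its negation broadcast into a vector mask (neg/vdup.i32) so the veor/vand/veor sequence swaps the two working points without any secret-dependent branch or address. A plain-C sketch of that masked swap, equivalent to fe_cswap() in the deleted fiat32 code (helper name hypothetical):

static void cswap_sketch(u32 f[10], u32 g[10], unsigned int b)
{
	u32 mask = 0U - b;	/* b in {0,1} becomes all-zeros or all-ones */
	unsigned int i;

	for (i = 0; i < 10; ++i) {
		u32 x = (f[i] ^ g[i]) & mask;	/* the limb difference, or zero */
		f[i] ^= x;
		g[i] ^= x;
	}
}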
- vshl.i32 q5,q1,#1 - vzip.i32 q3,q4 - vshl.i32 q6,q2,#1 - vld1.8 {d14},[r4,: 64] - vshl.i32 q8,q3,#1 - vld1.8 {d15},[r5,: 64] - vshl.i32 q9,q4,#1 - vmul.i32 d21,d7,d1 - vtrn.32 d14,d15 - vmul.i32 q11,q4,q0 - vmul.i32 q0,q7,q0 - vmull.s32 q12,d2,d2 - vmlal.s32 q12,d11,d1 - vmlal.s32 q12,d12,d0 - vmlal.s32 q12,d13,d23 - vmlal.s32 q12,d16,d22 - vmlal.s32 q12,d7,d21 - vmull.s32 q10,d2,d11 - vmlal.s32 q10,d4,d1 - vmlal.s32 q10,d13,d0 - vmlal.s32 q10,d6,d23 - vmlal.s32 q10,d17,d22 - vmull.s32 q13,d10,d4 - vmlal.s32 q13,d11,d3 - vmlal.s32 q13,d13,d1 - vmlal.s32 q13,d16,d0 - vmlal.s32 q13,d17,d23 - vmlal.s32 q13,d8,d22 - vmull.s32 q1,d10,d5 - vmlal.s32 q1,d11,d4 - vmlal.s32 q1,d6,d1 - vmlal.s32 q1,d17,d0 - vmlal.s32 q1,d8,d23 - vmull.s32 q14,d10,d6 - vmlal.s32 q14,d11,d13 - vmlal.s32 q14,d4,d4 - vmlal.s32 q14,d17,d1 - vmlal.s32 q14,d18,d0 - vmlal.s32 q14,d9,d23 - vmull.s32 q11,d10,d7 - vmlal.s32 q11,d11,d6 - vmlal.s32 q11,d12,d5 - vmlal.s32 q11,d8,d1 - vmlal.s32 q11,d19,d0 - vmull.s32 q15,d10,d8 - vmlal.s32 q15,d11,d17 - vmlal.s32 q15,d12,d6 - vmlal.s32 q15,d13,d5 - vmlal.s32 q15,d19,d1 - vmlal.s32 q15,d14,d0 - vmull.s32 q2,d10,d9 - vmlal.s32 q2,d11,d8 - vmlal.s32 q2,d12,d7 - vmlal.s32 q2,d13,d6 - vmlal.s32 q2,d14,d1 - vmull.s32 q0,d15,d1 - vmlal.s32 q0,d10,d14 - vmlal.s32 q0,d11,d19 - vmlal.s32 q0,d12,d8 - vmlal.s32 q0,d13,d17 - vmlal.s32 q0,d6,d6 - add r2,sp,#512 - vld1.8 {d18-d19},[r2,: 128] - vmull.s32 q3,d16,d7 - vmlal.s32 q3,d10,d15 - vmlal.s32 q3,d11,d14 - vmlal.s32 q3,d12,d9 - vmlal.s32 q3,d13,d8 - add r2,sp,#528 - vld1.8 {d8-d9},[r2,: 128] - vadd.i64 q5,q12,q9 - vadd.i64 q6,q15,q9 - vshr.s64 q5,q5,#26 - vshr.s64 q6,q6,#26 - vadd.i64 q7,q10,q5 - vshl.i64 q5,q5,#26 - vadd.i64 q8,q7,q4 - vadd.i64 q2,q2,q6 - vshl.i64 q6,q6,#26 - vadd.i64 q10,q2,q4 - vsub.i64 q5,q12,q5 - vshr.s64 q8,q8,#25 - vsub.i64 q6,q15,q6 - vshr.s64 q10,q10,#25 - vadd.i64 q12,q13,q8 - vshl.i64 q8,q8,#25 - vadd.i64 q13,q12,q9 - vadd.i64 q0,q0,q10 - vsub.i64 q7,q7,q8 - vshr.s64 q8,q13,#26 - vshl.i64 q10,q10,#25 - vadd.i64 q13,q0,q9 - vadd.i64 q1,q1,q8 - vshl.i64 q8,q8,#26 - vadd.i64 q15,q1,q4 - vsub.i64 q2,q2,q10 - vshr.s64 q10,q13,#26 - vsub.i64 q8,q12,q8 - vshr.s64 q12,q15,#25 - vadd.i64 q3,q3,q10 - vshl.i64 q10,q10,#26 - vadd.i64 q13,q3,q4 - vadd.i64 q14,q14,q12 - add r2,r3,#288 - vshl.i64 q12,q12,#25 - add r4,r3,#336 - vadd.i64 q15,q14,q9 - add r2,r2,#8 - vsub.i64 q0,q0,q10 - add r4,r4,#8 - vshr.s64 q10,q13,#25 - vsub.i64 q1,q1,q12 - vshr.s64 q12,q15,#26 - vadd.i64 q13,q10,q10 - vadd.i64 q11,q11,q12 - vtrn.32 d16,d2 - vshl.i64 q12,q12,#26 - vtrn.32 d17,d3 - vadd.i64 q1,q11,q4 - vadd.i64 q4,q5,q13 - vst1.8 d16,[r2,: 64]! - vshl.i64 q5,q10,#4 - vst1.8 d17,[r4,: 64]! - vsub.i64 q8,q14,q12 - vshr.s64 q1,q1,#25 - vadd.i64 q4,q4,q5 - vadd.i64 q5,q6,q1 - vshl.i64 q1,q1,#25 - vadd.i64 q6,q5,q9 - vadd.i64 q4,q4,q10 - vshl.i64 q10,q10,#25 - vadd.i64 q9,q4,q9 - vsub.i64 q1,q11,q1 - vshr.s64 q6,q6,#26 - vsub.i64 q3,q3,q10 - vtrn.32 d16,d2 - vshr.s64 q9,q9,#26 - vtrn.32 d17,d3 - vadd.i64 q1,q2,q6 - vst1.8 d16,[r2,: 64] - vshl.i64 q2,q6,#26 - vst1.8 d17,[r4,: 64] - vadd.i64 q6,q7,q9 - vtrn.32 d0,d6 - vshl.i64 q7,q9,#26 - vtrn.32 d1,d7 - vsub.i64 q2,q5,q2 - add r2,r2,#16 - vsub.i64 q3,q4,q7 - vst1.8 d0,[r2,: 64] - add r4,r4,#16 - vst1.8 d1,[r4,: 64] - vtrn.32 d4,d2 - vtrn.32 d5,d3 - sub r2,r2,#8 - sub r4,r4,#8 - vtrn.32 d6,d12 - vtrn.32 d7,d13 - vst1.8 d4,[r2,: 64] - vst1.8 d5,[r4,: 64] - sub r2,r2,#24 - sub r4,r4,#24 - vst1.8 d6,[r2,: 64] - vst1.8 d7,[r4,: 64] - add r2,r3,#240 - add r4,r3,#96 - vld1.8 {d0-d1},[r4,: 128]! 
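Throughout this file, as in the fiat32 C code it replaced (x59 = x48 + (x58 << 4), x60 = x59 + (x58 << 1), x61 = x60 + x58), multiplication by 19 is strength-reduced to shifts and adds: c*16 + c*2 + c = 19c. That is how carries out of the top limb wrap around, since 2^255 = 19 (mod 2^255 - 19). A rough scalar sketch of the carry/reduce chain that the vshr.s64/vand/vadd.i64 sequences above vectorize, assuming the 10-limb radix-2^25.5 layout where limb widths alternate 26 and 25 bits (names hypothetical, illustrative only):

static void carry_reduce_sketch(u64 t[10], u32 out[10])
{
	unsigned int i, bits;

	for (i = 0; i < 9; ++i) {
		bits = (i & 1) ? 25 : 26;	/* limb widths alternate 26,25,26,... */
		t[i + 1] += t[i] >> bits;	/* propagate the carry forward */
		t[i] &= (1ULL << bits) - 1;
	}
	t[0] += 19 * (t[9] >> 25);		/* top carry wraps around times 19 */
	t[9] &= (1ULL << 25) - 1;
	t[1] += t[0] >> 26;			/* one final ripple */
	t[0] &= (1ULL << 26) - 1;

	for (i = 0; i < 10; ++i)
		out[i] = (u32)t[i];		/* limbs now fit their widths again */
}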
- vld1.8 {d2-d3},[r4,: 128]! - vld1.8 {d4},[r4,: 64] - add r4,r3,#144 - vld1.8 {d6-d7},[r4,: 128]! - vtrn.32 q0,q3 - vld1.8 {d8-d9},[r4,: 128]! - vshl.i32 q5,q0,#4 - vtrn.32 q1,q4 - vshl.i32 q6,q3,#4 - vadd.i32 q5,q5,q0 - vadd.i32 q6,q6,q3 - vshl.i32 q7,q1,#4 - vld1.8 {d5},[r4,: 64] - vshl.i32 q8,q4,#4 - vtrn.32 d4,d5 - vadd.i32 q7,q7,q1 - vadd.i32 q8,q8,q4 - vld1.8 {d18-d19},[r2,: 128]! - vshl.i32 q10,q2,#4 - vld1.8 {d22-d23},[r2,: 128]! - vadd.i32 q10,q10,q2 - vld1.8 {d24},[r2,: 64] - vadd.i32 q5,q5,q0 - add r2,r3,#192 - vld1.8 {d26-d27},[r2,: 128]! - vadd.i32 q6,q6,q3 - vld1.8 {d28-d29},[r2,: 128]! - vadd.i32 q8,q8,q4 - vld1.8 {d25},[r2,: 64] - vadd.i32 q10,q10,q2 - vtrn.32 q9,q13 - vadd.i32 q7,q7,q1 - vadd.i32 q5,q5,q0 - vtrn.32 q11,q14 - vadd.i32 q6,q6,q3 - add r2,sp,#560 - vadd.i32 q10,q10,q2 - vtrn.32 d24,d25 - vst1.8 {d12-d13},[r2,: 128] - vshl.i32 q6,q13,#1 - add r2,sp,#576 - vst1.8 {d20-d21},[r2,: 128] - vshl.i32 q10,q14,#1 - add r2,sp,#592 - vst1.8 {d12-d13},[r2,: 128] - vshl.i32 q15,q12,#1 - vadd.i32 q8,q8,q4 - vext.32 d10,d31,d30,#0 - vadd.i32 q7,q7,q1 - add r2,sp,#608 - vst1.8 {d16-d17},[r2,: 128] - vmull.s32 q8,d18,d5 - vmlal.s32 q8,d26,d4 - vmlal.s32 q8,d19,d9 - vmlal.s32 q8,d27,d3 - vmlal.s32 q8,d22,d8 - vmlal.s32 q8,d28,d2 - vmlal.s32 q8,d23,d7 - vmlal.s32 q8,d29,d1 - vmlal.s32 q8,d24,d6 - vmlal.s32 q8,d25,d0 - add r2,sp,#624 - vst1.8 {d14-d15},[r2,: 128] - vmull.s32 q2,d18,d4 - vmlal.s32 q2,d12,d9 - vmlal.s32 q2,d13,d8 - vmlal.s32 q2,d19,d3 - vmlal.s32 q2,d22,d2 - vmlal.s32 q2,d23,d1 - vmlal.s32 q2,d24,d0 - add r2,sp,#640 - vst1.8 {d20-d21},[r2,: 128] - vmull.s32 q7,d18,d9 - vmlal.s32 q7,d26,d3 - vmlal.s32 q7,d19,d8 - vmlal.s32 q7,d27,d2 - vmlal.s32 q7,d22,d7 - vmlal.s32 q7,d28,d1 - vmlal.s32 q7,d23,d6 - vmlal.s32 q7,d29,d0 - add r2,sp,#656 - vst1.8 {d10-d11},[r2,: 128] - vmull.s32 q5,d18,d3 - vmlal.s32 q5,d19,d2 - vmlal.s32 q5,d22,d1 - vmlal.s32 q5,d23,d0 - vmlal.s32 q5,d12,d8 - add r2,sp,#672 - vst1.8 {d16-d17},[r2,: 128] - vmull.s32 q4,d18,d8 - vmlal.s32 q4,d26,d2 - vmlal.s32 q4,d19,d7 - vmlal.s32 q4,d27,d1 - vmlal.s32 q4,d22,d6 - vmlal.s32 q4,d28,d0 - vmull.s32 q8,d18,d7 - vmlal.s32 q8,d26,d1 - vmlal.s32 q8,d19,d6 - vmlal.s32 q8,d27,d0 - add r2,sp,#576 - vld1.8 {d20-d21},[r2,: 128] - vmlal.s32 q7,d24,d21 - vmlal.s32 q7,d25,d20 - vmlal.s32 q4,d23,d21 - vmlal.s32 q4,d29,d20 - vmlal.s32 q8,d22,d21 - vmlal.s32 q8,d28,d20 - vmlal.s32 q5,d24,d20 - add r2,sp,#576 - vst1.8 {d14-d15},[r2,: 128] - vmull.s32 q7,d18,d6 - vmlal.s32 q7,d26,d0 - add r2,sp,#656 - vld1.8 {d30-d31},[r2,: 128] - vmlal.s32 q2,d30,d21 - vmlal.s32 q7,d19,d21 - vmlal.s32 q7,d27,d20 - add r2,sp,#624 - vld1.8 {d26-d27},[r2,: 128] - vmlal.s32 q4,d25,d27 - vmlal.s32 q8,d29,d27 - vmlal.s32 q8,d25,d26 - vmlal.s32 q7,d28,d27 - vmlal.s32 q7,d29,d26 - add r2,sp,#608 - vld1.8 {d28-d29},[r2,: 128] - vmlal.s32 q4,d24,d29 - vmlal.s32 q8,d23,d29 - vmlal.s32 q8,d24,d28 - vmlal.s32 q7,d22,d29 - vmlal.s32 q7,d23,d28 - add r2,sp,#608 - vst1.8 {d8-d9},[r2,: 128] - add r2,sp,#560 - vld1.8 {d8-d9},[r2,: 128] - vmlal.s32 q7,d24,d9 - vmlal.s32 q7,d25,d31 - vmull.s32 q1,d18,d2 - vmlal.s32 q1,d19,d1 - vmlal.s32 q1,d22,d0 - vmlal.s32 q1,d24,d27 - vmlal.s32 q1,d23,d20 - vmlal.s32 q1,d12,d7 - vmlal.s32 q1,d13,d6 - vmull.s32 q6,d18,d1 - vmlal.s32 q6,d19,d0 - vmlal.s32 q6,d23,d27 - vmlal.s32 q6,d22,d20 - vmlal.s32 q6,d24,d26 - vmull.s32 q0,d18,d0 - vmlal.s32 q0,d22,d27 - vmlal.s32 q0,d23,d26 - vmlal.s32 q0,d24,d31 - vmlal.s32 q0,d19,d20 - add r2,sp,#640 - vld1.8 {d18-d19},[r2,: 128] - vmlal.s32 q2,d18,d7 - vmlal.s32 q2,d19,d6 - 
vmlal.s32 q5,d18,d6 - vmlal.s32 q5,d19,d21 - vmlal.s32 q1,d18,d21 - vmlal.s32 q1,d19,d29 - vmlal.s32 q0,d18,d28 - vmlal.s32 q0,d19,d9 - vmlal.s32 q6,d18,d29 - vmlal.s32 q6,d19,d28 - add r2,sp,#592 - vld1.8 {d18-d19},[r2,: 128] - add r2,sp,#512 - vld1.8 {d22-d23},[r2,: 128] - vmlal.s32 q5,d19,d7 - vmlal.s32 q0,d18,d21 - vmlal.s32 q0,d19,d29 - vmlal.s32 q6,d18,d6 - add r2,sp,#528 - vld1.8 {d6-d7},[r2,: 128] - vmlal.s32 q6,d19,d21 - add r2,sp,#576 - vld1.8 {d18-d19},[r2,: 128] - vmlal.s32 q0,d30,d8 - add r2,sp,#672 - vld1.8 {d20-d21},[r2,: 128] - vmlal.s32 q5,d30,d29 - add r2,sp,#608 - vld1.8 {d24-d25},[r2,: 128] - vmlal.s32 q1,d30,d28 - vadd.i64 q13,q0,q11 - vadd.i64 q14,q5,q11 - vmlal.s32 q6,d30,d9 - vshr.s64 q4,q13,#26 - vshr.s64 q13,q14,#26 - vadd.i64 q7,q7,q4 - vshl.i64 q4,q4,#26 - vadd.i64 q14,q7,q3 - vadd.i64 q9,q9,q13 - vshl.i64 q13,q13,#26 - vadd.i64 q15,q9,q3 - vsub.i64 q0,q0,q4 - vshr.s64 q4,q14,#25 - vsub.i64 q5,q5,q13 - vshr.s64 q13,q15,#25 - vadd.i64 q6,q6,q4 - vshl.i64 q4,q4,#25 - vadd.i64 q14,q6,q11 - vadd.i64 q2,q2,q13 - vsub.i64 q4,q7,q4 - vshr.s64 q7,q14,#26 - vshl.i64 q13,q13,#25 - vadd.i64 q14,q2,q11 - vadd.i64 q8,q8,q7 - vshl.i64 q7,q7,#26 - vadd.i64 q15,q8,q3 - vsub.i64 q9,q9,q13 - vshr.s64 q13,q14,#26 - vsub.i64 q6,q6,q7 - vshr.s64 q7,q15,#25 - vadd.i64 q10,q10,q13 - vshl.i64 q13,q13,#26 - vadd.i64 q14,q10,q3 - vadd.i64 q1,q1,q7 - add r2,r3,#144 - vshl.i64 q7,q7,#25 - add r4,r3,#96 - vadd.i64 q15,q1,q11 - add r2,r2,#8 - vsub.i64 q2,q2,q13 - add r4,r4,#8 - vshr.s64 q13,q14,#25 - vsub.i64 q7,q8,q7 - vshr.s64 q8,q15,#26 - vadd.i64 q14,q13,q13 - vadd.i64 q12,q12,q8 - vtrn.32 d12,d14 - vshl.i64 q8,q8,#26 - vtrn.32 d13,d15 - vadd.i64 q3,q12,q3 - vadd.i64 q0,q0,q14 - vst1.8 d12,[r2,: 64]! - vshl.i64 q7,q13,#4 - vst1.8 d13,[r4,: 64]! - vsub.i64 q1,q1,q8 - vshr.s64 q3,q3,#25 - vadd.i64 q0,q0,q7 - vadd.i64 q5,q5,q3 - vshl.i64 q3,q3,#25 - vadd.i64 q6,q5,q11 - vadd.i64 q0,q0,q13 - vshl.i64 q7,q13,#25 - vadd.i64 q8,q0,q11 - vsub.i64 q3,q12,q3 - vshr.s64 q6,q6,#26 - vsub.i64 q7,q10,q7 - vtrn.32 d2,d6 - vshr.s64 q8,q8,#26 - vtrn.32 d3,d7 - vadd.i64 q3,q9,q6 - vst1.8 d2,[r2,: 64] - vshl.i64 q6,q6,#26 - vst1.8 d3,[r4,: 64] - vadd.i64 q1,q4,q8 - vtrn.32 d4,d14 - vshl.i64 q4,q8,#26 - vtrn.32 d5,d15 - vsub.i64 q5,q5,q6 - add r2,r2,#16 - vsub.i64 q0,q0,q4 - vst1.8 d4,[r2,: 64] - add r4,r4,#16 - vst1.8 d5,[r4,: 64] - vtrn.32 d10,d6 - vtrn.32 d11,d7 - sub r2,r2,#8 - sub r4,r4,#8 - vtrn.32 d0,d2 - vtrn.32 d1,d3 - vst1.8 d10,[r2,: 64] - vst1.8 d11,[r4,: 64] - sub r2,r2,#24 - sub r4,r4,#24 - vst1.8 d0,[r2,: 64] - vst1.8 d1,[r4,: 64] - add r2,r3,#288 - add r4,r3,#336 - vld1.8 {d0-d1},[r2,: 128]! - vld1.8 {d2-d3},[r4,: 128]! - vsub.i32 q0,q0,q1 - vld1.8 {d2-d3},[r2,: 128]! - vld1.8 {d4-d5},[r4,: 128]! - vsub.i32 q1,q1,q2 - add r5,r3,#240 - vld1.8 {d4},[r2,: 64] - vld1.8 {d6},[r4,: 64] - vsub.i32 q2,q2,q3 - vst1.8 {d0-d1},[r5,: 128]! - vst1.8 {d2-d3},[r5,: 128]! - vst1.8 d4,[r5,: 64] - add r2,r3,#144 - add r4,r3,#96 - add r5,r3,#144 - add r6,r3,#192 - vld1.8 {d0-d1},[r2,: 128]! - vld1.8 {d2-d3},[r4,: 128]! - vsub.i32 q2,q0,q1 - vadd.i32 q0,q0,q1 - vld1.8 {d2-d3},[r2,: 128]! - vld1.8 {d6-d7},[r4,: 128]! - vsub.i32 q4,q1,q3 - vadd.i32 q1,q1,q3 - vld1.8 {d6},[r2,: 64] - vld1.8 {d10},[r4,: 64] - vsub.i32 q6,q3,q5 - vadd.i32 q3,q3,q5 - vst1.8 {d4-d5},[r5,: 128]! - vst1.8 {d0-d1},[r6,: 128]! - vst1.8 {d8-d9},[r5,: 128]! - vst1.8 {d2-d3},[r6,: 128]! - vst1.8 d12,[r5,: 64] - vst1.8 d6,[r6,: 64] - add r2,r3,#0 - add r4,r3,#240 - vld1.8 {d0-d1},[r4,: 128]! - vld1.8 {d2-d3},[r4,: 128]! 
- vld1.8 {d4},[r4,: 64] - add r4,r3,#336 - vld1.8 {d6-d7},[r4,: 128]! - vtrn.32 q0,q3 - vld1.8 {d8-d9},[r4,: 128]! - vshl.i32 q5,q0,#4 - vtrn.32 q1,q4 - vshl.i32 q6,q3,#4 - vadd.i32 q5,q5,q0 - vadd.i32 q6,q6,q3 - vshl.i32 q7,q1,#4 - vld1.8 {d5},[r4,: 64] - vshl.i32 q8,q4,#4 - vtrn.32 d4,d5 - vadd.i32 q7,q7,q1 - vadd.i32 q8,q8,q4 - vld1.8 {d18-d19},[r2,: 128]! - vshl.i32 q10,q2,#4 - vld1.8 {d22-d23},[r2,: 128]! - vadd.i32 q10,q10,q2 - vld1.8 {d24},[r2,: 64] - vadd.i32 q5,q5,q0 - add r2,r3,#288 - vld1.8 {d26-d27},[r2,: 128]! - vadd.i32 q6,q6,q3 - vld1.8 {d28-d29},[r2,: 128]! - vadd.i32 q8,q8,q4 - vld1.8 {d25},[r2,: 64] - vadd.i32 q10,q10,q2 - vtrn.32 q9,q13 - vadd.i32 q7,q7,q1 - vadd.i32 q5,q5,q0 - vtrn.32 q11,q14 - vadd.i32 q6,q6,q3 - add r2,sp,#560 - vadd.i32 q10,q10,q2 - vtrn.32 d24,d25 - vst1.8 {d12-d13},[r2,: 128] - vshl.i32 q6,q13,#1 - add r2,sp,#576 - vst1.8 {d20-d21},[r2,: 128] - vshl.i32 q10,q14,#1 - add r2,sp,#592 - vst1.8 {d12-d13},[r2,: 128] - vshl.i32 q15,q12,#1 - vadd.i32 q8,q8,q4 - vext.32 d10,d31,d30,#0 - vadd.i32 q7,q7,q1 - add r2,sp,#608 - vst1.8 {d16-d17},[r2,: 128] - vmull.s32 q8,d18,d5 - vmlal.s32 q8,d26,d4 - vmlal.s32 q8,d19,d9 - vmlal.s32 q8,d27,d3 - vmlal.s32 q8,d22,d8 - vmlal.s32 q8,d28,d2 - vmlal.s32 q8,d23,d7 - vmlal.s32 q8,d29,d1 - vmlal.s32 q8,d24,d6 - vmlal.s32 q8,d25,d0 - add r2,sp,#624 - vst1.8 {d14-d15},[r2,: 128] - vmull.s32 q2,d18,d4 - vmlal.s32 q2,d12,d9 - vmlal.s32 q2,d13,d8 - vmlal.s32 q2,d19,d3 - vmlal.s32 q2,d22,d2 - vmlal.s32 q2,d23,d1 - vmlal.s32 q2,d24,d0 - add r2,sp,#640 - vst1.8 {d20-d21},[r2,: 128] - vmull.s32 q7,d18,d9 - vmlal.s32 q7,d26,d3 - vmlal.s32 q7,d19,d8 - vmlal.s32 q7,d27,d2 - vmlal.s32 q7,d22,d7 - vmlal.s32 q7,d28,d1 - vmlal.s32 q7,d23,d6 - vmlal.s32 q7,d29,d0 - add r2,sp,#656 - vst1.8 {d10-d11},[r2,: 128] - vmull.s32 q5,d18,d3 - vmlal.s32 q5,d19,d2 - vmlal.s32 q5,d22,d1 - vmlal.s32 q5,d23,d0 - vmlal.s32 q5,d12,d8 - add r2,sp,#672 - vst1.8 {d16-d17},[r2,: 128] - vmull.s32 q4,d18,d8 - vmlal.s32 q4,d26,d2 - vmlal.s32 q4,d19,d7 - vmlal.s32 q4,d27,d1 - vmlal.s32 q4,d22,d6 - vmlal.s32 q4,d28,d0 - vmull.s32 q8,d18,d7 - vmlal.s32 q8,d26,d1 - vmlal.s32 q8,d19,d6 - vmlal.s32 q8,d27,d0 - add r2,sp,#576 - vld1.8 {d20-d21},[r2,: 128] - vmlal.s32 q7,d24,d21 - vmlal.s32 q7,d25,d20 - vmlal.s32 q4,d23,d21 - vmlal.s32 q4,d29,d20 - vmlal.s32 q8,d22,d21 - vmlal.s32 q8,d28,d20 - vmlal.s32 q5,d24,d20 - add r2,sp,#576 - vst1.8 {d14-d15},[r2,: 128] - vmull.s32 q7,d18,d6 - vmlal.s32 q7,d26,d0 - add r2,sp,#656 - vld1.8 {d30-d31},[r2,: 128] - vmlal.s32 q2,d30,d21 - vmlal.s32 q7,d19,d21 - vmlal.s32 q7,d27,d20 - add r2,sp,#624 - vld1.8 {d26-d27},[r2,: 128] - vmlal.s32 q4,d25,d27 - vmlal.s32 q8,d29,d27 - vmlal.s32 q8,d25,d26 - vmlal.s32 q7,d28,d27 - vmlal.s32 q7,d29,d26 - add r2,sp,#608 - vld1.8 {d28-d29},[r2,: 128] - vmlal.s32 q4,d24,d29 - vmlal.s32 q8,d23,d29 - vmlal.s32 q8,d24,d28 - vmlal.s32 q7,d22,d29 - vmlal.s32 q7,d23,d28 - add r2,sp,#608 - vst1.8 {d8-d9},[r2,: 128] - add r2,sp,#560 - vld1.8 {d8-d9},[r2,: 128] - vmlal.s32 q7,d24,d9 - vmlal.s32 q7,d25,d31 - vmull.s32 q1,d18,d2 - vmlal.s32 q1,d19,d1 - vmlal.s32 q1,d22,d0 - vmlal.s32 q1,d24,d27 - vmlal.s32 q1,d23,d20 - vmlal.s32 q1,d12,d7 - vmlal.s32 q1,d13,d6 - vmull.s32 q6,d18,d1 - vmlal.s32 q6,d19,d0 - vmlal.s32 q6,d23,d27 - vmlal.s32 q6,d22,d20 - vmlal.s32 q6,d24,d26 - vmull.s32 q0,d18,d0 - vmlal.s32 q0,d22,d27 - vmlal.s32 q0,d23,d26 - vmlal.s32 q0,d24,d31 - vmlal.s32 q0,d19,d20 - add r2,sp,#640 - vld1.8 {d18-d19},[r2,: 128] - vmlal.s32 q2,d18,d7 - vmlal.s32 q2,d19,d6 - vmlal.s32 q5,d18,d6 - vmlal.s32 
q5,d19,d21 - vmlal.s32 q1,d18,d21 - vmlal.s32 q1,d19,d29 - vmlal.s32 q0,d18,d28 - vmlal.s32 q0,d19,d9 - vmlal.s32 q6,d18,d29 - vmlal.s32 q6,d19,d28 - add r2,sp,#592 - vld1.8 {d18-d19},[r2,: 128] - add r2,sp,#512 - vld1.8 {d22-d23},[r2,: 128] - vmlal.s32 q5,d19,d7 - vmlal.s32 q0,d18,d21 - vmlal.s32 q0,d19,d29 - vmlal.s32 q6,d18,d6 - add r2,sp,#528 - vld1.8 {d6-d7},[r2,: 128] - vmlal.s32 q6,d19,d21 - add r2,sp,#576 - vld1.8 {d18-d19},[r2,: 128] - vmlal.s32 q0,d30,d8 - add r2,sp,#672 - vld1.8 {d20-d21},[r2,: 128] - vmlal.s32 q5,d30,d29 - add r2,sp,#608 - vld1.8 {d24-d25},[r2,: 128] - vmlal.s32 q1,d30,d28 - vadd.i64 q13,q0,q11 - vadd.i64 q14,q5,q11 - vmlal.s32 q6,d30,d9 - vshr.s64 q4,q13,#26 - vshr.s64 q13,q14,#26 - vadd.i64 q7,q7,q4 - vshl.i64 q4,q4,#26 - vadd.i64 q14,q7,q3 - vadd.i64 q9,q9,q13 - vshl.i64 q13,q13,#26 - vadd.i64 q15,q9,q3 - vsub.i64 q0,q0,q4 - vshr.s64 q4,q14,#25 - vsub.i64 q5,q5,q13 - vshr.s64 q13,q15,#25 - vadd.i64 q6,q6,q4 - vshl.i64 q4,q4,#25 - vadd.i64 q14,q6,q11 - vadd.i64 q2,q2,q13 - vsub.i64 q4,q7,q4 - vshr.s64 q7,q14,#26 - vshl.i64 q13,q13,#25 - vadd.i64 q14,q2,q11 - vadd.i64 q8,q8,q7 - vshl.i64 q7,q7,#26 - vadd.i64 q15,q8,q3 - vsub.i64 q9,q9,q13 - vshr.s64 q13,q14,#26 - vsub.i64 q6,q6,q7 - vshr.s64 q7,q15,#25 - vadd.i64 q10,q10,q13 - vshl.i64 q13,q13,#26 - vadd.i64 q14,q10,q3 - vadd.i64 q1,q1,q7 - add r2,r3,#288 - vshl.i64 q7,q7,#25 - add r4,r3,#96 - vadd.i64 q15,q1,q11 - add r2,r2,#8 - vsub.i64 q2,q2,q13 - add r4,r4,#8 - vshr.s64 q13,q14,#25 - vsub.i64 q7,q8,q7 - vshr.s64 q8,q15,#26 - vadd.i64 q14,q13,q13 - vadd.i64 q12,q12,q8 - vtrn.32 d12,d14 - vshl.i64 q8,q8,#26 - vtrn.32 d13,d15 - vadd.i64 q3,q12,q3 - vadd.i64 q0,q0,q14 - vst1.8 d12,[r2,: 64]! - vshl.i64 q7,q13,#4 - vst1.8 d13,[r4,: 64]! - vsub.i64 q1,q1,q8 - vshr.s64 q3,q3,#25 - vadd.i64 q0,q0,q7 - vadd.i64 q5,q5,q3 - vshl.i64 q3,q3,#25 - vadd.i64 q6,q5,q11 - vadd.i64 q0,q0,q13 - vshl.i64 q7,q13,#25 - vadd.i64 q8,q0,q11 - vsub.i64 q3,q12,q3 - vshr.s64 q6,q6,#26 - vsub.i64 q7,q10,q7 - vtrn.32 d2,d6 - vshr.s64 q8,q8,#26 - vtrn.32 d3,d7 - vadd.i64 q3,q9,q6 - vst1.8 d2,[r2,: 64] - vshl.i64 q6,q6,#26 - vst1.8 d3,[r4,: 64] - vadd.i64 q1,q4,q8 - vtrn.32 d4,d14 - vshl.i64 q4,q8,#26 - vtrn.32 d5,d15 - vsub.i64 q5,q5,q6 - add r2,r2,#16 - vsub.i64 q0,q0,q4 - vst1.8 d4,[r2,: 64] - add r4,r4,#16 - vst1.8 d5,[r4,: 64] - vtrn.32 d10,d6 - vtrn.32 d11,d7 - sub r2,r2,#8 - sub r4,r4,#8 - vtrn.32 d0,d2 - vtrn.32 d1,d3 - vst1.8 d10,[r2,: 64] - vst1.8 d11,[r4,: 64] - sub r2,r2,#24 - sub r4,r4,#24 - vst1.8 d0,[r2,: 64] - vst1.8 d1,[r4,: 64] - add r2,sp,#544 - add r4,r3,#144 - add r5,r3,#192 - vld1.8 {d0-d1},[r2,: 128] - vld1.8 {d2-d3},[r4,: 128]! - vld1.8 {d4-d5},[r5,: 128]! - vzip.i32 q1,q2 - vld1.8 {d6-d7},[r4,: 128]! - vld1.8 {d8-d9},[r5,: 128]! 
- vshl.i32 q5,q1,#1 - vzip.i32 q3,q4 - vshl.i32 q6,q2,#1 - vld1.8 {d14},[r4,: 64] - vshl.i32 q8,q3,#1 - vld1.8 {d15},[r5,: 64] - vshl.i32 q9,q4,#1 - vmul.i32 d21,d7,d1 - vtrn.32 d14,d15 - vmul.i32 q11,q4,q0 - vmul.i32 q0,q7,q0 - vmull.s32 q12,d2,d2 - vmlal.s32 q12,d11,d1 - vmlal.s32 q12,d12,d0 - vmlal.s32 q12,d13,d23 - vmlal.s32 q12,d16,d22 - vmlal.s32 q12,d7,d21 - vmull.s32 q10,d2,d11 - vmlal.s32 q10,d4,d1 - vmlal.s32 q10,d13,d0 - vmlal.s32 q10,d6,d23 - vmlal.s32 q10,d17,d22 - vmull.s32 q13,d10,d4 - vmlal.s32 q13,d11,d3 - vmlal.s32 q13,d13,d1 - vmlal.s32 q13,d16,d0 - vmlal.s32 q13,d17,d23 - vmlal.s32 q13,d8,d22 - vmull.s32 q1,d10,d5 - vmlal.s32 q1,d11,d4 - vmlal.s32 q1,d6,d1 - vmlal.s32 q1,d17,d0 - vmlal.s32 q1,d8,d23 - vmull.s32 q14,d10,d6 - vmlal.s32 q14,d11,d13 - vmlal.s32 q14,d4,d4 - vmlal.s32 q14,d17,d1 - vmlal.s32 q14,d18,d0 - vmlal.s32 q14,d9,d23 - vmull.s32 q11,d10,d7 - vmlal.s32 q11,d11,d6 - vmlal.s32 q11,d12,d5 - vmlal.s32 q11,d8,d1 - vmlal.s32 q11,d19,d0 - vmull.s32 q15,d10,d8 - vmlal.s32 q15,d11,d17 - vmlal.s32 q15,d12,d6 - vmlal.s32 q15,d13,d5 - vmlal.s32 q15,d19,d1 - vmlal.s32 q15,d14,d0 - vmull.s32 q2,d10,d9 - vmlal.s32 q2,d11,d8 - vmlal.s32 q2,d12,d7 - vmlal.s32 q2,d13,d6 - vmlal.s32 q2,d14,d1 - vmull.s32 q0,d15,d1 - vmlal.s32 q0,d10,d14 - vmlal.s32 q0,d11,d19 - vmlal.s32 q0,d12,d8 - vmlal.s32 q0,d13,d17 - vmlal.s32 q0,d6,d6 - add r2,sp,#512 - vld1.8 {d18-d19},[r2,: 128] - vmull.s32 q3,d16,d7 - vmlal.s32 q3,d10,d15 - vmlal.s32 q3,d11,d14 - vmlal.s32 q3,d12,d9 - vmlal.s32 q3,d13,d8 - add r2,sp,#528 - vld1.8 {d8-d9},[r2,: 128] - vadd.i64 q5,q12,q9 - vadd.i64 q6,q15,q9 - vshr.s64 q5,q5,#26 - vshr.s64 q6,q6,#26 - vadd.i64 q7,q10,q5 - vshl.i64 q5,q5,#26 - vadd.i64 q8,q7,q4 - vadd.i64 q2,q2,q6 - vshl.i64 q6,q6,#26 - vadd.i64 q10,q2,q4 - vsub.i64 q5,q12,q5 - vshr.s64 q8,q8,#25 - vsub.i64 q6,q15,q6 - vshr.s64 q10,q10,#25 - vadd.i64 q12,q13,q8 - vshl.i64 q8,q8,#25 - vadd.i64 q13,q12,q9 - vadd.i64 q0,q0,q10 - vsub.i64 q7,q7,q8 - vshr.s64 q8,q13,#26 - vshl.i64 q10,q10,#25 - vadd.i64 q13,q0,q9 - vadd.i64 q1,q1,q8 - vshl.i64 q8,q8,#26 - vadd.i64 q15,q1,q4 - vsub.i64 q2,q2,q10 - vshr.s64 q10,q13,#26 - vsub.i64 q8,q12,q8 - vshr.s64 q12,q15,#25 - vadd.i64 q3,q3,q10 - vshl.i64 q10,q10,#26 - vadd.i64 q13,q3,q4 - vadd.i64 q14,q14,q12 - add r2,r3,#144 - vshl.i64 q12,q12,#25 - add r4,r3,#192 - vadd.i64 q15,q14,q9 - add r2,r2,#8 - vsub.i64 q0,q0,q10 - add r4,r4,#8 - vshr.s64 q10,q13,#25 - vsub.i64 q1,q1,q12 - vshr.s64 q12,q15,#26 - vadd.i64 q13,q10,q10 - vadd.i64 q11,q11,q12 - vtrn.32 d16,d2 - vshl.i64 q12,q12,#26 - vtrn.32 d17,d3 - vadd.i64 q1,q11,q4 - vadd.i64 q4,q5,q13 - vst1.8 d16,[r2,: 64]! - vshl.i64 q5,q10,#4 - vst1.8 d17,[r4,: 64]! - vsub.i64 q8,q14,q12 - vshr.s64 q1,q1,#25 - vadd.i64 q4,q4,q5 - vadd.i64 q5,q6,q1 - vshl.i64 q1,q1,#25 - vadd.i64 q6,q5,q9 - vadd.i64 q4,q4,q10 - vshl.i64 q10,q10,#25 - vadd.i64 q9,q4,q9 - vsub.i64 q1,q11,q1 - vshr.s64 q6,q6,#26 - vsub.i64 q3,q3,q10 - vtrn.32 d16,d2 - vshr.s64 q9,q9,#26 - vtrn.32 d17,d3 - vadd.i64 q1,q2,q6 - vst1.8 d16,[r2,: 64] - vshl.i64 q2,q6,#26 - vst1.8 d17,[r4,: 64] - vadd.i64 q6,q7,q9 - vtrn.32 d0,d6 - vshl.i64 q7,q9,#26 - vtrn.32 d1,d7 - vsub.i64 q2,q5,q2 - add r2,r2,#16 - vsub.i64 q3,q4,q7 - vst1.8 d0,[r2,: 64] - add r4,r4,#16 - vst1.8 d1,[r4,: 64] - vtrn.32 d4,d2 - vtrn.32 d5,d3 - sub r2,r2,#8 - sub r4,r4,#8 - vtrn.32 d6,d12 - vtrn.32 d7,d13 - vst1.8 d4,[r2,: 64] - vst1.8 d5,[r4,: 64] - sub r2,r2,#24 - sub r4,r4,#24 - vst1.8 d6,[r2,: 64] - vst1.8 d7,[r4,: 64] - add r2,r3,#336 - add r4,r3,#288 - vld1.8 {d0-d1},[r2,: 128]! 
- vld1.8 {d2-d3},[r4,: 128]! - vadd.i32 q0,q0,q1 - vld1.8 {d2-d3},[r2,: 128]! - vld1.8 {d4-d5},[r4,: 128]! - vadd.i32 q1,q1,q2 - add r5,r3,#288 - vld1.8 {d4},[r2,: 64] - vld1.8 {d6},[r4,: 64] - vadd.i32 q2,q2,q3 - vst1.8 {d0-d1},[r5,: 128]! - vst1.8 {d2-d3},[r5,: 128]! - vst1.8 d4,[r5,: 64] - add r2,r3,#48 - add r4,r3,#144 - vld1.8 {d0-d1},[r4,: 128]! - vld1.8 {d2-d3},[r4,: 128]! - vld1.8 {d4},[r4,: 64] - add r4,r3,#288 - vld1.8 {d6-d7},[r4,: 128]! - vtrn.32 q0,q3 - vld1.8 {d8-d9},[r4,: 128]! - vshl.i32 q5,q0,#4 - vtrn.32 q1,q4 - vshl.i32 q6,q3,#4 - vadd.i32 q5,q5,q0 - vadd.i32 q6,q6,q3 - vshl.i32 q7,q1,#4 - vld1.8 {d5},[r4,: 64] - vshl.i32 q8,q4,#4 - vtrn.32 d4,d5 - vadd.i32 q7,q7,q1 - vadd.i32 q8,q8,q4 - vld1.8 {d18-d19},[r2,: 128]! - vshl.i32 q10,q2,#4 - vld1.8 {d22-d23},[r2,: 128]! - vadd.i32 q10,q10,q2 - vld1.8 {d24},[r2,: 64] - vadd.i32 q5,q5,q0 - add r2,r3,#240 - vld1.8 {d26-d27},[r2,: 128]! - vadd.i32 q6,q6,q3 - vld1.8 {d28-d29},[r2,: 128]! - vadd.i32 q8,q8,q4 - vld1.8 {d25},[r2,: 64] - vadd.i32 q10,q10,q2 - vtrn.32 q9,q13 - vadd.i32 q7,q7,q1 - vadd.i32 q5,q5,q0 - vtrn.32 q11,q14 - vadd.i32 q6,q6,q3 - add r2,sp,#560 - vadd.i32 q10,q10,q2 - vtrn.32 d24,d25 - vst1.8 {d12-d13},[r2,: 128] - vshl.i32 q6,q13,#1 - add r2,sp,#576 - vst1.8 {d20-d21},[r2,: 128] - vshl.i32 q10,q14,#1 - add r2,sp,#592 - vst1.8 {d12-d13},[r2,: 128] - vshl.i32 q15,q12,#1 - vadd.i32 q8,q8,q4 - vext.32 d10,d31,d30,#0 - vadd.i32 q7,q7,q1 - add r2,sp,#608 - vst1.8 {d16-d17},[r2,: 128] - vmull.s32 q8,d18,d5 - vmlal.s32 q8,d26,d4 - vmlal.s32 q8,d19,d9 - vmlal.s32 q8,d27,d3 - vmlal.s32 q8,d22,d8 - vmlal.s32 q8,d28,d2 - vmlal.s32 q8,d23,d7 - vmlal.s32 q8,d29,d1 - vmlal.s32 q8,d24,d6 - vmlal.s32 q8,d25,d0 - add r2,sp,#624 - vst1.8 {d14-d15},[r2,: 128] - vmull.s32 q2,d18,d4 - vmlal.s32 q2,d12,d9 - vmlal.s32 q2,d13,d8 - vmlal.s32 q2,d19,d3 - vmlal.s32 q2,d22,d2 - vmlal.s32 q2,d23,d1 - vmlal.s32 q2,d24,d0 - add r2,sp,#640 - vst1.8 {d20-d21},[r2,: 128] - vmull.s32 q7,d18,d9 - vmlal.s32 q7,d26,d3 - vmlal.s32 q7,d19,d8 - vmlal.s32 q7,d27,d2 - vmlal.s32 q7,d22,d7 - vmlal.s32 q7,d28,d1 - vmlal.s32 q7,d23,d6 - vmlal.s32 q7,d29,d0 - add r2,sp,#656 - vst1.8 {d10-d11},[r2,: 128] - vmull.s32 q5,d18,d3 - vmlal.s32 q5,d19,d2 - vmlal.s32 q5,d22,d1 - vmlal.s32 q5,d23,d0 - vmlal.s32 q5,d12,d8 - add r2,sp,#672 - vst1.8 {d16-d17},[r2,: 128] - vmull.s32 q4,d18,d8 - vmlal.s32 q4,d26,d2 - vmlal.s32 q4,d19,d7 - vmlal.s32 q4,d27,d1 - vmlal.s32 q4,d22,d6 - vmlal.s32 q4,d28,d0 - vmull.s32 q8,d18,d7 - vmlal.s32 q8,d26,d1 - vmlal.s32 q8,d19,d6 - vmlal.s32 q8,d27,d0 - add r2,sp,#576 - vld1.8 {d20-d21},[r2,: 128] - vmlal.s32 q7,d24,d21 - vmlal.s32 q7,d25,d20 - vmlal.s32 q4,d23,d21 - vmlal.s32 q4,d29,d20 - vmlal.s32 q8,d22,d21 - vmlal.s32 q8,d28,d20 - vmlal.s32 q5,d24,d20 - add r2,sp,#576 - vst1.8 {d14-d15},[r2,: 128] - vmull.s32 q7,d18,d6 - vmlal.s32 q7,d26,d0 - add r2,sp,#656 - vld1.8 {d30-d31},[r2,: 128] - vmlal.s32 q2,d30,d21 - vmlal.s32 q7,d19,d21 - vmlal.s32 q7,d27,d20 - add r2,sp,#624 - vld1.8 {d26-d27},[r2,: 128] - vmlal.s32 q4,d25,d27 - vmlal.s32 q8,d29,d27 - vmlal.s32 q8,d25,d26 - vmlal.s32 q7,d28,d27 - vmlal.s32 q7,d29,d26 - add r2,sp,#608 - vld1.8 {d28-d29},[r2,: 128] - vmlal.s32 q4,d24,d29 - vmlal.s32 q8,d23,d29 - vmlal.s32 q8,d24,d28 - vmlal.s32 q7,d22,d29 - vmlal.s32 q7,d23,d28 - add r2,sp,#608 - vst1.8 {d8-d9},[r2,: 128] - add r2,sp,#560 - vld1.8 {d8-d9},[r2,: 128] - vmlal.s32 q7,d24,d9 - vmlal.s32 q7,d25,d31 - vmull.s32 q1,d18,d2 - vmlal.s32 q1,d19,d1 - vmlal.s32 q1,d22,d0 - vmlal.s32 q1,d24,d27 - vmlal.s32 q1,d23,d20 - vmlal.s32 
q1,d12,d7 - vmlal.s32 q1,d13,d6 - vmull.s32 q6,d18,d1 - vmlal.s32 q6,d19,d0 - vmlal.s32 q6,d23,d27 - vmlal.s32 q6,d22,d20 - vmlal.s32 q6,d24,d26 - vmull.s32 q0,d18,d0 - vmlal.s32 q0,d22,d27 - vmlal.s32 q0,d23,d26 - vmlal.s32 q0,d24,d31 - vmlal.s32 q0,d19,d20 - add r2,sp,#640 - vld1.8 {d18-d19},[r2,: 128] - vmlal.s32 q2,d18,d7 - vmlal.s32 q2,d19,d6 - vmlal.s32 q5,d18,d6 - vmlal.s32 q5,d19,d21 - vmlal.s32 q1,d18,d21 - vmlal.s32 q1,d19,d29 - vmlal.s32 q0,d18,d28 - vmlal.s32 q0,d19,d9 - vmlal.s32 q6,d18,d29 - vmlal.s32 q6,d19,d28 - add r2,sp,#592 - vld1.8 {d18-d19},[r2,: 128] - add r2,sp,#512 - vld1.8 {d22-d23},[r2,: 128] - vmlal.s32 q5,d19,d7 - vmlal.s32 q0,d18,d21 - vmlal.s32 q0,d19,d29 - vmlal.s32 q6,d18,d6 - add r2,sp,#528 - vld1.8 {d6-d7},[r2,: 128] - vmlal.s32 q6,d19,d21 - add r2,sp,#576 - vld1.8 {d18-d19},[r2,: 128] - vmlal.s32 q0,d30,d8 - add r2,sp,#672 - vld1.8 {d20-d21},[r2,: 128] - vmlal.s32 q5,d30,d29 - add r2,sp,#608 - vld1.8 {d24-d25},[r2,: 128] - vmlal.s32 q1,d30,d28 - vadd.i64 q13,q0,q11 - vadd.i64 q14,q5,q11 - vmlal.s32 q6,d30,d9 - vshr.s64 q4,q13,#26 - vshr.s64 q13,q14,#26 - vadd.i64 q7,q7,q4 - vshl.i64 q4,q4,#26 - vadd.i64 q14,q7,q3 - vadd.i64 q9,q9,q13 - vshl.i64 q13,q13,#26 - vadd.i64 q15,q9,q3 - vsub.i64 q0,q0,q4 - vshr.s64 q4,q14,#25 - vsub.i64 q5,q5,q13 - vshr.s64 q13,q15,#25 - vadd.i64 q6,q6,q4 - vshl.i64 q4,q4,#25 - vadd.i64 q14,q6,q11 - vadd.i64 q2,q2,q13 - vsub.i64 q4,q7,q4 - vshr.s64 q7,q14,#26 - vshl.i64 q13,q13,#25 - vadd.i64 q14,q2,q11 - vadd.i64 q8,q8,q7 - vshl.i64 q7,q7,#26 - vadd.i64 q15,q8,q3 - vsub.i64 q9,q9,q13 - vshr.s64 q13,q14,#26 - vsub.i64 q6,q6,q7 - vshr.s64 q7,q15,#25 - vadd.i64 q10,q10,q13 - vshl.i64 q13,q13,#26 - vadd.i64 q14,q10,q3 - vadd.i64 q1,q1,q7 - add r2,r3,#240 - vshl.i64 q7,q7,#25 - add r4,r3,#144 - vadd.i64 q15,q1,q11 - add r2,r2,#8 - vsub.i64 q2,q2,q13 - add r4,r4,#8 - vshr.s64 q13,q14,#25 - vsub.i64 q7,q8,q7 - vshr.s64 q8,q15,#26 - vadd.i64 q14,q13,q13 - vadd.i64 q12,q12,q8 - vtrn.32 d12,d14 - vshl.i64 q8,q8,#26 - vtrn.32 d13,d15 - vadd.i64 q3,q12,q3 - vadd.i64 q0,q0,q14 - vst1.8 d12,[r2,: 64]! - vshl.i64 q7,q13,#4 - vst1.8 d13,[r4,: 64]! - vsub.i64 q1,q1,q8 - vshr.s64 q3,q3,#25 - vadd.i64 q0,q0,q7 - vadd.i64 q5,q5,q3 - vshl.i64 q3,q3,#25 - vadd.i64 q6,q5,q11 - vadd.i64 q0,q0,q13 - vshl.i64 q7,q13,#25 - vadd.i64 q8,q0,q11 - vsub.i64 q3,q12,q3 - vshr.s64 q6,q6,#26 - vsub.i64 q7,q10,q7 - vtrn.32 d2,d6 - vshr.s64 q8,q8,#26 - vtrn.32 d3,d7 - vadd.i64 q3,q9,q6 - vst1.8 d2,[r2,: 64] - vshl.i64 q6,q6,#26 - vst1.8 d3,[r4,: 64] - vadd.i64 q1,q4,q8 - vtrn.32 d4,d14 - vshl.i64 q4,q8,#26 - vtrn.32 d5,d15 - vsub.i64 q5,q5,q6 - add r2,r2,#16 - vsub.i64 q0,q0,q4 - vst1.8 d4,[r2,: 64] - add r4,r4,#16 - vst1.8 d5,[r4,: 64] - vtrn.32 d10,d6 - vtrn.32 d11,d7 - sub r2,r2,#8 - sub r4,r4,#8 - vtrn.32 d0,d2 - vtrn.32 d1,d3 - vst1.8 d10,[r2,: 64] - vst1.8 d11,[r4,: 64] - sub r2,r2,#24 - sub r4,r4,#24 - vst1.8 d0,[r2,: 64] - vst1.8 d1,[r4,: 64] - ldr r2,[sp,#488] - ldr r4,[sp,#492] - subs r5,r2,#1 - bge .Lmainloop - add r1,r3,#144 - add r2,r3,#336 - vld1.8 {d0-d1},[r1,: 128]! - vld1.8 {d2-d3},[r1,: 128]! - vld1.8 {d4},[r1,: 64] - vst1.8 {d0-d1},[r2,: 128]! - vst1.8 {d2-d3},[r2,: 128]! 
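The .Linvertloop dispatch just below computes z^(p-2) mod p, a Fermat inversion, with the same addition chain as fe_loose_invert() in the deleted fiat32 code: each of the twelve iterations squares the working value r5 times and then multiplies by a saved intermediate. A sketch of the squaring schedule that the cmp/ldreq ladder encodes (illustrative only, not part of this diff):

/* iteration:                      0  1  2  3   4   5   6   7    8   9 10 11 */
static const unsigned int invert_square_counts[12] = {
	2, 1, 1, 5, 10, 20, 10, 50, 100, 50, 5, 0
};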
- vst1.8 d4,[r2,: 64] - ldr r1,=0 - .Linvertloop: - add r2,r3,#144 - ldr r4,=0 - ldr r5,=2 - cmp r1,#1 - ldreq r5,=1 - addeq r2,r3,#336 - addeq r4,r3,#48 - cmp r1,#2 - ldreq r5,=1 - addeq r2,r3,#48 - cmp r1,#3 - ldreq r5,=5 - addeq r4,r3,#336 - cmp r1,#4 - ldreq r5,=10 - cmp r1,#5 - ldreq r5,=20 - cmp r1,#6 - ldreq r5,=10 - addeq r2,r3,#336 - addeq r4,r3,#336 - cmp r1,#7 - ldreq r5,=50 - cmp r1,#8 - ldreq r5,=100 - cmp r1,#9 - ldreq r5,=50 - addeq r2,r3,#336 - cmp r1,#10 - ldreq r5,=5 - addeq r2,r3,#48 - cmp r1,#11 - ldreq r5,=0 - addeq r2,r3,#96 - add r6,r3,#144 - add r7,r3,#288 - vld1.8 {d0-d1},[r6,: 128]! - vld1.8 {d2-d3},[r6,: 128]! - vld1.8 {d4},[r6,: 64] - vst1.8 {d0-d1},[r7,: 128]! - vst1.8 {d2-d3},[r7,: 128]! - vst1.8 d4,[r7,: 64] - cmp r5,#0 - beq .Lskipsquaringloop - .Lsquaringloop: - add r6,r3,#288 - add r7,r3,#288 - add r8,r3,#288 - vmov.i32 q0,#19 - vmov.i32 q1,#0 - vmov.i32 q2,#1 - vzip.i32 q1,q2 - vld1.8 {d4-d5},[r7,: 128]! - vld1.8 {d6-d7},[r7,: 128]! - vld1.8 {d9},[r7,: 64] - vld1.8 {d10-d11},[r6,: 128]! - add r7,sp,#416 - vld1.8 {d12-d13},[r6,: 128]! - vmul.i32 q7,q2,q0 - vld1.8 {d8},[r6,: 64] - vext.32 d17,d11,d10,#1 - vmul.i32 q9,q3,q0 - vext.32 d16,d10,d8,#1 - vshl.u32 q10,q5,q1 - vext.32 d22,d14,d4,#1 - vext.32 d24,d18,d6,#1 - vshl.u32 q13,q6,q1 - vshl.u32 d28,d8,d2 - vrev64.i32 d22,d22 - vmul.i32 d1,d9,d1 - vrev64.i32 d24,d24 - vext.32 d29,d8,d13,#1 - vext.32 d0,d1,d9,#1 - vrev64.i32 d0,d0 - vext.32 d2,d9,d1,#1 - vext.32 d23,d15,d5,#1 - vmull.s32 q4,d20,d4 - vrev64.i32 d23,d23 - vmlal.s32 q4,d21,d1 - vrev64.i32 d2,d2 - vmlal.s32 q4,d26,d19 - vext.32 d3,d5,d15,#1 - vmlal.s32 q4,d27,d18 - vrev64.i32 d3,d3 - vmlal.s32 q4,d28,d15 - vext.32 d14,d12,d11,#1 - vmull.s32 q5,d16,d23 - vext.32 d15,d13,d12,#1 - vmlal.s32 q5,d17,d4 - vst1.8 d8,[r7,: 64]! - vmlal.s32 q5,d14,d1 - vext.32 d12,d9,d8,#0 - vmlal.s32 q5,d15,d19 - vmov.i64 d13,#0 - vmlal.s32 q5,d29,d18 - vext.32 d25,d19,d7,#1 - vmlal.s32 q6,d20,d5 - vrev64.i32 d25,d25 - vmlal.s32 q6,d21,d4 - vst1.8 d11,[r7,: 64]! - vmlal.s32 q6,d26,d1 - vext.32 d9,d10,d10,#0 - vmlal.s32 q6,d27,d19 - vmov.i64 d8,#0 - vmlal.s32 q6,d28,d18 - vmlal.s32 q4,d16,d24 - vmlal.s32 q4,d17,d5 - vmlal.s32 q4,d14,d4 - vst1.8 d12,[r7,: 64]! - vmlal.s32 q4,d15,d1 - vext.32 d10,d13,d12,#0 - vmlal.s32 q4,d29,d19 - vmov.i64 d11,#0 - vmlal.s32 q5,d20,d6 - vmlal.s32 q5,d21,d5 - vmlal.s32 q5,d26,d4 - vext.32 d13,d8,d8,#0 - vmlal.s32 q5,d27,d1 - vmov.i64 d12,#0 - vmlal.s32 q5,d28,d19 - vst1.8 d9,[r7,: 64]! - vmlal.s32 q6,d16,d25 - vmlal.s32 q6,d17,d6 - vst1.8 d10,[r7,: 64] - vmlal.s32 q6,d14,d5 - vext.32 d8,d11,d10,#0 - vmlal.s32 q6,d15,d4 - vmov.i64 d9,#0 - vmlal.s32 q6,d29,d1 - vmlal.s32 q4,d20,d7 - vmlal.s32 q4,d21,d6 - vmlal.s32 q4,d26,d5 - vext.32 d11,d12,d12,#0 - vmlal.s32 q4,d27,d4 - vmov.i64 d10,#0 - vmlal.s32 q4,d28,d1 - vmlal.s32 q5,d16,d0 - sub r6,r7,#32 - vmlal.s32 q5,d17,d7 - vmlal.s32 q5,d14,d6 - vext.32 d30,d9,d8,#0 - vmlal.s32 q5,d15,d5 - vld1.8 {d31},[r6,: 64]! 
- vmlal.s32 q5,d29,d4 - vmlal.s32 q15,d20,d0 - vext.32 d0,d6,d18,#1 - vmlal.s32 q15,d21,d25 - vrev64.i32 d0,d0 - vmlal.s32 q15,d26,d24 - vext.32 d1,d7,d19,#1 - vext.32 d7,d10,d10,#0 - vmlal.s32 q15,d27,d23 - vrev64.i32 d1,d1 - vld1.8 {d6},[r6,: 64] - vmlal.s32 q15,d28,d22 - vmlal.s32 q3,d16,d4 - add r6,r6,#24 - vmlal.s32 q3,d17,d2 - vext.32 d4,d31,d30,#0 - vmov d17,d11 - vmlal.s32 q3,d14,d1 - vext.32 d11,d13,d13,#0 - vext.32 d13,d30,d30,#0 - vmlal.s32 q3,d15,d0 - vext.32 d1,d8,d8,#0 - vmlal.s32 q3,d29,d3 - vld1.8 {d5},[r6,: 64] - sub r6,r6,#16 - vext.32 d10,d6,d6,#0 - vmov.i32 q1,#0xffffffff - vshl.i64 q4,q1,#25 - add r7,sp,#512 - vld1.8 {d14-d15},[r7,: 128] - vadd.i64 q9,q2,q7 - vshl.i64 q1,q1,#26 - vshr.s64 q10,q9,#26 - vld1.8 {d0},[r6,: 64]! - vadd.i64 q5,q5,q10 - vand q9,q9,q1 - vld1.8 {d16},[r6,: 64]! - add r6,sp,#528 - vld1.8 {d20-d21},[r6,: 128] - vadd.i64 q11,q5,q10 - vsub.i64 q2,q2,q9 - vshr.s64 q9,q11,#25 - vext.32 d12,d5,d4,#0 - vand q11,q11,q4 - vadd.i64 q0,q0,q9 - vmov d19,d7 - vadd.i64 q3,q0,q7 - vsub.i64 q5,q5,q11 - vshr.s64 q11,q3,#26 - vext.32 d18,d11,d10,#0 - vand q3,q3,q1 - vadd.i64 q8,q8,q11 - vadd.i64 q11,q8,q10 - vsub.i64 q0,q0,q3 - vshr.s64 q3,q11,#25 - vand q11,q11,q4 - vadd.i64 q3,q6,q3 - vadd.i64 q6,q3,q7 - vsub.i64 q8,q8,q11 - vshr.s64 q11,q6,#26 - vand q6,q6,q1 - vadd.i64 q9,q9,q11 - vadd.i64 d25,d19,d21 - vsub.i64 q3,q3,q6 - vshr.s64 d23,d25,#25 - vand q4,q12,q4 - vadd.i64 d21,d23,d23 - vshl.i64 d25,d23,#4 - vadd.i64 d21,d21,d23 - vadd.i64 d25,d25,d21 - vadd.i64 d4,d4,d25 - vzip.i32 q0,q8 - vadd.i64 d12,d4,d14 - add r6,r8,#8 - vst1.8 d0,[r6,: 64] - vsub.i64 d19,d19,d9 - add r6,r6,#16 - vst1.8 d16,[r6,: 64] - vshr.s64 d22,d12,#26 - vand q0,q6,q1 - vadd.i64 d10,d10,d22 - vzip.i32 q3,q9 - vsub.i64 d4,d4,d0 - sub r6,r6,#8 - vst1.8 d6,[r6,: 64] - add r6,r6,#16 - vst1.8 d18,[r6,: 64] - vzip.i32 q2,q5 - sub r6,r6,#32 - vst1.8 d4,[r6,: 64] - subs r5,r5,#1 - bhi .Lsquaringloop - .Lskipsquaringloop: - mov r2,r2 - add r5,r3,#288 - add r6,r3,#144 - vmov.i32 q0,#19 - vmov.i32 q1,#0 - vmov.i32 q2,#1 - vzip.i32 q1,q2 - vld1.8 {d4-d5},[r5,: 128]! - vld1.8 {d6-d7},[r5,: 128]! - vld1.8 {d9},[r5,: 64] - vld1.8 {d10-d11},[r2,: 128]! - add r5,sp,#416 - vld1.8 {d12-d13},[r2,: 128]! - vmul.i32 q7,q2,q0 - vld1.8 {d8},[r2,: 64] - vext.32 d17,d11,d10,#1 - vmul.i32 q9,q3,q0 - vext.32 d16,d10,d8,#1 - vshl.u32 q10,q5,q1 - vext.32 d22,d14,d4,#1 - vext.32 d24,d18,d6,#1 - vshl.u32 q13,q6,q1 - vshl.u32 d28,d8,d2 - vrev64.i32 d22,d22 - vmul.i32 d1,d9,d1 - vrev64.i32 d24,d24 - vext.32 d29,d8,d13,#1 - vext.32 d0,d1,d9,#1 - vrev64.i32 d0,d0 - vext.32 d2,d9,d1,#1 - vext.32 d23,d15,d5,#1 - vmull.s32 q4,d20,d4 - vrev64.i32 d23,d23 - vmlal.s32 q4,d21,d1 - vrev64.i32 d2,d2 - vmlal.s32 q4,d26,d19 - vext.32 d3,d5,d15,#1 - vmlal.s32 q4,d27,d18 - vrev64.i32 d3,d3 - vmlal.s32 q4,d28,d15 - vext.32 d14,d12,d11,#1 - vmull.s32 q5,d16,d23 - vext.32 d15,d13,d12,#1 - vmlal.s32 q5,d17,d4 - vst1.8 d8,[r5,: 64]! - vmlal.s32 q5,d14,d1 - vext.32 d12,d9,d8,#0 - vmlal.s32 q5,d15,d19 - vmov.i64 d13,#0 - vmlal.s32 q5,d29,d18 - vext.32 d25,d19,d7,#1 - vmlal.s32 q6,d20,d5 - vrev64.i32 d25,d25 - vmlal.s32 q6,d21,d4 - vst1.8 d11,[r5,: 64]! - vmlal.s32 q6,d26,d1 - vext.32 d9,d10,d10,#0 - vmlal.s32 q6,d27,d19 - vmov.i64 d8,#0 - vmlal.s32 q6,d28,d18 - vmlal.s32 q4,d16,d24 - vmlal.s32 q4,d17,d5 - vmlal.s32 q4,d14,d4 - vst1.8 d12,[r5,: 64]! 
- vmlal.s32 q4,d15,d1 - vext.32 d10,d13,d12,#0 - vmlal.s32 q4,d29,d19 - vmov.i64 d11,#0 - vmlal.s32 q5,d20,d6 - vmlal.s32 q5,d21,d5 - vmlal.s32 q5,d26,d4 - vext.32 d13,d8,d8,#0 - vmlal.s32 q5,d27,d1 - vmov.i64 d12,#0 - vmlal.s32 q5,d28,d19 - vst1.8 d9,[r5,: 64]! - vmlal.s32 q6,d16,d25 - vmlal.s32 q6,d17,d6 - vst1.8 d10,[r5,: 64] - vmlal.s32 q6,d14,d5 - vext.32 d8,d11,d10,#0 - vmlal.s32 q6,d15,d4 - vmov.i64 d9,#0 - vmlal.s32 q6,d29,d1 - vmlal.s32 q4,d20,d7 - vmlal.s32 q4,d21,d6 - vmlal.s32 q4,d26,d5 - vext.32 d11,d12,d12,#0 - vmlal.s32 q4,d27,d4 - vmov.i64 d10,#0 - vmlal.s32 q4,d28,d1 - vmlal.s32 q5,d16,d0 - sub r2,r5,#32 - vmlal.s32 q5,d17,d7 - vmlal.s32 q5,d14,d6 - vext.32 d30,d9,d8,#0 - vmlal.s32 q5,d15,d5 - vld1.8 {d31},[r2,: 64]! - vmlal.s32 q5,d29,d4 - vmlal.s32 q15,d20,d0 - vext.32 d0,d6,d18,#1 - vmlal.s32 q15,d21,d25 - vrev64.i32 d0,d0 - vmlal.s32 q15,d26,d24 - vext.32 d1,d7,d19,#1 - vext.32 d7,d10,d10,#0 - vmlal.s32 q15,d27,d23 - vrev64.i32 d1,d1 - vld1.8 {d6},[r2,: 64] - vmlal.s32 q15,d28,d22 - vmlal.s32 q3,d16,d4 - add r2,r2,#24 - vmlal.s32 q3,d17,d2 - vext.32 d4,d31,d30,#0 - vmov d17,d11 - vmlal.s32 q3,d14,d1 - vext.32 d11,d13,d13,#0 - vext.32 d13,d30,d30,#0 - vmlal.s32 q3,d15,d0 - vext.32 d1,d8,d8,#0 - vmlal.s32 q3,d29,d3 - vld1.8 {d5},[r2,: 64] - sub r2,r2,#16 - vext.32 d10,d6,d6,#0 - vmov.i32 q1,#0xffffffff - vshl.i64 q4,q1,#25 - add r5,sp,#512 - vld1.8 {d14-d15},[r5,: 128] - vadd.i64 q9,q2,q7 - vshl.i64 q1,q1,#26 - vshr.s64 q10,q9,#26 - vld1.8 {d0},[r2,: 64]! - vadd.i64 q5,q5,q10 - vand q9,q9,q1 - vld1.8 {d16},[r2,: 64]! - add r2,sp,#528 - vld1.8 {d20-d21},[r2,: 128] - vadd.i64 q11,q5,q10 - vsub.i64 q2,q2,q9 - vshr.s64 q9,q11,#25 - vext.32 d12,d5,d4,#0 - vand q11,q11,q4 - vadd.i64 q0,q0,q9 - vmov d19,d7 - vadd.i64 q3,q0,q7 - vsub.i64 q5,q5,q11 - vshr.s64 q11,q3,#26 - vext.32 d18,d11,d10,#0 - vand q3,q3,q1 - vadd.i64 q8,q8,q11 - vadd.i64 q11,q8,q10 - vsub.i64 q0,q0,q3 - vshr.s64 q3,q11,#25 - vand q11,q11,q4 - vadd.i64 q3,q6,q3 - vadd.i64 q6,q3,q7 - vsub.i64 q8,q8,q11 - vshr.s64 q11,q6,#26 - vand q6,q6,q1 - vadd.i64 q9,q9,q11 - vadd.i64 d25,d19,d21 - vsub.i64 q3,q3,q6 - vshr.s64 d23,d25,#25 - vand q4,q12,q4 - vadd.i64 d21,d23,d23 - vshl.i64 d25,d23,#4 - vadd.i64 d21,d21,d23 - vadd.i64 d25,d25,d21 - vadd.i64 d4,d4,d25 - vzip.i32 q0,q8 - vadd.i64 d12,d4,d14 - add r2,r6,#8 - vst1.8 d0,[r2,: 64] - vsub.i64 d19,d19,d9 - add r2,r2,#16 - vst1.8 d16,[r2,: 64] - vshr.s64 d22,d12,#26 - vand q0,q6,q1 - vadd.i64 d10,d10,d22 - vzip.i32 q3,q9 - vsub.i64 d4,d4,d0 - sub r2,r2,#8 - vst1.8 d6,[r2,: 64] - add r2,r2,#16 - vst1.8 d18,[r2,: 64] - vzip.i32 q2,q5 - sub r2,r2,#32 - vst1.8 d4,[r2,: 64] - cmp r4,#0 - beq .Lskippostcopy - add r2,r3,#144 - mov r4,r4 - vld1.8 {d0-d1},[r2,: 128]! - vld1.8 {d2-d3},[r2,: 128]! - vld1.8 {d4},[r2,: 64] - vst1.8 {d0-d1},[r4,: 128]! - vst1.8 {d2-d3},[r4,: 128]! - vst1.8 d4,[r4,: 64] - .Lskippostcopy: - cmp r1,#1 - bne .Lskipfinalcopy - add r2,r3,#288 - add r4,r3,#144 - vld1.8 {d0-d1},[r2,: 128]! - vld1.8 {d2-d3},[r2,: 128]! - vld1.8 {d4},[r2,: 64] - vst1.8 {d0-d1},[r4,: 128]! - vst1.8 {d2-d3},[r4,: 128]! 
- vst1.8 d4,[r4,: 64] - .Lskipfinalcopy: - add r1,r1,#1 - cmp r1,#12 - blo .Linvertloop - add r1,r3,#144 - ldr r2,[r1],#4 - ldr r3,[r1],#4 - ldr r4,[r1],#4 - ldr r5,[r1],#4 - ldr r6,[r1],#4 - ldr r7,[r1],#4 - ldr r8,[r1],#4 - ldr r9,[r1],#4 - ldr r10,[r1],#4 - ldr r1,[r1] - add r11,r1,r1,LSL #4 - add r11,r11,r1,LSL #1 - add r11,r11,#16777216 - mov r11,r11,ASR #25 - add r11,r11,r2 - mov r11,r11,ASR #26 - add r11,r11,r3 - mov r11,r11,ASR #25 - add r11,r11,r4 - mov r11,r11,ASR #26 - add r11,r11,r5 - mov r11,r11,ASR #25 - add r11,r11,r6 - mov r11,r11,ASR #26 - add r11,r11,r7 - mov r11,r11,ASR #25 - add r11,r11,r8 - mov r11,r11,ASR #26 - add r11,r11,r9 - mov r11,r11,ASR #25 - add r11,r11,r10 - mov r11,r11,ASR #26 - add r11,r11,r1 - mov r11,r11,ASR #25 - add r2,r2,r11 - add r2,r2,r11,LSL #1 - add r2,r2,r11,LSL #4 - mov r11,r2,ASR #26 - add r3,r3,r11 - sub r2,r2,r11,LSL #26 - mov r11,r3,ASR #25 - add r4,r4,r11 - sub r3,r3,r11,LSL #25 - mov r11,r4,ASR #26 - add r5,r5,r11 - sub r4,r4,r11,LSL #26 - mov r11,r5,ASR #25 - add r6,r6,r11 - sub r5,r5,r11,LSL #25 - mov r11,r6,ASR #26 - add r7,r7,r11 - sub r6,r6,r11,LSL #26 - mov r11,r7,ASR #25 - add r8,r8,r11 - sub r7,r7,r11,LSL #25 - mov r11,r8,ASR #26 - add r9,r9,r11 - sub r8,r8,r11,LSL #26 - mov r11,r9,ASR #25 - add r10,r10,r11 - sub r9,r9,r11,LSL #25 - mov r11,r10,ASR #26 - add r1,r1,r11 - sub r10,r10,r11,LSL #26 - mov r11,r1,ASR #25 - sub r1,r1,r11,LSL #25 - add r2,r2,r3,LSL #26 - mov r3,r3,LSR #6 - add r3,r3,r4,LSL #19 - mov r4,r4,LSR #13 - add r4,r4,r5,LSL #13 - mov r5,r5,LSR #19 - add r5,r5,r6,LSL #6 - add r6,r7,r8,LSL #25 - mov r7,r8,LSR #7 - add r7,r7,r9,LSL #19 - mov r8,r9,LSR #13 - add r8,r8,r10,LSL #12 - mov r9,r10,LSR #20 - add r1,r9,r1,LSL #6 - str r2,[r0],#4 - str r3,[r0],#4 - str r4,[r0],#4 - str r5,[r0],#4 - str r6,[r0],#4 - str r7,[r0],#4 - str r8,[r0],#4 - str r1,[r0] - ldrd r4,[sp,#0] - ldrd r6,[sp,#8] - ldrd r8,[sp,#16] - ldrd r10,[sp,#24] - ldr r12,[sp,#480] - ldr r14,[sp,#484] - ldr r0,=0 - mov sp,r12 - vpop {q4,q5,q6,q7} - bx lr -ENDPROC(curve25519_neon) -#endif diff --git a/generic.c b/generic.c new file mode 100644 index 0000000..4e311df --- /dev/null +++ b/generic.c @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#include <linux/kernel.h> +#include <crypto/algapi.h> + +enum { + CHACHA20_IV_SIZE = 16, + CHACHA20_KEY_SIZE = 32, + CHACHA20_BLOCK_SIZE = 64, + CHACHA20_BLOCK_WORDS = CHACHA20_BLOCK_SIZE / sizeof(u32) +}; + +#define EXPAND_32_BYTE_K 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U + +#define QUARTER_ROUND(x, a, b, c, d) ( \ + x[a] += x[b], \ + x[d] = rol32((x[d] ^ x[a]), 16), \ + x[c] += x[d], \ + x[b] = rol32((x[b] ^ x[c]), 12), \ + x[a] += x[b], \ + x[d] = rol32((x[d] ^ x[a]), 8), \ + x[c] += x[d], \ + x[b] = rol32((x[b] ^ x[c]), 7) \ +) + +#define C(i, j) (i * 4 + j) + +#define DOUBLE_ROUND(x) ( \ + /* Column Round */ \ + QUARTER_ROUND(x, C(0, 0), C(1, 0), C(2, 0), C(3, 0)), \ + QUARTER_ROUND(x, C(0, 1), C(1, 1), C(2, 1), C(3, 1)), \ + QUARTER_ROUND(x, C(0, 2), C(1, 2), C(2, 2), C(3, 2)), \ + QUARTER_ROUND(x, C(0, 3), C(1, 3), C(2, 3), C(3, 3)), \ + /* Diagonal Round */ \ + QUARTER_ROUND(x, C(0, 0), C(1, 1), C(2, 2), C(3, 3)), \ + QUARTER_ROUND(x, C(0, 1), C(1, 2), C(2, 3), C(3, 0)), \ + QUARTER_ROUND(x, C(0, 2), C(1, 3), C(2, 0), C(3, 1)), \ + QUARTER_ROUND(x, C(0, 3), C(1, 0), C(2, 1), C(3, 2)) \ +) + +#define TWENTY_ROUNDS(x) ( \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x) \ +) + +static void chacha20_block_generic(__le32 *stream, u32 *state) +{ + u32 x[CHACHA20_BLOCK_WORDS]; + int i; + + for (i = 0; i < ARRAY_SIZE(x); ++i) + x[i] = state[i]; + + TWENTY_ROUNDS(x); + + for (i = 0; i < ARRAY_SIZE(x); ++i) + stream[i] = cpu_to_le32(x[i] + state[i]); + + ++state[12]; +} + +void chacha20_generic(u8 *out, const u8 *in, u32 len, const u32 key[8], const u32 counter[4]) +{ + __le32 buf[CHACHA20_BLOCK_WORDS]; + u32 x[] = { + EXPAND_32_BYTE_K, + key[0], key[1], key[2], key[3], + key[4], key[5], key[6], key[7], + counter[0], counter[1], counter[2], counter[3] + }; + + if (out != in) + memmove(out, in, len); + + while (len >= CHACHA20_BLOCK_SIZE) { + chacha20_block_generic(buf, x); + crypto_xor(out, (u8 *)buf, CHACHA20_BLOCK_SIZE); + len -= CHACHA20_BLOCK_SIZE; + out += CHACHA20_BLOCK_SIZE; + } + if (len) { + chacha20_block_generic(buf, x); + crypto_xor(out, (u8 *)buf, len); + } +} @@ -6,98 +6,88 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/slab.h> +#include <linux/sort.h> #include <asm/neon.h> static unsigned long stamp = 0; module_param(stamp, ulong, 0); -int dummy; +#define declare_it(name) void chacha20_ ## name(u8 *dst, const u8 *src, u32 len, const u32 key[8], const u32 counter[4]); -enum { CURVE25519_POINT_SIZE = 32 }; -u8 dummy_out[CURVE25519_POINT_SIZE]; -#include "test_vectors.h" - -#define declare_it(name) \ -bool curve25519_ ## name(u8 mypublic[CURVE25519_POINT_SIZE], const u8 secret[CURVE25519_POINT_SIZE], const u8 basepoint[CURVE25519_POINT_SIZE]); \ -static __always_inline int name(void) \ -{ \ - return curve25519_ ## name(dummy_out, curve25519_test_vectors[0].private, curve25519_test_vectors[0].public); \ +static int compare_cycles(const void *a, const void *b) +{ + return *((cycles_t *)a) - *((cycles_t *)b); } -#define do_it(name) do { \ - for (i = 0; i < WARMUP; ++i) \ - ret |= name(); \ - start_ ## name = get_cycles(); \ - for (i = 0; i < TRIALS; ++i) \ - ret |= name(); \ - end_ ## name = get_cycles(); \ -} while (0) - -#define test_it(name, before, after) do { \ - memset(out, __LINE__, CURVE25519_POINT_SIZE); \ +#define do_it(name, len, before, after) ({ \ 
before; \ - ret = curve25519_ ## name(out, curve25519_test_vectors[i].private, curve25519_test_vectors[i].public); \ - after; \ - if (memcmp(out, curve25519_test_vectors[i].result, CURVE25519_POINT_SIZE)) { \ - pr_err(#name " self-test %zu: FAIL\n", i + 1); \ - return false; \ + for (j = 0; j < WARMUP; ++j) \ + chacha20_ ## name(output, input, len, key, counter); \ + for (j = 0; j <= TRIALS; ++j) { \ + trial_times[j] = get_cycles(); \ + chacha20_ ## name(output, input, len, key, counter); \ } \ -} while (0) - -#define report_it(name) do { \ - pr_err("%lu: %7s: %lu cycles per call\n", stamp, #name, (end_ ## name - start_ ## name) / TRIALS); \ -} while (0) - - -declare_it(neon) -declare_it(fiat32) -declare_it(donna32) + after; \ + for (j = 0; j < TRIALS; ++j) \ + trial_times[j] = trial_times[j + 1] - trial_times[j]; \ + sort(trial_times, TRIALS + 1, sizeof(cycles_t), compare_cycles, NULL); \ + trial_times[TRIALS / 2]; \ +}) -static bool verify(void) -{ - int ret; - size_t i = 0; - u8 out[CURVE25519_POINT_SIZE]; - - for (i = 0; i < ARRAY_SIZE(curve25519_test_vectors); ++i) { - test_it(neon, { kernel_neon_begin(); }, { kernel_neon_end(); }); - test_it(fiat32, {}, {}); - test_it(donna32, {}, {}); - } - return true; -} +declare_it(generic) +declare_it(ossl_scalar) +declare_it(ossl_neon) +declare_it(ard_neon) static int __init mod_init(void) { - enum { WARMUP = 5000, TRIALS = 10000, IDLE = 1 * 1000 }; - int ret = 0, i; - cycles_t start_neon, end_neon; - cycles_t start_fiat32, end_fiat32; - cycles_t start_donna32, end_donna32; + enum { WARMUP = 500, TRIALS = 5000, IDLE = 1 * 1000, STEP = 32, STEPS = 128 }; + u32 key[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; + u32 counter[4] = { 1, 2, 3, 4 }; + u8 *input = NULL, *output = NULL; + cycles_t *trial_times = NULL; + cycles_t median_generic[STEPS], median_ossl_scalar[STEPS], median_ossl_neon[STEPS], median_ard_neon[STEPS]; + size_t i, j; unsigned long flags; DEFINE_SPINLOCK(lock); - if (!verify()) - return -EBFONT; + trial_times = kcalloc(TRIALS + 1, sizeof(cycles_t), GFP_KERNEL); + if (!trial_times) + goto out; + input = kcalloc(STEP, STEPS, GFP_KERNEL); + if (!input) + goto out; + output = kcalloc(STEP, STEPS, GFP_KERNEL); + if (!output) + goto out; + + for (i = 0; i < (STEP * STEPS); ++i) + input[i] = i; msleep(IDLE); spin_lock_irqsave(&lock, flags); - kernel_neon_begin(); - do_it(neon); - kernel_neon_end(); - do_it(fiat32); - do_it(donna32); + for (i = 0; i < STEPS; ++i) { + median_generic[i] = do_it(generic, i * STEP, {}, {}); + median_ossl_scalar[i] = do_it(ossl_scalar, i * STEP, {}, {}); + median_ossl_neon[i] = do_it(ossl_neon, i * STEP, { kernel_neon_begin(); }, { kernel_neon_end(); }); + median_ard_neon[i] = do_it(ard_neon, i * STEP, { kernel_neon_begin(); }, { kernel_neon_end(); }); + } spin_unlock_irqrestore(&lock, flags); - - report_it(neon); - report_it(fiat32); - report_it(donna32); - /* Don't let compiler be too clever. */ - dummy = ret; + pr_err("%lu: %12s %12s %12s %12s %12s\n", stamp, "length", "generic", "ossl scalar", "ossl neon", "ard neon"); + + for (i = 0; i < STEPS; ++i) + pr_err("%lu: %12u %12lu %12lu %12lu %12lu ", stamp, i * STEP, + median_generic[i], median_ossl_scalar[i], median_ossl_neon[i], median_ard_neon[i]); + +out: + kfree(trial_times); + kfree(input); + kfree(output); /* We should never actually agree to insert the module. Choosing * -0x1000 here is an amazing hack. 
It causes the kernel to not diff --git a/openssl.S b/openssl.S new file mode 100644 index 0000000..d3c3f82 --- /dev/null +++ b/openssl.S @@ -0,0 +1,1471 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved. + */ + +#include <linux/linkage.h> + +.text +#if defined(__thumb2__) || defined(__clang__) +.syntax unified +#endif +#if defined(__thumb2__) +.thumb +#else +.code 32 +#endif + +#if defined(__thumb2__) || defined(__clang__) +#define ldrhsb ldrbhs +#endif + +.align 5 +.Lsigma: +.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral +.Lone: +.long 1,0,0,0 +.word -1 + +.align 5 +ENTRY(chacha20_ossl_scalar) + ldr r12,[sp,#0] @ pull pointer to counter and nonce + stmdb sp!,{r0-r2,r4-r11,lr} + cmp r2,#0 @ len==0? +#ifdef __thumb2__ + itt eq +#endif + addeq sp,sp,#4*3 + beq .Lno_data_arm +.Lshort: + ldmia r12,{r4-r7} @ load counter and nonce + sub sp,sp,#4*(16) @ off-load area +#if __LINUX_ARM_ARCH__ < 7 && !defined(__thumb2__) + sub r14,pc,#100 @ .Lsigma +#else + adr r14,.Lsigma @ .Lsigma +#endif + stmdb sp!,{r4-r7} @ copy counter and nonce + ldmia r3,{r4-r11} @ load key + ldmia r14,{r0-r3} @ load sigma + stmdb sp!,{r4-r11} @ copy key + stmdb sp!,{r0-r3} @ copy sigma + str r10,[sp,#4*(16+10)] @ off-load "rx" + str r11,[sp,#4*(16+11)] @ off-load "rx" + b .Loop_outer_enter + +.align 4 +.Loop_outer: + ldmia sp,{r0-r9} @ load key material + str r11,[sp,#4*(32+2)] @ save len + str r12, [sp,#4*(32+1)] @ save inp + str r14, [sp,#4*(32+0)] @ save out +.Loop_outer_enter: + ldr r11, [sp,#4*(15)] + ldr r12,[sp,#4*(12)] @ modulo-scheduled load + ldr r10, [sp,#4*(13)] + ldr r14,[sp,#4*(14)] + str r11, [sp,#4*(16+15)] + mov r11,#10 + b .Loop + +.align 4 +.Loop: + subs r11,r11,#1 + add r0,r0,r4 + mov r12,r12,ror#16 + add r1,r1,r5 + mov r10,r10,ror#16 + eor r12,r12,r0,ror#16 + eor r10,r10,r1,ror#16 + add r8,r8,r12 + mov r4,r4,ror#20 + add r9,r9,r10 + mov r5,r5,ror#20 + eor r4,r4,r8,ror#20 + eor r5,r5,r9,ror#20 + add r0,r0,r4 + mov r12,r12,ror#24 + add r1,r1,r5 + mov r10,r10,ror#24 + eor r12,r12,r0,ror#24 + eor r10,r10,r1,ror#24 + add r8,r8,r12 + mov r4,r4,ror#25 + add r9,r9,r10 + mov r5,r5,ror#25 + str r10,[sp,#4*(16+13)] + ldr r10,[sp,#4*(16+15)] + eor r4,r4,r8,ror#25 + eor r5,r5,r9,ror#25 + str r8,[sp,#4*(16+8)] + ldr r8,[sp,#4*(16+10)] + add r2,r2,r6 + mov r14,r14,ror#16 + str r9,[sp,#4*(16+9)] + ldr r9,[sp,#4*(16+11)] + add r3,r3,r7 + mov r10,r10,ror#16 + eor r14,r14,r2,ror#16 + eor r10,r10,r3,ror#16 + add r8,r8,r14 + mov r6,r6,ror#20 + add r9,r9,r10 + mov r7,r7,ror#20 + eor r6,r6,r8,ror#20 + eor r7,r7,r9,ror#20 + add r2,r2,r6 + mov r14,r14,ror#24 + add r3,r3,r7 + mov r10,r10,ror#24 + eor r14,r14,r2,ror#24 + eor r10,r10,r3,ror#24 + add r8,r8,r14 + mov r6,r6,ror#25 + add r9,r9,r10 + mov r7,r7,ror#25 + eor r6,r6,r8,ror#25 + eor r7,r7,r9,ror#25 + add r0,r0,r5 + mov r10,r10,ror#16 + add r1,r1,r6 + mov r12,r12,ror#16 + eor r10,r10,r0,ror#16 + eor r12,r12,r1,ror#16 + add r8,r8,r10 + mov r5,r5,ror#20 + add r9,r9,r12 + mov r6,r6,ror#20 + eor r5,r5,r8,ror#20 + eor r6,r6,r9,ror#20 + add r0,r0,r5 + mov r10,r10,ror#24 + add r1,r1,r6 + mov r12,r12,ror#24 + eor r10,r10,r0,ror#24 + eor r12,r12,r1,ror#24 + add r8,r8,r10 + mov r5,r5,ror#25 + str r10,[sp,#4*(16+15)] + ldr r10,[sp,#4*(16+13)] + add r9,r9,r12 + mov r6,r6,ror#25 + eor r5,r5,r8,ror#25 + eor r6,r6,r9,ror#25 + str r8,[sp,#4*(16+10)] + ldr r8,[sp,#4*(16+8)] + add r2,r2,r7 + mov 
r10,r10,ror#16 + str r9,[sp,#4*(16+11)] + ldr r9,[sp,#4*(16+9)] + add r3,r3,r4 + mov r14,r14,ror#16 + eor r10,r10,r2,ror#16 + eor r14,r14,r3,ror#16 + add r8,r8,r10 + mov r7,r7,ror#20 + add r9,r9,r14 + mov r4,r4,ror#20 + eor r7,r7,r8,ror#20 + eor r4,r4,r9,ror#20 + add r2,r2,r7 + mov r10,r10,ror#24 + add r3,r3,r4 + mov r14,r14,ror#24 + eor r10,r10,r2,ror#24 + eor r14,r14,r3,ror#24 + add r8,r8,r10 + mov r7,r7,ror#25 + add r9,r9,r14 + mov r4,r4,ror#25 + eor r7,r7,r8,ror#25 + eor r4,r4,r9,ror#25 + bne .Loop + + ldr r11,[sp,#4*(32+2)] @ load len + + str r8, [sp,#4*(16+8)] @ modulo-scheduled store + str r9, [sp,#4*(16+9)] + str r12,[sp,#4*(16+12)] + str r10, [sp,#4*(16+13)] + str r14,[sp,#4*(16+14)] + + @ at this point we have first half of 512-bit result in + @ rx and second half at sp+4*(16+8) + + cmp r11,#64 @ done yet? +#ifdef __thumb2__ + itete lo +#endif + addlo r12,sp,#4*(0) @ shortcut or ... + ldrhs r12,[sp,#4*(32+1)] @ ... load inp + addlo r14,sp,#4*(0) @ shortcut or ... + ldrhs r14,[sp,#4*(32+0)] @ ... load out + + ldr r8,[sp,#4*(0)] @ load key material + ldr r9,[sp,#4*(1)] + +#if __LINUX_ARM_ARCH__ >= 6 || !defined(__ARMEB__) +#if __LINUX_ARM_ARCH__ < 7 + orr r10,r12,r14 + tst r10,#3 @ are input and output aligned? + ldr r10,[sp,#4*(2)] + bne .Lunaligned + cmp r11,#64 @ restore flags +#else + ldr r10,[sp,#4*(2)] +#endif + ldr r11,[sp,#4*(3)] + + add r0,r0,r8 @ accumulate key material + add r1,r1,r9 +#ifdef __thumb2__ + itt hs +#endif + ldrhs r8,[r12],#16 @ load input + ldrhs r9,[r12,#-12] + + add r2,r2,r10 + add r3,r3,r11 +#ifdef __thumb2__ + itt hs +#endif + ldrhs r10,[r12,#-8] + ldrhs r11,[r12,#-4] +#if __LINUX_ARM_ARCH__ >= 6 && defined(__ARMEB__) + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif +#ifdef __thumb2__ + itt hs +#endif + eorhs r0,r0,r8 @ xor with input + eorhs r1,r1,r9 + add r8,sp,#4*(4) + str r0,[r14],#16 @ store output +#ifdef __thumb2__ + itt hs +#endif + eorhs r2,r2,r10 + eorhs r3,r3,r11 + ldmia r8,{r8-r11} @ load key material + str r1,[r14,#-12] + str r2,[r14,#-8] + str r3,[r14,#-4] + + add r4,r4,r8 @ accumulate key material + add r5,r5,r9 +#ifdef __thumb2__ + itt hs +#endif + ldrhs r8,[r12],#16 @ load input + ldrhs r9,[r12,#-12] + add r6,r6,r10 + add r7,r7,r11 +#ifdef __thumb2__ + itt hs +#endif + ldrhs r10,[r12,#-8] + ldrhs r11,[r12,#-4] +#if __LINUX_ARM_ARCH__ >= 6 && defined(__ARMEB__) + rev r4,r4 + rev r5,r5 + rev r6,r6 + rev r7,r7 +#endif +#ifdef __thumb2__ + itt hs +#endif + eorhs r4,r4,r8 + eorhs r5,r5,r9 + add r8,sp,#4*(8) + str r4,[r14],#16 @ store output +#ifdef __thumb2__ + itt hs +#endif + eorhs r6,r6,r10 + eorhs r7,r7,r11 + str r5,[r14,#-12] + ldmia r8,{r8-r11} @ load key material + str r6,[r14,#-8] + add r0,sp,#4*(16+8) + str r7,[r14,#-4] + + ldmia r0,{r0-r7} @ load second half + + add r0,r0,r8 @ accumulate key material + add r1,r1,r9 +#ifdef __thumb2__ + itt hs +#endif + ldrhs r8,[r12],#16 @ load input + ldrhs r9,[r12,#-12] +#ifdef __thumb2__ + itt hi +#endif + strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it + strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it + add r2,r2,r10 + add r3,r3,r11 +#ifdef __thumb2__ + itt hs +#endif + ldrhs r10,[r12,#-8] + ldrhs r11,[r12,#-4] +#if __LINUX_ARM_ARCH__ >= 6 && defined(__ARMEB__) + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif +#ifdef __thumb2__ + itt hs +#endif + eorhs r0,r0,r8 + eorhs r1,r1,r9 + add r8,sp,#4*(12) + str r0,[r14],#16 @ store output +#ifdef __thumb2__ + itt hs +#endif + eorhs r2,r2,r10 + eorhs r3,r3,r11 + str r1,[r14,#-12] + ldmia r8,{r8-r11} @ load key material + str 
r2,[r14,#-8] + str r3,[r14,#-4] + + add r4,r4,r8 @ accumulate key material + add r5,r5,r9 +#ifdef __thumb2__ + itt hi +#endif + addhi r8,r8,#1 @ next counter value + strhi r8,[sp,#4*(12)] @ save next counter value +#ifdef __thumb2__ + itt hs +#endif + ldrhs r8,[r12],#16 @ load input + ldrhs r9,[r12,#-12] + add r6,r6,r10 + add r7,r7,r11 +#ifdef __thumb2__ + itt hs +#endif + ldrhs r10,[r12,#-8] + ldrhs r11,[r12,#-4] +#if __LINUX_ARM_ARCH__ >= 6 && defined(__ARMEB__) + rev r4,r4 + rev r5,r5 + rev r6,r6 + rev r7,r7 +#endif +#ifdef __thumb2__ + itt hs +#endif + eorhs r4,r4,r8 + eorhs r5,r5,r9 +#ifdef __thumb2__ + it ne +#endif + ldrne r8,[sp,#4*(32+2)] @ re-load len +#ifdef __thumb2__ + itt hs +#endif + eorhs r6,r6,r10 + eorhs r7,r7,r11 + str r4,[r14],#16 @ store output + str r5,[r14,#-12] +#ifdef __thumb2__ + it hs +#endif + subhs r11,r8,#64 @ len-=64 + str r6,[r14,#-8] + str r7,[r14,#-4] + bhi .Loop_outer + + beq .Ldone +#if __LINUX_ARM_ARCH__ < 7 + b .Ltail + +.align 4 +.Lunaligned: @ unaligned endian-neutral path + cmp r11,#64 @ restore flags +#endif +#endif +#if __LINUX_ARM_ARCH__ < 7 + ldr r11,[sp,#4*(3)] + add r0,r0,r8 @ accumulate key material + add r1,r1,r9 + add r2,r2,r10 +#ifdef __thumb2__ + itete lo +#endif + eorlo r8,r8,r8 @ zero or ... + ldrhsb r8,[r12],#16 @ ... load input + eorlo r9,r9,r9 + ldrhsb r9,[r12,#-12] + + add r3,r3,r11 +#ifdef __thumb2__ + itete lo +#endif + eorlo r10,r10,r10 + ldrhsb r10,[r12,#-8] + eorlo r11,r11,r11 + ldrhsb r11,[r12,#-4] + + eor r0,r8,r0 @ xor with input (or zero) + eor r1,r9,r1 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-15] @ load more input + ldrhsb r9,[r12,#-11] + eor r2,r10,r2 + strb r0,[r14],#16 @ store output + eor r3,r11,r3 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-7] + ldrhsb r11,[r12,#-3] + strb r1,[r14,#-12] + eor r0,r8,r0,lsr#8 + strb r2,[r14,#-8] + eor r1,r9,r1,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-14] @ load more input + ldrhsb r9,[r12,#-10] + strb r3,[r14,#-4] + eor r2,r10,r2,lsr#8 + strb r0,[r14,#-15] + eor r3,r11,r3,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-6] + ldrhsb r11,[r12,#-2] + strb r1,[r14,#-11] + eor r0,r8,r0,lsr#8 + strb r2,[r14,#-7] + eor r1,r9,r1,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-13] @ load more input + ldrhsb r9,[r12,#-9] + strb r3,[r14,#-3] + eor r2,r10,r2,lsr#8 + strb r0,[r14,#-14] + eor r3,r11,r3,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-5] + ldrhsb r11,[r12,#-1] + strb r1,[r14,#-10] + strb r2,[r14,#-6] + eor r0,r8,r0,lsr#8 + strb r3,[r14,#-2] + eor r1,r9,r1,lsr#8 + strb r0,[r14,#-13] + eor r2,r10,r2,lsr#8 + strb r1,[r14,#-9] + eor r3,r11,r3,lsr#8 + strb r2,[r14,#-5] + strb r3,[r14,#-1] + add r8,sp,#4*(4+0) + ldmia r8,{r8-r11} @ load key material + add r0,sp,#4*(16+8) + add r4,r4,r8 @ accumulate key material + add r5,r5,r9 + add r6,r6,r10 +#ifdef __thumb2__ + itete lo +#endif + eorlo r8,r8,r8 @ zero or ... + ldrhsb r8,[r12],#16 @ ... 
load input + eorlo r9,r9,r9 + ldrhsb r9,[r12,#-12] + + add r7,r7,r11 +#ifdef __thumb2__ + itete lo +#endif + eorlo r10,r10,r10 + ldrhsb r10,[r12,#-8] + eorlo r11,r11,r11 + ldrhsb r11,[r12,#-4] + + eor r4,r8,r4 @ xor with input (or zero) + eor r5,r9,r5 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-15] @ load more input + ldrhsb r9,[r12,#-11] + eor r6,r10,r6 + strb r4,[r14],#16 @ store output + eor r7,r11,r7 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-7] + ldrhsb r11,[r12,#-3] + strb r5,[r14,#-12] + eor r4,r8,r4,lsr#8 + strb r6,[r14,#-8] + eor r5,r9,r5,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-14] @ load more input + ldrhsb r9,[r12,#-10] + strb r7,[r14,#-4] + eor r6,r10,r6,lsr#8 + strb r4,[r14,#-15] + eor r7,r11,r7,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-6] + ldrhsb r11,[r12,#-2] + strb r5,[r14,#-11] + eor r4,r8,r4,lsr#8 + strb r6,[r14,#-7] + eor r5,r9,r5,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-13] @ load more input + ldrhsb r9,[r12,#-9] + strb r7,[r14,#-3] + eor r6,r10,r6,lsr#8 + strb r4,[r14,#-14] + eor r7,r11,r7,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-5] + ldrhsb r11,[r12,#-1] + strb r5,[r14,#-10] + strb r6,[r14,#-6] + eor r4,r8,r4,lsr#8 + strb r7,[r14,#-2] + eor r5,r9,r5,lsr#8 + strb r4,[r14,#-13] + eor r6,r10,r6,lsr#8 + strb r5,[r14,#-9] + eor r7,r11,r7,lsr#8 + strb r6,[r14,#-5] + strb r7,[r14,#-1] + add r8,sp,#4*(4+4) + ldmia r8,{r8-r11} @ load key material + ldmia r0,{r0-r7} @ load second half +#ifdef __thumb2__ + itt hi +#endif + strhi r10,[sp,#4*(16+10)] @ copy "rx" + strhi r11,[sp,#4*(16+11)] @ copy "rx" + add r0,r0,r8 @ accumulate key material + add r1,r1,r9 + add r2,r2,r10 +#ifdef __thumb2__ + itete lo +#endif + eorlo r8,r8,r8 @ zero or ... + ldrhsb r8,[r12],#16 @ ... 
load input + eorlo r9,r9,r9 + ldrhsb r9,[r12,#-12] + + add r3,r3,r11 +#ifdef __thumb2__ + itete lo +#endif + eorlo r10,r10,r10 + ldrhsb r10,[r12,#-8] + eorlo r11,r11,r11 + ldrhsb r11,[r12,#-4] + + eor r0,r8,r0 @ xor with input (or zero) + eor r1,r9,r1 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-15] @ load more input + ldrhsb r9,[r12,#-11] + eor r2,r10,r2 + strb r0,[r14],#16 @ store output + eor r3,r11,r3 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-7] + ldrhsb r11,[r12,#-3] + strb r1,[r14,#-12] + eor r0,r8,r0,lsr#8 + strb r2,[r14,#-8] + eor r1,r9,r1,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-14] @ load more input + ldrhsb r9,[r12,#-10] + strb r3,[r14,#-4] + eor r2,r10,r2,lsr#8 + strb r0,[r14,#-15] + eor r3,r11,r3,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-6] + ldrhsb r11,[r12,#-2] + strb r1,[r14,#-11] + eor r0,r8,r0,lsr#8 + strb r2,[r14,#-7] + eor r1,r9,r1,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-13] @ load more input + ldrhsb r9,[r12,#-9] + strb r3,[r14,#-3] + eor r2,r10,r2,lsr#8 + strb r0,[r14,#-14] + eor r3,r11,r3,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-5] + ldrhsb r11,[r12,#-1] + strb r1,[r14,#-10] + strb r2,[r14,#-6] + eor r0,r8,r0,lsr#8 + strb r3,[r14,#-2] + eor r1,r9,r1,lsr#8 + strb r0,[r14,#-13] + eor r2,r10,r2,lsr#8 + strb r1,[r14,#-9] + eor r3,r11,r3,lsr#8 + strb r2,[r14,#-5] + strb r3,[r14,#-1] + add r8,sp,#4*(4+8) + ldmia r8,{r8-r11} @ load key material + add r4,r4,r8 @ accumulate key material +#ifdef __thumb2__ + itt hi +#endif + addhi r8,r8,#1 @ next counter value + strhi r8,[sp,#4*(12)] @ save next counter value + add r5,r5,r9 + add r6,r6,r10 +#ifdef __thumb2__ + itete lo +#endif + eorlo r8,r8,r8 @ zero or ... + ldrhsb r8,[r12],#16 @ ... 
load input + eorlo r9,r9,r9 + ldrhsb r9,[r12,#-12] + + add r7,r7,r11 +#ifdef __thumb2__ + itete lo +#endif + eorlo r10,r10,r10 + ldrhsb r10,[r12,#-8] + eorlo r11,r11,r11 + ldrhsb r11,[r12,#-4] + + eor r4,r8,r4 @ xor with input (or zero) + eor r5,r9,r5 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-15] @ load more input + ldrhsb r9,[r12,#-11] + eor r6,r10,r6 + strb r4,[r14],#16 @ store output + eor r7,r11,r7 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-7] + ldrhsb r11,[r12,#-3] + strb r5,[r14,#-12] + eor r4,r8,r4,lsr#8 + strb r6,[r14,#-8] + eor r5,r9,r5,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-14] @ load more input + ldrhsb r9,[r12,#-10] + strb r7,[r14,#-4] + eor r6,r10,r6,lsr#8 + strb r4,[r14,#-15] + eor r7,r11,r7,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-6] + ldrhsb r11,[r12,#-2] + strb r5,[r14,#-11] + eor r4,r8,r4,lsr#8 + strb r6,[r14,#-7] + eor r5,r9,r5,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r8,[r12,#-13] @ load more input + ldrhsb r9,[r12,#-9] + strb r7,[r14,#-3] + eor r6,r10,r6,lsr#8 + strb r4,[r14,#-14] + eor r7,r11,r7,lsr#8 +#ifdef __thumb2__ + itt hs +#endif + ldrhsb r10,[r12,#-5] + ldrhsb r11,[r12,#-1] + strb r5,[r14,#-10] + strb r6,[r14,#-6] + eor r4,r8,r4,lsr#8 + strb r7,[r14,#-2] + eor r5,r9,r5,lsr#8 + strb r4,[r14,#-13] + eor r6,r10,r6,lsr#8 + strb r5,[r14,#-9] + eor r7,r11,r7,lsr#8 + strb r6,[r14,#-5] + strb r7,[r14,#-1] +#ifdef __thumb2__ + it ne +#endif + ldrne r8,[sp,#4*(32+2)] @ re-load len +#ifdef __thumb2__ + it hs +#endif + subhs r11,r8,#64 @ len-=64 + bhi .Loop_outer + + beq .Ldone +#endif + +.Ltail: + ldr r12,[sp,#4*(32+1)] @ load inp + add r9,sp,#4*(0) + ldr r14,[sp,#4*(32+0)] @ load out + +.Loop_tail: + ldrb r10,[r9],#1 @ read buffer on stack + ldrb r11,[r12],#1 @ read input + subs r8,r8,#1 + eor r11,r11,r10 + strb r11,[r14],#1 @ store output + bne .Loop_tail + +.Ldone: + add sp,sp,#4*(32+3) +.Lno_data_arm: + ldmia sp!,{r4-r11,pc} +ENDPROC(chacha20_ossl_scalar) + +#if __LINUX_ARM_ARCH__ >= 7 && IS_ENABLED(CONFIG_KERNEL_MODE_NEON) +.align 5 +.Lsigma2: +.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral +.Lone2: +.long 1,0,0,0 +.word -1 + +.arch armv7-a +.fpu neon + +.align 5 +ENTRY(chacha20_ossl_neon) + ldr r12,[sp,#0] @ pull pointer to counter and nonce + stmdb sp!,{r0-r2,r4-r11,lr} + cmp r2,#0 @ len==0? +#ifdef __thumb2__ + itt eq +#endif + addeq sp,sp,#4*3 + beq .Lno_data_neon + cmp r2,#192 @ test len + bls .Lshort +.Lchacha20_ossl_neon_begin: + adr r14,.Lsigma2 + vstmdb sp!,{d8-d15} @ ABI spec says so + stmdb sp!,{r0-r3} + + vld1.32 {q1-q2},[r3] @ load key + ldmia r3,{r4-r11} @ load key + + sub sp,sp,#4*(16+16) + vld1.32 {q3},[r12] @ load counter and nonce + add r12,sp,#4*8 + ldmia r14,{r0-r3} @ load sigma + vld1.32 {q0},[r14]! 
@ load sigma + vld1.32 {q12},[r14] @ one + vst1.32 {q2-q3},[r12] @ copy 1/2key|counter|nonce + vst1.32 {q0-q1},[sp] @ copy sigma|1/2key + + str r10,[sp,#4*(16+10)] @ off-load "rx" + str r11,[sp,#4*(16+11)] @ off-load "rx" + vshl.i32 d26,d24,#1 @ two + vstr d24,[sp,#4*(16+0)] + vshl.i32 d28,d24,#2 @ four + vstr d26,[sp,#4*(16+2)] + vmov q4,q0 + vstr d28,[sp,#4*(16+4)] + vmov q8,q0 + vmov q5,q1 + vmov q9,q1 + b .Loop_neon_enter + +.align 4 +.Loop_neon_outer: + ldmia sp,{r0-r9} @ load key material + cmp r11,#64*2 @ if len<=64*2 + bls .Lbreak_neon @ switch to integer-only + vmov q4,q0 + str r11,[sp,#4*(32+2)] @ save len + vmov q8,q0 + str r12, [sp,#4*(32+1)] @ save inp + vmov q5,q1 + str r14, [sp,#4*(32+0)] @ save out + vmov q9,q1 +.Loop_neon_enter: + ldr r11, [sp,#4*(15)] + vadd.i32 q7,q3,q12 @ counter+1 + ldr r12,[sp,#4*(12)] @ modulo-scheduled load + vmov q6,q2 + ldr r10, [sp,#4*(13)] + vmov q10,q2 + ldr r14,[sp,#4*(14)] + vadd.i32 q11,q7,q12 @ counter+2 + str r11, [sp,#4*(16+15)] + mov r11,#10 + add r12,r12,#3 @ counter+3 + b .Loop_neon + +.align 4 +.Loop_neon: + subs r11,r11,#1 + vadd.i32 q0,q0,q1 + add r0,r0,r4 + vadd.i32 q4,q4,q5 + mov r12,r12,ror#16 + vadd.i32 q8,q8,q9 + add r1,r1,r5 + veor q3,q3,q0 + mov r10,r10,ror#16 + veor q7,q7,q4 + eor r12,r12,r0,ror#16 + veor q11,q11,q8 + eor r10,r10,r1,ror#16 + vrev32.16 q3,q3 + add r8,r8,r12 + vrev32.16 q7,q7 + mov r4,r4,ror#20 + vrev32.16 q11,q11 + add r9,r9,r10 + vadd.i32 q2,q2,q3 + mov r5,r5,ror#20 + vadd.i32 q6,q6,q7 + eor r4,r4,r8,ror#20 + vadd.i32 q10,q10,q11 + eor r5,r5,r9,ror#20 + veor q12,q1,q2 + add r0,r0,r4 + veor q13,q5,q6 + mov r12,r12,ror#24 + veor q14,q9,q10 + add r1,r1,r5 + vshr.u32 q1,q12,#20 + mov r10,r10,ror#24 + vshr.u32 q5,q13,#20 + eor r12,r12,r0,ror#24 + vshr.u32 q9,q14,#20 + eor r10,r10,r1,ror#24 + vsli.32 q1,q12,#12 + add r8,r8,r12 + vsli.32 q5,q13,#12 + mov r4,r4,ror#25 + vsli.32 q9,q14,#12 + add r9,r9,r10 + vadd.i32 q0,q0,q1 + mov r5,r5,ror#25 + vadd.i32 q4,q4,q5 + str r10,[sp,#4*(16+13)] + vadd.i32 q8,q8,q9 + ldr r10,[sp,#4*(16+15)] + veor q12,q3,q0 + eor r4,r4,r8,ror#25 + veor q13,q7,q4 + eor r5,r5,r9,ror#25 + veor q14,q11,q8 + str r8,[sp,#4*(16+8)] + vshr.u32 q3,q12,#24 + ldr r8,[sp,#4*(16+10)] + vshr.u32 q7,q13,#24 + add r2,r2,r6 + vshr.u32 q11,q14,#24 + mov r14,r14,ror#16 + vsli.32 q3,q12,#8 + str r9,[sp,#4*(16+9)] + vsli.32 q7,q13,#8 + ldr r9,[sp,#4*(16+11)] + vsli.32 q11,q14,#8 + add r3,r3,r7 + vadd.i32 q2,q2,q3 + mov r10,r10,ror#16 + vadd.i32 q6,q6,q7 + eor r14,r14,r2,ror#16 + vadd.i32 q10,q10,q11 + eor r10,r10,r3,ror#16 + veor q12,q1,q2 + add r8,r8,r14 + veor q13,q5,q6 + mov r6,r6,ror#20 + veor q14,q9,q10 + add r9,r9,r10 + vshr.u32 q1,q12,#25 + mov r7,r7,ror#20 + vshr.u32 q5,q13,#25 + eor r6,r6,r8,ror#20 + vshr.u32 q9,q14,#25 + eor r7,r7,r9,ror#20 + vsli.32 q1,q12,#7 + add r2,r2,r6 + vsli.32 q5,q13,#7 + mov r14,r14,ror#24 + vsli.32 q9,q14,#7 + add r3,r3,r7 + vext.8 q2,q2,q2,#8 + mov r10,r10,ror#24 + vext.8 q6,q6,q6,#8 + eor r14,r14,r2,ror#24 + vext.8 q10,q10,q10,#8 + eor r10,r10,r3,ror#24 + vext.8 q1,q1,q1,#4 + add r8,r8,r14 + vext.8 q5,q5,q5,#4 + mov r6,r6,ror#25 + vext.8 q9,q9,q9,#4 + add r9,r9,r10 + vext.8 q3,q3,q3,#12 + mov r7,r7,ror#25 + vext.8 q7,q7,q7,#12 + eor r6,r6,r8,ror#25 + vext.8 q11,q11,q11,#12 + eor r7,r7,r9,ror#25 + vadd.i32 q0,q0,q1 + add r0,r0,r5 + vadd.i32 q4,q4,q5 + mov r10,r10,ror#16 + vadd.i32 q8,q8,q9 + add r1,r1,r6 + veor q3,q3,q0 + mov r12,r12,ror#16 + veor q7,q7,q4 + eor r10,r10,r0,ror#16 + veor q11,q11,q8 + eor r12,r12,r1,ror#16 + vrev32.16 q3,q3 + add r8,r8,r10 + vrev32.16 q7,q7 + 
mov r5,r5,ror#20 + vrev32.16 q11,q11 + add r9,r9,r12 + vadd.i32 q2,q2,q3 + mov r6,r6,ror#20 + vadd.i32 q6,q6,q7 + eor r5,r5,r8,ror#20 + vadd.i32 q10,q10,q11 + eor r6,r6,r9,ror#20 + veor q12,q1,q2 + add r0,r0,r5 + veor q13,q5,q6 + mov r10,r10,ror#24 + veor q14,q9,q10 + add r1,r1,r6 + vshr.u32 q1,q12,#20 + mov r12,r12,ror#24 + vshr.u32 q5,q13,#20 + eor r10,r10,r0,ror#24 + vshr.u32 q9,q14,#20 + eor r12,r12,r1,ror#24 + vsli.32 q1,q12,#12 + add r8,r8,r10 + vsli.32 q5,q13,#12 + mov r5,r5,ror#25 + vsli.32 q9,q14,#12 + str r10,[sp,#4*(16+15)] + vadd.i32 q0,q0,q1 + ldr r10,[sp,#4*(16+13)] + vadd.i32 q4,q4,q5 + add r9,r9,r12 + vadd.i32 q8,q8,q9 + mov r6,r6,ror#25 + veor q12,q3,q0 + eor r5,r5,r8,ror#25 + veor q13,q7,q4 + eor r6,r6,r9,ror#25 + veor q14,q11,q8 + str r8,[sp,#4*(16+10)] + vshr.u32 q3,q12,#24 + ldr r8,[sp,#4*(16+8)] + vshr.u32 q7,q13,#24 + add r2,r2,r7 + vshr.u32 q11,q14,#24 + mov r10,r10,ror#16 + vsli.32 q3,q12,#8 + str r9,[sp,#4*(16+11)] + vsli.32 q7,q13,#8 + ldr r9,[sp,#4*(16+9)] + vsli.32 q11,q14,#8 + add r3,r3,r4 + vadd.i32 q2,q2,q3 + mov r14,r14,ror#16 + vadd.i32 q6,q6,q7 + eor r10,r10,r2,ror#16 + vadd.i32 q10,q10,q11 + eor r14,r14,r3,ror#16 + veor q12,q1,q2 + add r8,r8,r10 + veor q13,q5,q6 + mov r7,r7,ror#20 + veor q14,q9,q10 + add r9,r9,r14 + vshr.u32 q1,q12,#25 + mov r4,r4,ror#20 + vshr.u32 q5,q13,#25 + eor r7,r7,r8,ror#20 + vshr.u32 q9,q14,#25 + eor r4,r4,r9,ror#20 + vsli.32 q1,q12,#7 + add r2,r2,r7 + vsli.32 q5,q13,#7 + mov r10,r10,ror#24 + vsli.32 q9,q14,#7 + add r3,r3,r4 + vext.8 q2,q2,q2,#8 + mov r14,r14,ror#24 + vext.8 q6,q6,q6,#8 + eor r10,r10,r2,ror#24 + vext.8 q10,q10,q10,#8 + eor r14,r14,r3,ror#24 + vext.8 q1,q1,q1,#12 + add r8,r8,r10 + vext.8 q5,q5,q5,#12 + mov r7,r7,ror#25 + vext.8 q9,q9,q9,#12 + add r9,r9,r14 + vext.8 q3,q3,q3,#4 + mov r4,r4,ror#25 + vext.8 q7,q7,q7,#4 + eor r7,r7,r8,ror#25 + vext.8 q11,q11,q11,#4 + eor r4,r4,r9,ror#25 + bne .Loop_neon + + add r11,sp,#32 + vld1.32 {q12-q13},[sp] @ load key material + vld1.32 {q14-q15},[r11] + + ldr r11,[sp,#4*(32+2)] @ load len + + str r8, [sp,#4*(16+8)] @ modulo-scheduled store + str r9, [sp,#4*(16+9)] + str r12,[sp,#4*(16+12)] + str r10, [sp,#4*(16+13)] + str r14,[sp,#4*(16+14)] + + @ at this point we have first half of 512-bit result in + @ rx and second half at sp+4*(16+8) + + ldr r12,[sp,#4*(32+1)] @ load inp + ldr r14,[sp,#4*(32+0)] @ load out + + vadd.i32 q0,q0,q12 @ accumulate key material + vadd.i32 q4,q4,q12 + vadd.i32 q8,q8,q12 + vldr d24,[sp,#4*(16+0)] @ one + + vadd.i32 q1,q1,q13 + vadd.i32 q5,q5,q13 + vadd.i32 q9,q9,q13 + vldr d26,[sp,#4*(16+2)] @ two + + vadd.i32 q2,q2,q14 + vadd.i32 q6,q6,q14 + vadd.i32 q10,q10,q14 + vadd.i32 d14,d14,d24 @ counter+1 + vadd.i32 d22,d22,d26 @ counter+2 + + vadd.i32 q3,q3,q15 + vadd.i32 q7,q7,q15 + vadd.i32 q11,q11,q15 + + cmp r11,#64*4 + blo .Ltail_neon + + vld1.8 {q12-q13},[r12]! @ load input + mov r11,sp + vld1.8 {q14-q15},[r12]! + veor q0,q0,q12 @ xor with input + veor q1,q1,q13 + vld1.8 {q12-q13},[r12]! + veor q2,q2,q14 + veor q3,q3,q15 + vld1.8 {q14-q15},[r12]! + + veor q4,q4,q12 + vst1.8 {q0-q1},[r14]! @ store output + veor q5,q5,q13 + vld1.8 {q12-q13},[r12]! + veor q6,q6,q14 + vst1.8 {q2-q3},[r14]! + veor q7,q7,q15 + vld1.8 {q14-q15},[r12]! + + veor q8,q8,q12 + vld1.32 {q0-q1},[r11]! @ load for next iteration + veor d25,d25,d25 + vldr d24,[sp,#4*(16+4)] @ four + veor q9,q9,q13 + vld1.32 {q2-q3},[r11] + veor q10,q10,q14 + vst1.8 {q4-q5},[r14]! + veor q11,q11,q15 + vst1.8 {q6-q7},[r14]! 
+ + vadd.i32 d6,d6,d24 @ next counter value + vldr d24,[sp,#4*(16+0)] @ one + + ldmia sp,{r8-r11} @ load key material + add r0,r0,r8 @ accumulate key material + ldr r8,[r12],#16 @ load input + vst1.8 {q8-q9},[r14]! + add r1,r1,r9 + ldr r9,[r12,#-12] + vst1.8 {q10-q11},[r14]! + add r2,r2,r10 + ldr r10,[r12,#-8] + add r3,r3,r11 + ldr r11,[r12,#-4] +#ifdef __ARMEB__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif + eor r0,r0,r8 @ xor with input + add r8,sp,#4*(4) + eor r1,r1,r9 + str r0,[r14],#16 @ store output + eor r2,r2,r10 + str r1,[r14,#-12] + eor r3,r3,r11 + ldmia r8,{r8-r11} @ load key material + str r2,[r14,#-8] + str r3,[r14,#-4] + + add r4,r4,r8 @ accumulate key material + ldr r8,[r12],#16 @ load input + add r5,r5,r9 + ldr r9,[r12,#-12] + add r6,r6,r10 + ldr r10,[r12,#-8] + add r7,r7,r11 + ldr r11,[r12,#-4] +#ifdef __ARMEB__ + rev r4,r4 + rev r5,r5 + rev r6,r6 + rev r7,r7 +#endif + eor r4,r4,r8 + add r8,sp,#4*(8) + eor r5,r5,r9 + str r4,[r14],#16 @ store output + eor r6,r6,r10 + str r5,[r14,#-12] + eor r7,r7,r11 + ldmia r8,{r8-r11} @ load key material + str r6,[r14,#-8] + add r0,sp,#4*(16+8) + str r7,[r14,#-4] + + ldmia r0,{r0-r7} @ load second half + + add r0,r0,r8 @ accumulate key material + ldr r8,[r12],#16 @ load input + add r1,r1,r9 + ldr r9,[r12,#-12] +#ifdef __thumb2__ + it hi +#endif + strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it + add r2,r2,r10 + ldr r10,[r12,#-8] +#ifdef __thumb2__ + it hi +#endif + strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it + add r3,r3,r11 + ldr r11,[r12,#-4] +#ifdef __ARMEB__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif + eor r0,r0,r8 + add r8,sp,#4*(12) + eor r1,r1,r9 + str r0,[r14],#16 @ store output + eor r2,r2,r10 + str r1,[r14,#-12] + eor r3,r3,r11 + ldmia r8,{r8-r11} @ load key material + str r2,[r14,#-8] + str r3,[r14,#-4] + + add r4,r4,r8 @ accumulate key material + add r8,r8,#4 @ next counter value + add r5,r5,r9 + str r8,[sp,#4*(12)] @ save next counter value + ldr r8,[r12],#16 @ load input + add r6,r6,r10 + add r4,r4,#3 @ counter+3 + ldr r9,[r12,#-12] + add r7,r7,r11 + ldr r10,[r12,#-8] + ldr r11,[r12,#-4] +#ifdef __ARMEB__ + rev r4,r4 + rev r5,r5 + rev r6,r6 + rev r7,r7 +#endif + eor r4,r4,r8 +#ifdef __thumb2__ + it hi +#endif + ldrhi r8,[sp,#4*(32+2)] @ re-load len + eor r5,r5,r9 + eor r6,r6,r10 + str r4,[r14],#16 @ store output + eor r7,r7,r11 + str r5,[r14,#-12] + sub r11,r8,#64*4 @ len-=64*4 + str r6,[r14,#-8] + str r7,[r14,#-4] + bhi .Loop_neon_outer + + b .Ldone_neon + +.align 4 +.Lbreak_neon: + @ harmonize NEON and integer-only stack frames: load data + @ from NEON frame, but save to integer-only one; distance + @ between the two is 4*(32+4+16-32)=4*(20). + + str r11, [sp,#4*(20+32+2)] @ save len + add r11,sp,#4*(32+4) + str r12, [sp,#4*(20+32+1)] @ save inp + str r14, [sp,#4*(20+32+0)] @ save out + + ldr r12,[sp,#4*(16+10)] + ldr r14,[sp,#4*(16+11)] + vldmia r11,{d8-d15} @ fulfill ABI requirement + str r12,[sp,#4*(20+16+10)] @ copy "rx" + str r14,[sp,#4*(20+16+11)] @ copy "rx" + + ldr r11, [sp,#4*(15)] + ldr r12,[sp,#4*(12)] @ modulo-scheduled load + ldr r10, [sp,#4*(13)] + ldr r14,[sp,#4*(14)] + str r11, [sp,#4*(20+16+15)] + add r11,sp,#4*(20) + vst1.32 {q0-q1},[r11]! 
@ copy key + add sp,sp,#4*(20) @ switch frame + vst1.32 {q2-q3},[r11] + mov r11,#10 + b .Loop @ go integer-only + +.align 4 +.Ltail_neon: + cmp r11,#64*3 + bhs .L192_or_more_neon + cmp r11,#64*2 + bhs .L128_or_more_neon + cmp r11,#64*1 + bhs .L64_or_more_neon + + add r8,sp,#4*(8) + vst1.8 {q0-q1},[sp] + add r10,sp,#4*(0) + vst1.8 {q2-q3},[r8] + b .Loop_tail_neon + +.align 4 +.L64_or_more_neon: + vld1.8 {q12-q13},[r12]! + vld1.8 {q14-q15},[r12]! + veor q0,q0,q12 + veor q1,q1,q13 + veor q2,q2,q14 + veor q3,q3,q15 + vst1.8 {q0-q1},[r14]! + vst1.8 {q2-q3},[r14]! + + beq .Ldone_neon + + add r8,sp,#4*(8) + vst1.8 {q4-q5},[sp] + add r10,sp,#4*(0) + vst1.8 {q6-q7},[r8] + sub r11,r11,#64*1 @ len-=64*1 + b .Loop_tail_neon + +.align 4 +.L128_or_more_neon: + vld1.8 {q12-q13},[r12]! + vld1.8 {q14-q15},[r12]! + veor q0,q0,q12 + veor q1,q1,q13 + vld1.8 {q12-q13},[r12]! + veor q2,q2,q14 + veor q3,q3,q15 + vld1.8 {q14-q15},[r12]! + + veor q4,q4,q12 + veor q5,q5,q13 + vst1.8 {q0-q1},[r14]! + veor q6,q6,q14 + vst1.8 {q2-q3},[r14]! + veor q7,q7,q15 + vst1.8 {q4-q5},[r14]! + vst1.8 {q6-q7},[r14]! + + beq .Ldone_neon + + add r8,sp,#4*(8) + vst1.8 {q8-q9},[sp] + add r10,sp,#4*(0) + vst1.8 {q10-q11},[r8] + sub r11,r11,#64*2 @ len-=64*2 + b .Loop_tail_neon + +.align 4 +.L192_or_more_neon: + vld1.8 {q12-q13},[r12]! + vld1.8 {q14-q15},[r12]! + veor q0,q0,q12 + veor q1,q1,q13 + vld1.8 {q12-q13},[r12]! + veor q2,q2,q14 + veor q3,q3,q15 + vld1.8 {q14-q15},[r12]! + + veor q4,q4,q12 + veor q5,q5,q13 + vld1.8 {q12-q13},[r12]! + veor q6,q6,q14 + vst1.8 {q0-q1},[r14]! + veor q7,q7,q15 + vld1.8 {q14-q15},[r12]! + + veor q8,q8,q12 + vst1.8 {q2-q3},[r14]! + veor q9,q9,q13 + vst1.8 {q4-q5},[r14]! + veor q10,q10,q14 + vst1.8 {q6-q7},[r14]! + veor q11,q11,q15 + vst1.8 {q8-q9},[r14]! + vst1.8 {q10-q11},[r14]! + + beq .Ldone_neon + + ldmia sp,{r8-r11} @ load key material + add r0,r0,r8 @ accumulate key material + add r8,sp,#4*(4) + add r1,r1,r9 + add r2,r2,r10 + add r3,r3,r11 + ldmia r8,{r8-r11} @ load key material + + add r4,r4,r8 @ accumulate key material + add r8,sp,#4*(8) + add r5,r5,r9 + add r6,r6,r10 + add r7,r7,r11 + ldmia r8,{r8-r11} @ load key material +#ifdef __ARMEB__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 + rev r4,r4 + rev r5,r5 + rev r6,r6 + rev r7,r7 +#endif + stmia sp,{r0-r7} + add r0,sp,#4*(16+8) + + ldmia r0,{r0-r7} @ load second half + + add r0,r0,r8 @ accumulate key material + add r8,sp,#4*(12) + add r1,r1,r9 + add r2,r2,r10 + add r3,r3,r11 + ldmia r8,{r8-r11} @ load key material + + add r4,r4,r8 @ accumulate key material + add r8,sp,#4*(8) + add r5,r5,r9 + add r4,r4,#3 @ counter+3 + add r6,r6,r10 + add r7,r7,r11 + ldr r11,[sp,#4*(32+2)] @ re-load len +#ifdef __ARMEB__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 + rev r4,r4 + rev r5,r5 + rev r6,r6 + rev r7,r7 +#endif + stmia r8,{r0-r7} + add r10,sp,#4*(0) + sub r11,r11,#64*3 @ len-=64*3 + +.Loop_tail_neon: + ldrb r8,[r10],#1 @ read buffer on stack + ldrb r9,[r12],#1 @ read input + subs r11,r11,#1 + eor r8,r8,r9 + strb r8,[r14],#1 @ store output + bne .Loop_tail_neon + +.Ldone_neon: + add sp,sp,#4*(32+4) + vldmia sp,{d8-d15} + add sp,sp,#4*(16+3) +.Lno_data_neon: + ldmia sp!,{r4-r11,pc} +ENDPROC(chacha20_ossl_neon) +#endif diff --git a/test_vectors.h b/test_vectors.h deleted file mode 100644 index 91b24ee..0000000 --- a/test_vectors.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * - * Copyright (C) 2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
- */ - -struct curve25519_test_vector { - u8 private[CURVE25519_POINT_SIZE]; - u8 public[CURVE25519_POINT_SIZE]; - u8 result[CURVE25519_POINT_SIZE]; -}; - -static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = { - { - .private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d, 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45, 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a, 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a }, - .public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4, 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37, 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d, 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f }, - .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 } - }, - { - .private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b, 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6, 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd, 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb }, - .public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54, 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a, 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4, 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a }, - .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 } - }, - { - .private = { 1 }, - .public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64, 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d, 0xb, 0x95, 0x48, 0xdc, 0xc, 0xd8, 0x19, 0x98, 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f } - }, - { - .private = { 1 }, - .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f, 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d, 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x8, 0xed, 0xe3, 0xb, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 } - }, - { - .private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 }, - .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, - .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 } - }, - { - .private = { 1, 2, 3, 4 }, - .public = { 0 }, - .result = { 0 } - }, - { - .private = { 2, 4, 6, 8 }, - .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 }, - .result = { 0 } - } -};
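
A few illustrative notes on the new code follow, each with a small standalone userspace C sketch. The sketches are explanatory only; they are not part of the patch, and every helper name in them (chacha20_block, now_ns, and so on) is made up for illustration.

First, the QUARTER_ROUND/DOUBLE_ROUND/TWENTY_ROUNDS macros in generic.c implement the standard RFC 7539 block function. Here is a minimal sketch of the same 20-round block, written with plain functions and checked against the widely published all-zero test vector (zero key, zero nonce, block counter 0, keystream beginning 76 b8 e0 ad a0 f1 3d 90 ...):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

static void quarter_round(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] = ROL32(x[d] ^ x[a], 16);
	x[c] += x[d]; x[b] = ROL32(x[b] ^ x[c], 12);
	x[a] += x[b]; x[d] = ROL32(x[d] ^ x[a], 8);
	x[c] += x[d]; x[b] = ROL32(x[b] ^ x[c], 7);
}

static void chacha20_block(uint8_t out[64], const uint32_t state[16])
{
	uint32_t x[16];
	int i;

	memcpy(x, state, sizeof(x));
	for (i = 0; i < 10; ++i) {		/* 20 rounds = 10 double rounds */
		quarter_round(x, 0, 4,  8, 12);	/* column round */
		quarter_round(x, 1, 5,  9, 13);
		quarter_round(x, 2, 6, 10, 14);
		quarter_round(x, 3, 7, 11, 15);
		quarter_round(x, 0, 5, 10, 15);	/* diagonal round */
		quarter_round(x, 1, 6, 11, 12);
		quarter_round(x, 2, 7,  8, 13);
		quarter_round(x, 3, 4,  9, 14);
	}
	for (i = 0; i < 16; ++i) {		/* feed-forward, little-endian output */
		uint32_t v = x[i] + state[i];
		out[4 * i + 0] = (uint8_t)v;
		out[4 * i + 1] = (uint8_t)(v >> 8);
		out[4 * i + 2] = (uint8_t)(v >> 16);
		out[4 * i + 3] = (uint8_t)(v >> 24);
	}
}

int main(void)
{
	/* "expand 32-byte k" followed by an all-zero key, counter and nonce */
	uint32_t state[16] = { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 };
	static const uint8_t expected[16] = {
		0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90,
		0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28
	};
	uint8_t out[64];

	chacha20_block(out, state);
	printf("%s\n", memcmp(out, expected, sizeof(expected)) ? "FAIL" : "OK");
	return 0;
}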
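
Second, the reworked do_it() in main.c no longer reports a mean over one long run; it takes TRIALS + 1 cycle stamps around back-to-back calls, converts them to successive per-call deltas, sorts, and reports the median, which is far less sensitive to the odd interrupt or cache miss (the whole sweep additionally runs under spin_lock_irqsave). Below is a rough userspace rendition of the same idea, with clock_gettime() standing in for get_cycles() and memset() as a placeholder workload. One detail of the patch worth noting: it sorts all TRIALS + 1 slots even though only TRIALS hold deltas, so the one leftover raw stamp sorts to the top and barely moves the median; the sketch sorts just the deltas.

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

enum { WARMUP = 500, TRIALS = 5000 };

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static int compare_times(const void *a, const void *b)
{
	long long x = *(const long long *)a, y = *(const long long *)b;
	return (x > y) - (x < y);
}

int main(void)
{
	static long long t[TRIALS + 1];
	static unsigned char buf[4096];
	volatile unsigned int sink = 0;
	int j;

	for (j = 0; j < WARMUP; ++j)		/* warm caches and predictors */
		memset(buf, j, sizeof(buf));
	for (j = 0; j <= TRIALS; ++j) {		/* TRIALS + 1 stamps bracket TRIALS calls */
		t[j] = now_ns();
		memset(buf, j, sizeof(buf));
		sink += buf[0];			/* keep the workload from being elided */
	}
	for (j = 0; j < TRIALS; ++j)		/* stamps -> per-call deltas */
		t[j] = t[j + 1] - t[j];
	qsort(t, TRIALS, sizeof(t[0]), compare_times);
	printf("median: %lld ns per call (sink=%u)\n", t[TRIALS / 2], sink);
	return 0;
}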
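
Finally, a note on the ossl_scalar rounds: pairs like "mov r4,r4,ror#20 / eor r4,r4,r8,ror#20" look nothing like the reference quarter round, but they lean on the ARM barrel shifter. Rotation distributes over XOR, so ror(b,n) ^ ror(c,n) == ror(b ^ c,n), and ror by 20 is rol by 12, meaning the quarter round's rotate comes for free as the eor's shifted operand. (The real loop goes further and keeps values pre-rotated across steps; this sketch only checks the basic identity.)

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t v, unsigned int n) { return (v >> n) | (v << (32 - n)); }
static uint32_t rol32(uint32_t v, unsigned int n) { return (v << n) | (v >> (32 - n)); }

int main(void)
{
	uint32_t b = 0xdeadbeef, c = 0x12345678;	/* arbitrary values */
	/* what "mov rb,rb,ror#20; eor rb,rb,rc,ror#20" computes ... */
	uint32_t arm = ror32(b, 20) ^ ror32(c, 20);
	/* ... versus QUARTER_ROUND's x[b] = rol32(x[b] ^ x[c], 12) */
	uint32_t ref = rol32(b ^ c, 12);

	printf("%08x %08x %s\n", arm, ref, arm == ref ? "match" : "mismatch");
	return 0;
}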