-rw-r--r--	Makefile           |   2
-rw-r--r--	blake2s-compress.S | 762
-rw-r--r--	curve25519-u128.c  | 420
-rw-r--r--	function.h         |  68
-rw-r--r--	main.c             |  80
-rwxr-xr-x	run.sh             |  16
6 files changed, 847 insertions, 501 deletions
diff --git a/Makefile b/Makefile
@@ -1,5 +1,5 @@
 ifneq ($(KERNELRELEASE),)
-kbench9000-y := main.o curve25519-u128.o
+kbench9000-y := main.o blake2s-compress.o
 obj-m := kbench9000.o
 ccflags-y += -O3
 ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
diff --git a/blake2s-compress.S b/blake2s-compress.S
new file mode 100644
index 0000000..5b67a54
--- /dev/null
+++ b/blake2s-compress.S
@@ -0,0 +1,762 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2017 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
+ */
+
+#include <linux/linkage.h>
+
+.section .rodata.cst32.BLAKE2S_IV, "aM", @progbits, 32
+.align 32
+IV:	.octa 0xA54FF53A3C6EF372BB67AE856A09E667
+	.octa 0x5BE0CD191F83D9AB9B05688C510E527F
+.section .rodata.cst16.ROT16, "aM", @progbits, 16
+.align 16
+ROT16:	.octa 0x0D0C0F0E09080B0A0504070601000302
+.section .rodata.cst16.ROR328, "aM", @progbits, 16
+.align 16
+ROR328:	.octa 0x0C0F0E0D080B0A090407060500030201
+#ifdef CONFIG_AS_AVX512
+.section .rodata.cst64.BLAKE2S_SIGMA, "aM", @progbits, 640
+.align 64
+SIGMA:
+.long 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15
+.long 11, 2, 12, 14, 9, 8, 15, 3, 4, 0, 13, 6, 10, 1, 7, 5
+.long 10, 12, 11, 6, 5, 9, 13, 3, 4, 15, 14, 2, 0, 7, 8, 1
+.long 10, 9, 7, 0, 11, 14, 1, 12, 6, 2, 15, 3, 13, 8, 5, 4
+.long 4, 9, 8, 13, 14, 0, 10, 11, 7, 3, 12, 1, 5, 6, 15, 2
+.long 2, 10, 4, 14, 13, 3, 9, 11, 6, 5, 7, 12, 15, 1, 8, 0
+.long 4, 11, 14, 8, 13, 10, 12, 5, 2, 1, 15, 3, 9, 7, 0, 6
+.long 6, 12, 0, 13, 15, 2, 1, 10, 4, 5, 11, 14, 8, 3, 9, 7
+.long 14, 5, 4, 12, 9, 7, 3, 10, 2, 0, 6, 15, 11, 1, 13, 8
+.long 11, 7, 13, 10, 12, 14, 0, 15, 4, 5, 6, 9, 2, 1, 8, 3
+#endif /* CONFIG_AS_AVX512 */
+
+.text
+#ifdef CONFIG_AS_AVX
+ENTRY(blake2s_compress_avx)
+	movl %ecx, %ecx
+	testq %rdx, %rdx
+	je .Lendofloop
+	.align 32
+.Lbeginofloop:
+	addq %rcx, 32(%rdi)
+	vmovdqu IV+16(%rip), %xmm1
+	vmovdqu (%rsi), %xmm4
+	vpxor 32(%rdi), %xmm1, %xmm1
+	vmovdqu 16(%rsi), %xmm3
+	vshufps $136, %xmm3, %xmm4, %xmm6
+	vmovdqa ROT16(%rip), %xmm7
+	vpaddd (%rdi), %xmm6, %xmm6
+	vpaddd 16(%rdi), %xmm6, %xmm6
+	vpxor %xmm6, %xmm1, %xmm1
+	vmovdqu IV(%rip), %xmm8
+	vpshufb %xmm7, %xmm1, %xmm1
+	vmovdqu 48(%rsi), %xmm5
+	vpaddd %xmm1, %xmm8, %xmm8
+	vpxor 16(%rdi), %xmm8, %xmm9
+	vmovdqu 32(%rsi), %xmm2
+	vpblendw $12, %xmm3, %xmm5, %xmm13
+	vshufps $221, %xmm5, %xmm2, %xmm12
+	vpunpckhqdq %xmm2, %xmm4, %xmm14
+	vpslld $20, %xmm9, %xmm0
+	vpsrld $12, %xmm9, %xmm9
+	vpxor %xmm0, %xmm9, %xmm0
+	vshufps $221, %xmm3, %xmm4, %xmm9
+	vpaddd %xmm9, %xmm6, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vmovdqa ROR328(%rip), %xmm6
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm8, %xmm8
+	vpxor %xmm8, %xmm0, %xmm0
+	vpshufd $147, %xmm1, %xmm1
+	vpshufd $78, %xmm8, %xmm8
+	vpslld $25, %xmm0, %xmm10
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm10, %xmm0, %xmm0
+	vshufps $136, %xmm5, %xmm2, %xmm10
+	vpshufd $57, %xmm0, %xmm0
+	vpaddd %xmm10, %xmm9, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpaddd %xmm12, %xmm9, %xmm9
+	vpblendw $12, %xmm2, %xmm3, %xmm12
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm8, %xmm8
+	vpxor %xmm8, %xmm0, %xmm10
+	vpslld $20, %xmm10, %xmm0
+	vpsrld $12, %xmm10, %xmm10
+	vpxor %xmm0, %xmm10, %xmm0
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm8, %xmm8
+	vpxor %xmm8, %xmm0, %xmm0
+	vpshufd $57, %xmm1, %xmm1
+	vpshufd $78, %xmm8, %xmm8
+	vpslld $25, %xmm0, %xmm10
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm10, %xmm0, %xmm0
+	vpslldq $4, %xmm5, %xmm10
+	vpblendw $240, %xmm10, %xmm12, %xmm12
+	vpshufd $147, %xmm0, %xmm0
+	vpshufd $147, %xmm12, %xmm12
+	vpaddd %xmm9, %xmm12, %xmm12
+	vpaddd %xmm0, %xmm12, %xmm12
+	vpxor %xmm12, %xmm1, %xmm1
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm8, %xmm8
+	vpxor %xmm8, %xmm0, %xmm11
+	vpslld $20, %xmm11, %xmm9
+	vpsrld $12, %xmm11, %xmm11
+	vpxor %xmm9, %xmm11, %xmm0
+	vpshufd $8, %xmm2, %xmm9
+	vpblendw $192, %xmm5, %xmm3, %xmm11
+	vpblendw $240, %xmm11, %xmm9, %xmm9
+	vpshufd $177, %xmm9, %xmm9
+	vpaddd %xmm12, %xmm9, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm11
+	vpxor %xmm11, %xmm1, %xmm1
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm8, %xmm8
+	vpxor %xmm8, %xmm0, %xmm9
+	vpshufd $147, %xmm1, %xmm1
+	vpshufd $78, %xmm8, %xmm8
+	vpslld $25, %xmm9, %xmm0
+	vpsrld $7, %xmm9, %xmm9
+	vpxor %xmm0, %xmm9, %xmm0
+	vpslldq $4, %xmm3, %xmm9
+	vpblendw $48, %xmm9, %xmm2, %xmm9
+	vpblendw $240, %xmm9, %xmm4, %xmm9
+	vpshufd $57, %xmm0, %xmm0
+	vpshufd $177, %xmm9, %xmm9
+	vpaddd %xmm11, %xmm9, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm8, %xmm11
+	vpxor %xmm11, %xmm0, %xmm0
+	vpslld $20, %xmm0, %xmm8
+	vpsrld $12, %xmm0, %xmm0
+	vpxor %xmm8, %xmm0, %xmm0
+	vpunpckhdq %xmm3, %xmm4, %xmm8
+	vpblendw $12, %xmm10, %xmm8, %xmm12
+	vpshufd $177, %xmm12, %xmm12
+	vpaddd %xmm9, %xmm12, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm11, %xmm11
+	vpxor %xmm11, %xmm0, %xmm0
+	vpshufd $57, %xmm1, %xmm1
+	vpshufd $78, %xmm11, %xmm11
+	vpslld $25, %xmm0, %xmm12
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm12, %xmm0, %xmm0
+	vpunpckhdq %xmm5, %xmm2, %xmm12
+	vpshufd $147, %xmm0, %xmm0
+	vpblendw $15, %xmm13, %xmm12, %xmm12
+	vpslldq $8, %xmm5, %xmm13
+	vpshufd $210, %xmm12, %xmm12
+	vpaddd %xmm9, %xmm12, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm11, %xmm11
+	vpxor %xmm11, %xmm0, %xmm0
+	vpslld $20, %xmm0, %xmm12
+	vpsrld $12, %xmm0, %xmm0
+	vpxor %xmm12, %xmm0, %xmm0
+	vpunpckldq %xmm4, %xmm2, %xmm12
+	vpblendw $240, %xmm4, %xmm12, %xmm12
+	vpblendw $192, %xmm13, %xmm12, %xmm12
+	vpsrldq $12, %xmm3, %xmm13
+	vpaddd %xmm12, %xmm9, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm11, %xmm11
+	vpxor %xmm11, %xmm0, %xmm0
+	vpshufd $147, %xmm1, %xmm1
+	vpshufd $78, %xmm11, %xmm11
+	vpslld $25, %xmm0, %xmm12
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm12, %xmm0, %xmm0
+	vpblendw $60, %xmm2, %xmm4, %xmm12
+	vpblendw $3, %xmm13, %xmm12, %xmm12
+	vpshufd $57, %xmm0, %xmm0
+	vpshufd $78, %xmm12, %xmm12
+	vpaddd %xmm9, %xmm12, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm11, %xmm11
+	vpxor %xmm11, %xmm0, %xmm12
+	vpslld $20, %xmm12, %xmm13
+	vpsrld $12, %xmm12, %xmm0
+	vpblendw $51, %xmm3, %xmm4, %xmm12
+	vpxor %xmm13, %xmm0, %xmm0
+	vpblendw $192, %xmm10, %xmm12, %xmm10
+	vpslldq $8, %xmm2, %xmm12
+	vpshufd $27, %xmm10, %xmm10
+	vpaddd %xmm9, %xmm10, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm11, %xmm11
+	vpxor %xmm11, %xmm0, %xmm0
+	vpshufd $57, %xmm1, %xmm1
+	vpshufd $78, %xmm11, %xmm11
+	vpslld $25, %xmm0, %xmm10
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm10, %xmm0, %xmm0
+	vpunpckhdq %xmm2, %xmm8, %xmm10
+	vpshufd $147, %xmm0, %xmm0
+	vpblendw $12, %xmm5, %xmm10, %xmm10
+	vpshufd $210, %xmm10, %xmm10
+	vpaddd %xmm9, %xmm10, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm11, %xmm11
+	vpxor %xmm11, %xmm0, %xmm10
+	vpslld $20, %xmm10, %xmm0
+	vpsrld $12, %xmm10, %xmm10
+	vpxor %xmm0, %xmm10, %xmm0
+	vpblendw $12, %xmm4, %xmm5, %xmm10
+	vpblendw $192, %xmm12, %xmm10, %xmm10
+	vpunpckldq %xmm2, %xmm4, %xmm12
+	vpshufd $135, %xmm10, %xmm10
+	vpaddd %xmm9, %xmm10, %xmm9
+	vpaddd %xmm0, %xmm9, %xmm9
+	vpxor %xmm9, %xmm1, %xmm1
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm11, %xmm13
+	vpxor %xmm13, %xmm0, %xmm0
+	vpshufd $147, %xmm1, %xmm1
+	vpshufd $78, %xmm13, %xmm13
+	vpslld $25, %xmm0, %xmm10
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm10, %xmm0, %xmm0
+	vpblendw $15, %xmm3, %xmm4, %xmm10
+	vpblendw $192, %xmm5, %xmm10, %xmm10
+	vpshufd $57, %xmm0, %xmm0
+	vpshufd $198, %xmm10, %xmm10
+	vpaddd %xmm9, %xmm10, %xmm10
+	vpaddd %xmm0, %xmm10, %xmm10
+	vpxor %xmm10, %xmm1, %xmm1
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm13, %xmm13
+	vpxor %xmm13, %xmm0, %xmm9
+	vpslld $20, %xmm9, %xmm0
+	vpsrld $12, %xmm9, %xmm9
+	vpxor %xmm0, %xmm9, %xmm0
+	vpunpckhdq %xmm2, %xmm3, %xmm9
+	vpunpcklqdq %xmm12, %xmm9, %xmm15
+	vpunpcklqdq %xmm12, %xmm8, %xmm12
+	vpblendw $15, %xmm5, %xmm8, %xmm8
+	vpaddd %xmm15, %xmm10, %xmm15
+	vpaddd %xmm0, %xmm15, %xmm15
+	vpxor %xmm15, %xmm1, %xmm1
+	vpshufd $141, %xmm8, %xmm8
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm13, %xmm13
+	vpxor %xmm13, %xmm0, %xmm0
+	vpshufd $57, %xmm1, %xmm1
+	vpshufd $78, %xmm13, %xmm13
+	vpslld $25, %xmm0, %xmm10
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm10, %xmm0, %xmm0
+	vpunpcklqdq %xmm2, %xmm3, %xmm10
+	vpshufd $147, %xmm0, %xmm0
+	vpblendw $51, %xmm14, %xmm10, %xmm14
+	vpshufd $135, %xmm14, %xmm14
+	vpaddd %xmm15, %xmm14, %xmm14
+	vpaddd %xmm0, %xmm14, %xmm14
+	vpxor %xmm14, %xmm1, %xmm1
+	vpunpcklqdq %xmm3, %xmm4, %xmm15
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm13, %xmm13
+	vpxor %xmm13, %xmm0, %xmm0
+	vpslld $20, %xmm0, %xmm11
+	vpsrld $12, %xmm0, %xmm0
+	vpxor %xmm11, %xmm0, %xmm0
+	vpunpckhqdq %xmm5, %xmm3, %xmm11
+	vpblendw $51, %xmm15, %xmm11, %xmm11
+	vpunpckhqdq %xmm3, %xmm5, %xmm15
+	vpaddd %xmm11, %xmm14, %xmm11
+	vpaddd %xmm0, %xmm11, %xmm11
+	vpxor %xmm11, %xmm1, %xmm1
+	vpshufb %xmm6, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm13, %xmm13
+	vpxor %xmm13, %xmm0, %xmm0
+	vpshufd $147, %xmm1, %xmm1
+	vpshufd $78, %xmm13, %xmm13
+	vpslld $25, %xmm0, %xmm14
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm14, %xmm0, %xmm14
+	vpunpckhqdq %xmm4, %xmm2, %xmm0
+	vpshufd $57, %xmm14, %xmm14
+	vpblendw $51, %xmm15, %xmm0, %xmm15
+	vpaddd %xmm15, %xmm11, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm1, %xmm1
+	vpshufb %xmm7, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm13, %xmm13
+	vpxor %xmm13, %xmm14, %xmm14
+	vpslld $20, %xmm14, %xmm11
+	vpsrld $12, %xmm14, %xmm14
+	vpxor %xmm11, %xmm14, %xmm14
+	vpblendw $3, %xmm2, %xmm4, %xmm11
+	vpslldq $8, %xmm11, %xmm0
+	vpblendw $15, %xmm5, %xmm0, %xmm0
+	vpshufd $99, %xmm0, %xmm0
+	vpaddd %xmm15, %xmm0, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm1, %xmm0
+	vpaddd %xmm12, %xmm15, %xmm15
+	vpshufb %xmm6, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm13, %xmm13
+	vpxor %xmm13, %xmm14, %xmm14
+	vpshufd $57, %xmm0, %xmm0
+	vpshufd $78, %xmm13, %xmm13
+	vpslld $25, %xmm14, %xmm1
+	vpsrld $7, %xmm14, %xmm14
+	vpxor %xmm1, %xmm14, %xmm14
+	vpblendw $3, %xmm5, %xmm4, %xmm1
+	vpshufd $147, %xmm14, %xmm14
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpshufb %xmm7, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm13, %xmm13
+	vpxor %xmm13, %xmm14, %xmm14
+	vpslld $20, %xmm14, %xmm12
+	vpsrld $12, %xmm14, %xmm14
+	vpxor %xmm12, %xmm14, %xmm14
+	vpsrldq $4, %xmm2, %xmm12
+	vpblendw $60, %xmm12, %xmm1, %xmm1
+	vpaddd %xmm1, %xmm15, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpblendw $12, %xmm4, %xmm3, %xmm1
+	vpshufb %xmm6, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm13, %xmm13
+	vpxor %xmm13, %xmm14, %xmm14
+	vpshufd $147, %xmm0, %xmm0
+	vpshufd $78, %xmm13, %xmm13
+	vpslld $25, %xmm14, %xmm12
+	vpsrld $7, %xmm14, %xmm14
+	vpxor %xmm12, %xmm14, %xmm14
+	vpsrldq $4, %xmm5, %xmm12
+	vpblendw $48, %xmm12, %xmm1, %xmm1
+	vpshufd $33, %xmm5, %xmm12
+	vpshufd $57, %xmm14, %xmm14
+	vpshufd $108, %xmm1, %xmm1
+	vpblendw $51, %xmm12, %xmm10, %xmm12
+	vpaddd %xmm15, %xmm1, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpaddd %xmm12, %xmm15, %xmm15
+	vpshufb %xmm7, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm13, %xmm1
+	vpxor %xmm1, %xmm14, %xmm14
+	vpslld $20, %xmm14, %xmm13
+	vpsrld $12, %xmm14, %xmm14
+	vpxor %xmm13, %xmm14, %xmm14
+	vpslldq $12, %xmm3, %xmm13
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpshufb %xmm6, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm1, %xmm1
+	vpxor %xmm1, %xmm14, %xmm14
+	vpshufd $57, %xmm0, %xmm0
+	vpshufd $78, %xmm1, %xmm1
+	vpslld $25, %xmm14, %xmm12
+	vpsrld $7, %xmm14, %xmm14
+	vpxor %xmm12, %xmm14, %xmm14
+	vpblendw $51, %xmm5, %xmm4, %xmm12
+	vpshufd $147, %xmm14, %xmm14
+	vpblendw $192, %xmm13, %xmm12, %xmm12
+	vpaddd %xmm12, %xmm15, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpsrldq $4, %xmm3, %xmm12
+	vpshufb %xmm7, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm1, %xmm1
+	vpxor %xmm1, %xmm14, %xmm14
+	vpslld $20, %xmm14, %xmm13
+	vpsrld $12, %xmm14, %xmm14
+	vpxor %xmm13, %xmm14, %xmm14
+	vpblendw $48, %xmm2, %xmm5, %xmm13
+	vpblendw $3, %xmm12, %xmm13, %xmm13
+	vpshufd $156, %xmm13, %xmm13
+	vpaddd %xmm15, %xmm13, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpshufb %xmm6, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm1, %xmm1
+	vpxor %xmm1, %xmm14, %xmm14
+	vpshufd $147, %xmm0, %xmm0
+	vpshufd $78, %xmm1, %xmm1
+	vpslld $25, %xmm14, %xmm13
+	vpsrld $7, %xmm14, %xmm14
+	vpxor %xmm13, %xmm14, %xmm14
+	vpunpcklqdq %xmm2, %xmm4, %xmm13
+	vpshufd $57, %xmm14, %xmm14
+	vpblendw $12, %xmm12, %xmm13, %xmm12
+	vpshufd $180, %xmm12, %xmm12
+	vpaddd %xmm15, %xmm12, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpshufb %xmm7, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm1, %xmm1
+	vpxor %xmm1, %xmm14, %xmm14
+	vpslld $20, %xmm14, %xmm12
+	vpsrld $12, %xmm14, %xmm14
+	vpxor %xmm12, %xmm14, %xmm14
+	vpunpckhqdq %xmm9, %xmm4, %xmm12
+	vpshufd $198, %xmm12, %xmm12
+	vpaddd %xmm15, %xmm12, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpaddd %xmm15, %xmm8, %xmm15
+	vpshufb %xmm6, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm1, %xmm1
+	vpxor %xmm1, %xmm14, %xmm14
+	vpshufd $57, %xmm0, %xmm0
+	vpshufd $78, %xmm1, %xmm1
+	vpslld $25, %xmm14, %xmm12
+	vpsrld $7, %xmm14, %xmm14
+	vpxor %xmm12, %xmm14, %xmm14
+	vpsrldq $4, %xmm4, %xmm12
+	vpshufd $147, %xmm14, %xmm14
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm15, %xmm0, %xmm0
+	vpshufb %xmm7, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm1, %xmm1
+	vpxor %xmm1, %xmm14, %xmm14
+	vpslld $20, %xmm14, %xmm8
+	vpsrld $12, %xmm14, %xmm14
+	vpxor %xmm14, %xmm8, %xmm14
+	vpblendw $48, %xmm5, %xmm2, %xmm8
+	vpblendw $3, %xmm12, %xmm8, %xmm8
+	vpunpckhqdq %xmm5, %xmm4, %xmm12
+	vpshufd $75, %xmm8, %xmm8
+	vpblendw $60, %xmm10, %xmm12, %xmm10
+	vpaddd %xmm15, %xmm8, %xmm15
+	vpaddd %xmm14, %xmm15, %xmm15
+	vpxor %xmm0, %xmm15, %xmm0
+	vpshufd $45, %xmm10, %xmm10
+	vpshufb %xmm6, %xmm0, %xmm0
+	vpaddd %xmm15, %xmm10, %xmm15
+	vpaddd %xmm0, %xmm1, %xmm1
+	vpxor %xmm1, %xmm14, %xmm14
+	vpshufd $147, %xmm0, %xmm0
+	vpshufd $78, %xmm1, %xmm1
+	vpslld $25, %xmm14, %xmm8
+	vpsrld $7, %xmm14, %xmm14
+	vpxor %xmm14, %xmm8, %xmm8
+	vpshufd $57, %xmm8, %xmm8
+	vpaddd %xmm8, %xmm15, %xmm15
+	vpxor %xmm0, %xmm15, %xmm0
+	vpshufb %xmm7, %xmm0, %xmm0
+	vpaddd %xmm0, %xmm1, %xmm1
+	vpxor %xmm8, %xmm1, %xmm8
+	vpslld $20, %xmm8, %xmm10
+	vpsrld $12, %xmm8, %xmm8
+	vpxor %xmm8, %xmm10, %xmm10
+	vpunpckldq %xmm3, %xmm4, %xmm8
+	vpunpcklqdq %xmm9, %xmm8, %xmm9
+	vpaddd %xmm9, %xmm15, %xmm9
+	vpaddd %xmm10, %xmm9, %xmm9
+	vpxor %xmm0, %xmm9, %xmm8
+	vpshufb %xmm6, %xmm8, %xmm8
+	vpaddd %xmm8, %xmm1, %xmm1
+	vpxor %xmm1, %xmm10, %xmm10
+	vpshufd $57, %xmm8, %xmm8
+	vpshufd $78, %xmm1, %xmm1
+	vpslld $25, %xmm10, %xmm12
+	vpsrld $7, %xmm10, %xmm10
+	vpxor %xmm10, %xmm12, %xmm10
+	vpblendw $48, %xmm4, %xmm3, %xmm12
+	vpshufd $147, %xmm10, %xmm0
+	vpunpckhdq %xmm5, %xmm3, %xmm10
+	vpshufd $78, %xmm12, %xmm12
+	vpunpcklqdq %xmm4, %xmm10, %xmm10
+	vpblendw $192, %xmm2, %xmm10, %xmm10
+	vpshufhw $78, %xmm10, %xmm10
+	vpaddd %xmm10, %xmm9, %xmm10
+	vpaddd %xmm0, %xmm10, %xmm10
+	vpxor %xmm8, %xmm10, %xmm8
+	vpshufb %xmm7, %xmm8, %xmm8
+	vpaddd %xmm8, %xmm1, %xmm1
+	vpxor %xmm0, %xmm1, %xmm9
+	vpslld $20, %xmm9, %xmm0
+	vpsrld $12, %xmm9, %xmm9
+	vpxor %xmm9, %xmm0, %xmm0
+	vpunpckhdq %xmm5, %xmm4, %xmm9
+	vpblendw $240, %xmm9, %xmm2, %xmm13
+	vpshufd $39, %xmm13, %xmm13
+	vpaddd %xmm10, %xmm13, %xmm10
+	vpaddd %xmm0, %xmm10, %xmm10
+	vpxor %xmm8, %xmm10, %xmm8
+	vpblendw $12, %xmm4, %xmm2, %xmm13
+	vpshufb %xmm6, %xmm8, %xmm8
+	vpslldq $4, %xmm13, %xmm13
+	vpblendw $15, %xmm5, %xmm13, %xmm13
+	vpaddd %xmm8, %xmm1, %xmm1
+	vpxor %xmm1, %xmm0, %xmm0
+	vpaddd %xmm13, %xmm10, %xmm13
+	vpshufd $147, %xmm8, %xmm8
+	vpshufd $78, %xmm1, %xmm1
+	vpslld $25, %xmm0, %xmm14
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm0, %xmm14, %xmm14
+	vpshufd $57, %xmm14, %xmm14
+	vpaddd %xmm14, %xmm13, %xmm13
+	vpxor %xmm8, %xmm13, %xmm8
+	vpaddd %xmm13, %xmm12, %xmm12
+	vpshufb %xmm7, %xmm8, %xmm8
+	vpaddd %xmm8, %xmm1, %xmm1
+	vpxor %xmm14, %xmm1, %xmm14
+	vpslld $20, %xmm14, %xmm10
+	vpsrld $12, %xmm14, %xmm14
+	vpxor %xmm14, %xmm10, %xmm10
+	vpaddd %xmm10, %xmm12, %xmm12
+	vpxor %xmm8, %xmm12, %xmm8
+	vpshufb %xmm6, %xmm8, %xmm8
+	vpaddd %xmm8, %xmm1, %xmm1
+	vpxor %xmm1, %xmm10, %xmm0
+	vpshufd $57, %xmm8, %xmm8
+	vpshufd $78, %xmm1, %xmm1
+	vpslld $25, %xmm0, %xmm10
+	vpsrld $7, %xmm0, %xmm0
+	vpxor %xmm0, %xmm10, %xmm10
+	vpblendw $48, %xmm2, %xmm3, %xmm0
+	vpblendw $15, %xmm11, %xmm0, %xmm0
+	vpshufd $147, %xmm10, %xmm10
+	vpshufd $114, %xmm0, %xmm0
+	vpaddd %xmm12, %xmm0, %xmm0
+	vpaddd %xmm10, %xmm0, %xmm0
+	vpxor %xmm8, %xmm0, %xmm8
+	vpshufb %xmm7, %xmm8, %xmm8
+	vpaddd %xmm8, %xmm1, %xmm1
+	vpxor %xmm10, %xmm1, %xmm10
+	vpslld $20, %xmm10, %xmm11
+	vpsrld $12, %xmm10, %xmm10
+	vpxor %xmm10, %xmm11, %xmm10
+	vpslldq $4, %xmm4, %xmm11
+	vpblendw $192, %xmm11, %xmm3, %xmm3
+	vpunpckldq %xmm5, %xmm4, %xmm4
+	vpshufd $99, %xmm3, %xmm3
+	vpaddd %xmm0, %xmm3, %xmm3
+	vpaddd %xmm10, %xmm3, %xmm3
+	vpxor %xmm8, %xmm3, %xmm11
+	vpunpckldq %xmm5, %xmm2, %xmm0
+	vpblendw $192, %xmm2, %xmm5, %xmm2
+	vpshufb %xmm6, %xmm11, %xmm11
+	vpunpckhqdq %xmm0, %xmm9, %xmm0
+	vpblendw $15, %xmm4, %xmm2, %xmm4
+	vpaddd %xmm11, %xmm1, %xmm1
+	vpxor %xmm1, %xmm10, %xmm10
+	vpshufd $147, %xmm11, %xmm11
+	vpshufd $201, %xmm0, %xmm0
+	vpslld $25, %xmm10, %xmm8
+	vpsrld $7, %xmm10, %xmm10
+	vpxor %xmm10, %xmm8, %xmm10
+	vpshufd $78, %xmm1, %xmm1
+	vpaddd %xmm3, %xmm0, %xmm0
+	vpshufd $27, %xmm4, %xmm4
+	vpshufd $57, %xmm10, %xmm10
+	vpaddd %xmm10, %xmm0, %xmm0
+	vpxor %xmm11, %xmm0, %xmm11
+	vpaddd %xmm0, %xmm4, %xmm0
+	vpshufb %xmm7, %xmm11, %xmm7
+	vpaddd %xmm7, %xmm1, %xmm1
+	vpxor %xmm10, %xmm1, %xmm10
+	vpslld $20, %xmm10, %xmm8
+	vpsrld $12, %xmm10, %xmm10
+	vpxor %xmm10, %xmm8, %xmm8
+	vpaddd %xmm8, %xmm0, %xmm0
+	vpxor %xmm7, %xmm0, %xmm7
+	vpshufb %xmm6, %xmm7, %xmm6
+	vpaddd %xmm6, %xmm1, %xmm1
+	vpxor %xmm1, %xmm8, %xmm8
+	vpshufd $78, %xmm1, %xmm1
+	vpshufd $57, %xmm6, %xmm6
+	vpslld $25, %xmm8, %xmm2
+	vpsrld $7, %xmm8, %xmm8
+	vpxor %xmm8, %xmm2, %xmm8
+	vpxor (%rdi), %xmm1, %xmm1
+	vpshufd $147, %xmm8, %xmm8
+	vpxor %xmm0, %xmm1, %xmm0
+	vmovups %xmm0, (%rdi)
+	vpxor 16(%rdi), %xmm8, %xmm0
+	vpxor %xmm6, %xmm0, %xmm6
+	vmovups %xmm6, 16(%rdi)
+	addq $64, %rsi
+	decq %rdx
+	jnz .Lbeginofloop
+.Lendofloop:
+	ret
+ENDPROC(blake2s_compress_avx)
+#endif /* CONFIG_AS_AVX */
+
+#ifdef CONFIG_AS_AVX512
+ENTRY(blake2s_compress_avx512_ymm)
+	vmovdqu (%rdi),%xmm0
+	vmovdqu 0x10(%rdi),%xmm1
+	vmovdqu 0x20(%rdi),%xmm4
+	vmovq %rcx,%xmm5
+	vmovdqa IV(%rip),%xmm14
+	vmovdqa IV+16(%rip),%xmm15
+	jmp .Lblake2s_compress_avx512_ymm_mainloop
+.align 32
+.Lblake2s_compress_avx512_ymm_mainloop:
+	vmovdqa %xmm0,%xmm10
+	vmovdqa %xmm1,%xmm11
+	vpaddq %xmm5,%xmm4,%xmm4
+	vmovdqa %xmm14,%xmm2
+	vpxor %xmm15,%xmm4,%xmm3
+	vmovdqu (%rsi),%ymm6
+	vmovdqu 0x20(%rsi),%ymm7
+	addq $0x40,%rsi
+	leaq SIGMA(%rip),%rax
+	movb $0xa,%cl
+.Lblake2s_compress_avx512_ymm_roundloop:
+	addq $0x40,%rax
+	vmovdqa -0x40(%rax),%ymm8
+	vmovdqa -0x20(%rax),%ymm9
+	vpermi2d %ymm7,%ymm6,%ymm8
+	vpermi2d %ymm7,%ymm6,%ymm9
+	vmovdqa %ymm8,%ymm6
+	vmovdqa %ymm9,%ymm7
+	vpaddd %xmm8,%xmm0,%xmm0
+	vpaddd %xmm1,%xmm0,%xmm0
+	vpxor %xmm0,%xmm3,%xmm3
+	vprord $0x10,%xmm3,%xmm3
+	vpaddd %xmm3,%xmm2,%xmm2
+	vpxor %xmm2,%xmm1,%xmm1
+	vprord $0xc,%xmm1,%xmm1
+	vextracti128 $0x1,%ymm8,%xmm8
+	vpaddd %xmm8,%xmm0,%xmm0
+	vpaddd %xmm1,%xmm0,%xmm0
+	vpxor %xmm0,%xmm3,%xmm3
+	vprord $0x8,%xmm3,%xmm3
+	vpaddd %xmm3,%xmm2,%xmm2
+	vpxor %xmm2,%xmm1,%xmm1
+	vprord $0x7,%xmm1,%xmm1
+	vpshufd $0x39,%xmm1,%xmm1
+	vpshufd $0x4e,%xmm2,%xmm2
+	vpshufd $0x93,%xmm3,%xmm3
+	vpaddd %xmm9,%xmm0,%xmm0
+	vpaddd %xmm1,%xmm0,%xmm0
+	vpxor %xmm0,%xmm3,%xmm3
+	vprord $0x10,%xmm3,%xmm3
+	vpaddd %xmm3,%xmm2,%xmm2
+	vpxor %xmm2,%xmm1,%xmm1
+	vprord $0xc,%xmm1,%xmm1
+	vextracti128 $0x1,%ymm9,%xmm9
+	vpaddd %xmm9,%xmm0,%xmm0
+	vpaddd %xmm1,%xmm0,%xmm0
+	vpxor %xmm0,%xmm3,%xmm3
+	vprord $0x8,%xmm3,%xmm3
+	vpaddd %xmm3,%xmm2,%xmm2
+	vpxor %xmm2,%xmm1,%xmm1
+	vprord $0x7,%xmm1,%xmm1
+	vpshufd $0x93,%xmm1,%xmm1
+	vpshufd $0x4e,%xmm2,%xmm2
+	vpshufd $0x39,%xmm3,%xmm3
+	decb %cl
+	jne .Lblake2s_compress_avx512_ymm_roundloop
+	vpxor %xmm10,%xmm0,%xmm0
+	vpxor %xmm11,%xmm1,%xmm1
+	vpxor %xmm2,%xmm0,%xmm0
+	vpxor %xmm3,%xmm1,%xmm1
+	decq %rdx
+	jne .Lblake2s_compress_avx512_ymm_mainloop
+	vmovdqu %xmm0,(%rdi)
+	vmovdqu %xmm1,0x10(%rdi)
+	vmovdqu %xmm4,0x20(%rdi)
+	vzeroupper
+	retq
+ENDPROC(blake2s_compress_avx512_ymm)
+
+ENTRY(blake2s_compress_avx512_zmm)
+	vmovdqu (%rdi),%xmm0
+	vmovdqu 0x10(%rdi),%xmm1
+	vmovdqu 0x20(%rdi),%xmm15
+	vmovq %rcx,%xmm13
+	jmp .Lblake2s_compress_avx512_zmm_mainloop
+.align 32
+.Lblake2s_compress_avx512_zmm_mainloop:
+	vpaddq %xmm13,%xmm15,%xmm15
+	vmovdqa IV(%rip),%xmm2
+	vpxor IV+16(%rip),%xmm15,%xmm3
+	lea SIGMA(%rip),%rax
+	movl $10,%ecx
+.Lblake2s_compress_avx512_zmm_roundloop:
+	add $0x40,%rax
+	vmovdqa -0x40(%rax),%xmm7
+	vpcmpeqd %xmm14,%xmm14,%xmm14
+	vpgatherdd %xmm14,(%rsi,%xmm7,4),%xmm6
+	vpaddd %xmm6,%xmm0,%xmm0
+	vpaddd %xmm1,%xmm0,%xmm0
+	vpxor %xmm0,%xmm3,%xmm3
+	vprord $0x10,%xmm3,%xmm3
+	vpaddd %xmm3,%xmm2,%xmm2
+	vpxor %xmm2,%xmm1,%xmm1
+	vprord $0xc,%xmm1,%xmm1
+	vmovdqa -0x30(%rax),%xmm7
+	vpcmpeqd %xmm14,%xmm14,%xmm14
+	vpgatherdd %xmm14,(%rsi,%xmm7,4),%xmm6
+	vpaddd %xmm6,%xmm0,%xmm0
+	vpaddd %xmm1,%xmm0,%xmm0
+	vpxor %xmm0,%xmm3,%xmm3
+	vprord $0x8,%xmm3,%xmm3
+	vpaddd %xmm3,%xmm2,%xmm2
+	vpxor %xmm2,%xmm1,%xmm1
+	vprord $0x7,%xmm1,%xmm1
+	vpshufd $0x39,%xmm1,%xmm1
+	vpshufd $0x4e,%xmm2,%xmm2
+	vpshufd $0x93,%xmm3,%xmm3
+	vmovdqa -0x20(%rax),%xmm7
+	vpcmpeqd %xmm14,%xmm14,%xmm14
+	vpgatherdd %xmm14,(%rsi,%xmm7,4),%xmm6
+	vpaddd %xmm6,%xmm0,%xmm0
+	vpaddd %xmm1,%xmm0,%xmm0
+	vpxor %xmm0,%xmm3,%xmm3
+	vprord $0x10,%xmm3,%xmm3
+	vpaddd %xmm3,%xmm2,%xmm2
+	vpxor %xmm2,%xmm1,%xmm1
+	vprord $0xc,%xmm1,%xmm1
+	vmovdqa -0x10(%rax),%xmm7
+	vpcmpeqd %xmm14,%xmm14,%xmm14
+	vpgatherdd %xmm14,(%rsi,%xmm7,4),%xmm6
+	vpaddd %xmm6,%xmm0,%xmm0
+	vpaddd %xmm1,%xmm0,%xmm0
+	vpxor %xmm0,%xmm3,%xmm3
+	vprord $0x8,%xmm3,%xmm3
+	vpaddd %xmm3,%xmm2,%xmm2
+	vpxor %xmm2,%xmm1,%xmm1
+	vprord $0x7,%xmm1,%xmm1
+	vpshufd $0x93,%xmm1,%xmm1
+	vpshufd $0x4e,%xmm2,%xmm2
+	vpshufd $0x39,%xmm3,%xmm3
+	decl %ecx
+	jne .Lblake2s_compress_avx512_zmm_roundloop
+	add $0x40,%rsi
+	vpxor (%rdi),%xmm0,%xmm0
+	vpxor 0x10(%rdi),%xmm1,%xmm1
+	vpxor %xmm2,%xmm0,%xmm0
+	vpxor %xmm3,%xmm1,%xmm1
+	vmovdqu %xmm0,(%rdi)
+	vmovdqu %xmm1,0x10(%rdi)
+	dec %rdx
+	jne .Lblake2s_compress_avx512_zmm_mainloop
+	vmovdqu %xmm15,0x20(%rdi)
+	vzeroupper
+	retq
+ENDPROC(blake2s_compress_avx512_zmm)
+#endif /* CONFIG_AS_AVX512 */
diff --git a/curve25519-u128.c b/curve25519-u128.c
deleted file mode 100644
index b51d18a..0000000
--- a/curve25519-u128.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2008 Google Inc. All Rights Reserved.
- * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * Original author: Adam Langley <agl@imperialviolet.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-enum { CURVE25519_POINT_SIZE = 32 };
-
-typedef u64 limb;
-typedef limb felem[5];
-typedef __uint128_t u128;
-
-static __always_inline void normalize_secret(u8 secret[CURVE25519_POINT_SIZE])
-{
-	secret[0] &= 248;
-	secret[31] &= 127;
-	secret[31] |= 64;
-}
-
-/* Sum two numbers: output += in */
-static __always_inline void fsum(limb *output, const limb *in)
-{
-	output[0] += in[0];
-	output[1] += in[1];
-	output[2] += in[2];
-	output[3] += in[3];
-	output[4] += in[4];
-}
-
-/* Find the difference of two numbers: output = in - output
- * (note the order of the arguments!)
- *
- * Assumes that out[i] < 2**52
- * On return, out[i] < 2**55
- */
-static __always_inline void fdifference_backwards(felem out, const felem in)
-{
-	/* 152 is 19 << 3 */
-	static const limb two54m152 = (((limb)1) << 54) - 152;
-	static const limb two54m8 = (((limb)1) << 54) - 8;
-
-	out[0] = in[0] + two54m152 - out[0];
-	out[1] = in[1] + two54m8 - out[1];
-	out[2] = in[2] + two54m8 - out[2];
-	out[3] = in[3] + two54m8 - out[3];
-	out[4] = in[4] + two54m8 - out[4];
-}
-
-/* Multiply a number by a scalar: output = in * scalar */
-static __always_inline void fscalar_product(felem output, const felem in, const limb scalar)
-{
-	u128 a;
-
-	a = ((u128) in[0]) * scalar;
-	output[0] = ((limb)a) & 0x7ffffffffffffUL;
-
-	a = ((u128) in[1]) * scalar + ((limb) (a >> 51));
-	output[1] = ((limb)a) & 0x7ffffffffffffUL;
-
-	a = ((u128) in[2]) * scalar + ((limb) (a >> 51));
-	output[2] = ((limb)a) & 0x7ffffffffffffUL;
-
-	a = ((u128) in[3]) * scalar + ((limb) (a >> 51));
-	output[3] = ((limb)a) & 0x7ffffffffffffUL;
-
-	a = ((u128) in[4]) * scalar + ((limb) (a >> 51));
-	output[4] = ((limb)a) & 0x7ffffffffffffUL;
-
-	output[0] += (a >> 51) * 19;
-}
-
-/* Multiply two numbers: output = in2 * in
- *
- * output must be distinct to both inputs. The inputs are reduced coefficient
- * form, the output is not.
- *
- * Assumes that in[i] < 2**55 and likewise for in2.
- * On return, output[i] < 2**52
- */
-static __always_inline void fmul(felem output, const felem in2, const felem in)
-{
-	u128 t[5];
-	limb r0, r1, r2, r3, r4, s0, s1, s2, s3, s4, c;
-
-	r0 = in[0];
-	r1 = in[1];
-	r2 = in[2];
-	r3 = in[3];
-	r4 = in[4];
-
-	s0 = in2[0];
-	s1 = in2[1];
-	s2 = in2[2];
-	s3 = in2[3];
-	s4 = in2[4];
-
-	t[0] = ((u128) r0) * s0;
-	t[1] = ((u128) r0) * s1 + ((u128) r1) * s0;
-	t[2] = ((u128) r0) * s2 + ((u128) r2) * s0 + ((u128) r1) * s1;
-	t[3] = ((u128) r0) * s3 + ((u128) r3) * s0 + ((u128) r1) * s2 + ((u128) r2) * s1;
-	t[4] = ((u128) r0) * s4 + ((u128) r4) * s0 + ((u128) r3) * s1 + ((u128) r1) * s3 + ((u128) r2) * s2;
-
-	r4 *= 19;
-	r1 *= 19;
-	r2 *= 19;
-	r3 *= 19;
-
-	t[0] += ((u128) r4) * s1 + ((u128) r1) * s4 + ((u128) r2) * s3 + ((u128) r3) * s2;
-	t[1] += ((u128) r4) * s2 + ((u128) r2) * s4 + ((u128) r3) * s3;
-	t[2] += ((u128) r4) * s3 + ((u128) r3) * s4;
-	t[3] += ((u128) r4) * s4;
-
-	r0 = (limb)t[0] & 0x7ffffffffffffUL; c = (limb)(t[0] >> 51);
-	t[1] += c; r1 = (limb)t[1] & 0x7ffffffffffffUL; c = (limb)(t[1] >> 51);
-	t[2] += c; r2 = (limb)t[2] & 0x7ffffffffffffUL; c = (limb)(t[2] >> 51);
-	t[3] += c; r3 = (limb)t[3] & 0x7ffffffffffffUL; c = (limb)(t[3] >> 51);
-	t[4] += c; r4 = (limb)t[4] & 0x7ffffffffffffUL; c = (limb)(t[4] >> 51);
-	r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffffUL;
-	r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffffUL;
-	r2 += c;
-
-	output[0] = r0;
-	output[1] = r1;
-	output[2] = r2;
-	output[3] = r3;
-	output[4] = r4;
-}
-
-static __always_inline void fsquare_times(felem output, const felem in, limb count)
-{
-	u128 t[5];
-	limb r0, r1, r2, r3, r4, c;
-	limb d0, d1, d2, d4, d419;
-
-	r0 = in[0];
-	r1 = in[1];
-	r2 = in[2];
-	r3 = in[3];
-	r4 = in[4];
-
-	do {
-		d0 = r0 * 2;
-		d1 = r1 * 2;
-		d2 = r2 * 2 * 19;
-		d419 = r4 * 19;
-		d4 = d419 * 2;
-
-		t[0] = ((u128) r0) * r0 + ((u128) d4) * r1 + (((u128) d2) * (r3     ));
-		t[1] = ((u128) d0) * r1 + ((u128) d4) * r2 + (((u128) r3) * (r3 * 19));
-		t[2] = ((u128) d0) * r2 + ((u128) r1) * r1 + (((u128) d4) * (r3     ));
-		t[3] = ((u128) d0) * r3 + ((u128) d1) * r2 + (((u128) r4) * (d419   ));
-		t[4] = ((u128) d0) * r4 + ((u128) d1) * r3 + (((u128) r2) * (r2     ));
-
-		r0 = (limb)t[0] & 0x7ffffffffffffUL; c = (limb)(t[0] >> 51);
-		t[1] += c; r1 = (limb)t[1] & 0x7ffffffffffffUL; c = (limb)(t[1] >> 51);
-		t[2] += c; r2 = (limb)t[2] & 0x7ffffffffffffUL; c = (limb)(t[2] >> 51);
-		t[3] += c; r3 = (limb)t[3] & 0x7ffffffffffffUL; c = (limb)(t[3] >> 51);
-		t[4] += c; r4 = (limb)t[4] & 0x7ffffffffffffUL; c = (limb)(t[4] >> 51);
-		r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffffUL;
-		r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffffUL;
-		r2 += c;
-	} while (--count);
-
-	output[0] = r0;
-	output[1] = r1;
-	output[2] = r2;
-	output[3] = r3;
-	output[4] = r4;
-}
-
-/* Load a little-endian 64-bit number */
-static inline limb load_limb(const u8 *in)
-{
-	return le64_to_cpu(*(__le64 *)in);
-}
-
-static inline void store_limb(u8 *out, limb in)
-{
-	*(__le64 *)out = cpu_to_le64(in);
-}
-
-/* Take a little-endian, 32-byte number and expand it into polynomial form */
-static inline void fexpand(limb *output, const u8 *in)
-{
-	output[0] = load_limb(in) & 0x7ffffffffffffUL;
-	output[1] = (load_limb(in + 6) >> 3) & 0x7ffffffffffffUL;
-	output[2] = (load_limb(in + 12) >> 6) & 0x7ffffffffffffUL;
-	output[3] = (load_limb(in + 19) >> 1) & 0x7ffffffffffffUL;
-	output[4] = (load_limb(in + 24) >> 12) & 0x7ffffffffffffUL;
-}
-
-/* Take a fully reduced polynomial form number and contract it into a
- * little-endian, 32-byte array
- */
-static void fcontract(u8 *output, const felem input)
-{
-	u128 t[5];
-
-	t[0] = input[0];
-	t[1] = input[1];
-	t[2] = input[2];
-	t[3] = input[3];
-	t[4] = input[4];
-
-	t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffffUL;
-	t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffffUL;
-	t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffffUL;
-	t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffffUL;
-	t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffffUL;
-
-	t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffffUL;
-	t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffffUL;
-	t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffffUL;
-	t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffffUL;
-	t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffffUL;
-
-	/* now t is between 0 and 2^255-1, properly carried. */
-	/* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
-
-	t[0] += 19;
-
-	t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffffUL;
-	t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffffUL;
-	t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffffUL;
-	t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffffUL;
-	t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffffUL;
-
-	/* now between 19 and 2^255-1 in both cases, and offset by 19. */
-
-	t[0] += 0x8000000000000UL - 19;
-	t[1] += 0x8000000000000UL - 1;
-	t[2] += 0x8000000000000UL - 1;
-	t[3] += 0x8000000000000UL - 1;
-	t[4] += 0x8000000000000UL - 1;
-
-	/* now between 2^255 and 2^256-20, and offset by 2^255. */
-
-	t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffffUL;
-	t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffffUL;
-	t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffffUL;
-	t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffffUL;
-	t[4] &= 0x7ffffffffffffUL;
-
-	store_limb(output, t[0] | (t[1] << 51));
-	store_limb(output+8, (t[1] >> 13) | (t[2] << 38));
-	store_limb(output+16, (t[2] >> 26) | (t[3] << 25));
-	store_limb(output+24, (t[3] >> 39) | (t[4] << 12));
-}
-
-/* Input: Q, Q', Q-Q'
- * Output: 2Q, Q+Q'
- *
- * x2 z3: long form
- * x3 z3: long form
- * x z: short form, destroyed
- * xprime zprime: short form, destroyed
- * qmqp: short form, preserved
- */
-static void fmonty(limb *x2, limb *z2, /* output 2Q */
-		   limb *x3, limb *z3, /* output Q + Q' */
-		   limb *x, limb *z, /* input Q */
-		   limb *xprime, limb *zprime, /* input Q' */
-
-		   const limb *qmqp /* input Q - Q' */)
-{
-	limb origx[5], origxprime[5], zzz[5], xx[5], zz[5], xxprime[5], zzprime[5], zzzprime[5];
-
-	memcpy(origx, x, 5 * sizeof(limb));
-	fsum(x, z);
-	fdifference_backwards(z, origx);  // does x - z
-
-	memcpy(origxprime, xprime, sizeof(limb) * 5);
-	fsum(xprime, zprime);
-	fdifference_backwards(zprime, origxprime);
-	fmul(xxprime, xprime, z);
-	fmul(zzprime, x, zprime);
-	memcpy(origxprime, xxprime, sizeof(limb) * 5);
-	fsum(xxprime, zzprime);
-	fdifference_backwards(zzprime, origxprime);
-	fsquare_times(x3, xxprime, 1);
-	fsquare_times(zzzprime, zzprime, 1);
-	fmul(z3, zzzprime, qmqp);
-
-	fsquare_times(xx, x, 1);
-	fsquare_times(zz, z, 1);
-	fmul(x2, xx, zz);
-	fdifference_backwards(zz, xx);  // does zz = xx - zz
-	fscalar_product(zzz, zz, 121665);
-	fsum(zzz, xx);
-	fmul(z2, zz, zzz);
-}
-
-/* Maybe swap the contents of two limb arrays (@a and @b), each @len elements
- * long. Perform the swap iff @swap is non-zero.
- *
- * This function performs the swap without leaking any side-channel
- * information.
- */
-static void swap_conditional(limb a[5], limb b[5], limb iswap)
-{
-	unsigned int i;
-	const limb swap = -iswap;
-
-	for (i = 0; i < 5; ++i) {
-		const limb x = swap & (a[i] ^ b[i]);
-
-		a[i] ^= x;
-		b[i] ^= x;
-	}
-}
-
-/* Calculates nQ where Q is the x-coordinate of a point on the curve
- *
- * resultx/resultz: the x coordinate of the resulting curve point (short form)
- * n: a little endian, 32-byte number
- * q: a point of the curve (short form)
- */
-static void cmult(limb *resultx, limb *resultz, const u8 *n, const limb *q)
-{
-	limb a[5] = {0}, b[5] = {1}, c[5] = {1}, d[5] = {0};
-	limb *nqpqx = a, *nqpqz = b, *nqx = c, *nqz = d, *t;
-	limb e[5] = {0}, f[5] = {1}, g[5] = {0}, h[5] = {1};
-	limb *nqpqx2 = e, *nqpqz2 = f, *nqx2 = g, *nqz2 = h;
-
-	unsigned int i, j;
-
-	memcpy(nqpqx, q, sizeof(limb) * 5);
-
-	for (i = 0; i < 32; ++i) {
-		u8 byte = n[31 - i];
-
-		for (j = 0; j < 8; ++j) {
-			const limb bit = byte >> 7;
-
-			swap_conditional(nqx, nqpqx, bit);
-			swap_conditional(nqz, nqpqz, bit);
-			fmonty(nqx2, nqz2,
-			       nqpqx2, nqpqz2,
-			       nqx, nqz,
-			       nqpqx, nqpqz,
-			       q);
-			swap_conditional(nqx2, nqpqx2, bit);
-			swap_conditional(nqz2, nqpqz2, bit);
-
-			t = nqx;
-			nqx = nqx2;
-			nqx2 = t;
-			t = nqz;
-			nqz = nqz2;
-			nqz2 = t;
-			t = nqpqx;
-			nqpqx = nqpqx2;
-			nqpqx2 = t;
-			t = nqpqz;
-			nqpqz = nqpqz2;
-			nqpqz2 = t;
-
-			byte <<= 1;
-		}
-	}
-
-	memcpy(resultx, nqx, sizeof(limb) * 5);
-	memcpy(resultz, nqz, sizeof(limb) * 5);
-}
-
-static void crecip(felem out, const felem z)
-{
-	felem a, t0, b, c;
-
-	/* 2 */ fsquare_times(a, z, 1); // a = 2
-	/* 8 */ fsquare_times(t0, a, 2);
-	/* 9 */ fmul(b, t0, z); // b = 9
-	/* 11 */ fmul(a, b, a); // a = 11
-	/* 22 */ fsquare_times(t0, a, 1);
-	/* 2^5 - 2^0 = 31 */ fmul(b, t0, b);
-	/* 2^10 - 2^5 */ fsquare_times(t0, b, 5);
-	/* 2^10 - 2^0 */ fmul(b, t0, b);
-	/* 2^20 - 2^10 */ fsquare_times(t0, b, 10);
-	/* 2^20 - 2^0 */ fmul(c, t0, b);
-	/* 2^40 - 2^20 */ fsquare_times(t0, c, 20);
-	/* 2^40 - 2^0 */ fmul(t0, t0, c);
-	/* 2^50 - 2^10 */ fsquare_times(t0, t0, 10);
-	/* 2^50 - 2^0 */ fmul(b, t0, b);
-	/* 2^100 - 2^50 */ fsquare_times(t0, b, 50);
-	/* 2^100 - 2^0 */ fmul(c, t0, b);
-	/* 2^200 - 2^100 */ fsquare_times(t0, c, 100);
-	/* 2^200 - 2^0 */ fmul(t0, t0, c);
-	/* 2^250 - 2^50 */ fsquare_times(t0, t0, 50);
-	/* 2^250 - 2^0 */ fmul(t0, t0, b);
-	/* 2^255 - 2^5 */ fsquare_times(t0, t0, 5);
-	/* 2^255 - 21 */ fmul(out, t0, a);
-}
-
-bool curve25519(u8 mypublic[CURVE25519_POINT_SIZE], const u8 secret[CURVE25519_POINT_SIZE], const u8 basepoint[CURVE25519_POINT_SIZE])
-{
-	limb bp[5], x[5], z[5], zmone[5];
-	u8 e[32];
-
-	memcpy(e, secret, 32);
-	normalize_secret(e);
-
-	fexpand(bp, basepoint);
-	cmult(x, z, e, bp);
-	crecip(zmone, z);
-	fmul(z, x, zmone);
-	fcontract(mypublic, z);
-
-	memzero_explicit(e, sizeof(e));
-	memzero_explicit(bp, sizeof(bp));
-	memzero_explicit(x, sizeof(x));
-	memzero_explicit(z, sizeof(z));
-	memzero_explicit(zmone, sizeof(zmone));
-
-	return true;
-}
diff --git a/function.h b/function.h
deleted file mode 100644
index 6f360e9..0000000
--- a/function.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- */
-
-enum { WARMUP = 5000, TRIALS = 10000, IDLE = 1 * 1000 };
-
-enum { CURVE25519_POINT_SIZE = 32 };
-bool curve25519(u8 mypublic[CURVE25519_POINT_SIZE], const u8 secret[CURVE25519_POINT_SIZE], const u8 basepoint[CURVE25519_POINT_SIZE]);
-
-struct curve25519_test_vector {
-	u8 private[CURVE25519_POINT_SIZE];
-	u8 public[CURVE25519_POINT_SIZE];
-	u8 result[CURVE25519_POINT_SIZE];
-	bool valid;
-};
-
-static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = {
-	{
-		.private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d, 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45, 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a, 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a },
-		.public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4, 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37, 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d, 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f },
-		.result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
-		.valid = true
-	},
-	{
-		.private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b, 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6, 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd, 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb },
-		.public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54, 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a, 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4, 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a },
-		.result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
-		.valid = true
-	},
-	{
-		.private = { 1 },
-		.public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
-		.result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64, 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d, 0xb, 0x95, 0x48, 0xdc, 0xc, 0xd8, 0x19, 0x98, 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f },
-		.valid = true
-	},
-	{
-		.private = { 1 },
-		.public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-		.result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f, 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d, 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x8, 0xed, 0xe3, 0xb, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 },
-		.valid = true
-	},
-	{
-		.private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 },
-		.public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
-		.result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
-		.valid = true
-	},
-	{
-		.private = { 1, 2, 3, 4 },
-		.public = { 0 },
-		.result = { 0 },
-		.valid = false
-	},
-	{
-		.private = { 2, 4, 6, 8 },
-		.public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 },
-		.result = { 0 },
-		.valid = false
-	}
-};
-
-u8 dummy_out[CURVE25519_POINT_SIZE];
-
-static __always_inline int function(void)
-{
-	return curve25519(dummy_out, curve25519_test_vectors[0].private, curve25519_test_vectors[0].public);
-}
diff --git a/main.c b/main.c
@@ -6,37 +6,95 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include "function.h"
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 static unsigned long stamp = 0;
 module_param(stamp, ulong, 0);
 int dummy;
 
+enum { BLOCKS_PER_CALL = 16 };
+
+static u8 state[128];
+static u8 input[64 * BLOCKS_PER_CALL];
+
+#define declare_it(name) \
+asmlinkage void blake2s_compress_ ## name(u8 state[128], const u8 *block, const size_t nblocks, const u32 inc); \
+static __always_inline u8 name(void) \
+{ \
+	blake2s_compress_ ## name(state, input, BLOCKS_PER_CALL, 0); \
+	return input[0]; \
+}
+
+#define do_it(name) do { \
+	u32 eax = 0, ebx = 0, ecx = 0, edx = 0; \
+	for (i = 0; i < WARMUP; ++i) \
+		ret |= name(); \
+	asm volatile("cpuid" : "+a" (eax), "=b" (ebx), "=d" (edx), "+c" (ecx)); \
+	for (i = 0; i <= TRIALS; ++i) { \
+		trial_times[i] = get_cycles(); \
+		ret |= name(); \
+	} \
+	for (i = 0; i < TRIALS; ++i) \
+		trial_times[i] = trial_times[i + 1] - trial_times[i]; \
+	sort(trial_times, TRIALS + 1, sizeof(cycles_t), compare_cycles, NULL); \
+	median_ ## name = trial_times[TRIALS / 2]; \
+} while (0)
+
+#define report_it(name) do { \
+	pr_err("%lu: %12s: %6llu cycles per block\n", stamp, #name, median_ ## name / BLOCKS_PER_CALL); \
+} while (0)
+
+
+declare_it(avx)
+declare_it(avx512_ymm)
+declare_it(avx512_zmm)
+
+static int compare_cycles(const void *a, const void *b)
+{
+	return *((cycles_t *)a) - *((cycles_t *)b);
+}
+
 static int __init mod_init(void)
 {
+	enum { WARMUP = 6000, TRIALS = 5000, IDLE = 1 * 1000 };
 	int ret = 0, i;
-	cycles_t start, end;
+	cycles_t *trial_times;
+	cycles_t median_avx = 0;
+	cycles_t median_avx512_ymm = 0;
+	cycles_t median_avx512_zmm = 0;
 	unsigned long flags;
 	DEFINE_SPINLOCK(lock);
-
+
+	trial_times = kcalloc(TRIALS + 1, sizeof(cycles_t), GFP_KERNEL);
+	if (!trial_times)
+		return -ENOMEM;
+
 	msleep(IDLE);
 	spin_lock_irqsave(&lock, flags);
-
-	for (i = 0; i < WARMUP; ++i)
-		ret |= function();
-	start = get_cycles();
-	for (i = 0; i < TRIALS; ++i)
-		ret |= function();
-	end = get_cycles();
+	kernel_fpu_begin();
+
+	do_it(avx);
+	do_it(avx512_ymm);
+	do_it(avx512_zmm);
+
+	kernel_fpu_end();
 	spin_unlock_irqrestore(&lock, flags);
 
-	pr_err("%lu: %llu cycles per call\n", stamp, (end - start) / TRIALS);
+	report_it(avx);
+	report_it(avx512_ymm);
+	report_it(avx512_zmm);
 
 	/* Don't let compiler be too clever. */
 	dummy = ret;
+	kfree(trial_times);
 
 	/* We should never actually agree to insert the module. Choosing
 	 * -0x1000 here is an amazing hack. It causes the kernel to not
diff --git a/run.sh b/run.sh
@@ -8,9 +8,23 @@ nob_cpus() {
 	done
 }
 
+noturbo() {
+	echo "[+] Setting no-turbo to status $1"
+	if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then
+		echo "$1" > /sys/devices/system/cpu/intel_pstate/no_turbo
+	else
+		local val
+		[[ $1 == 0 ]] && val=0x850089
+		[[ $1 == 1 ]] && val=0x4000850089
+		[[ -n $val ]] || return 0
+		wrmsr -a 0x1a0 $val
+	fi
+}
+
 [[ -e kbench9000.ko ]]
 
-trap "nob_cpus 1" INT TERM EXIT
+trap "nob_cpus 1; noturbo 0;" INT TERM EXIT
 
+noturbo 1
 nob_cpus 0
 echo "[+] Inserting module to run tests"
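For comparison with the hand-scheduled shuffles in blake2s-compress.S, here is a minimal scalar sketch of the BLAKE2s quarter-round (G) and round layout from RFC 7693 that all three routines implement. The names `ror32`, `g`, and `blake2s_round` are illustrative and not part of this commit; the ROT16 and ROR328 pshufb tables above perform the byte-aligned 16- and 8-bit rotations, the vpslld/vpsrld pairs (vprord in the AVX-512 paths) perform the 12- and 7-bit ones, and the vpshufd $0x39/$0x4e/$0x93 triples rotate the rows of the state between the column and diagonal halves of each round.

```c
#include <stdint.h>

/* 32-bit right rotate; rotation amounts in BLAKE2s are 16, 12, 8, 7. */
static inline uint32_t ror32(uint32_t x, unsigned int r)
{
	return (x >> r) | (x << (32 - r));
}

/* One BLAKE2s quarter-round over the 4x4 word state v[16]. */
static void g(uint32_t v[16], int a, int b, int c, int d, uint32_t x, uint32_t y)
{
	v[a] += v[b] + x; v[d] = ror32(v[d] ^ v[a], 16);
	v[c] += v[d];     v[b] = ror32(v[b] ^ v[c], 12);
	v[a] += v[b] + y; v[d] = ror32(v[d] ^ v[a], 8);
	v[c] += v[d];     v[b] = ror32(v[b] ^ v[c], 7);
}

/* One of the ten rounds: four column G's, then four diagonal G's, with
 * message words m[] selected by the per-round schedule s[].  The vector
 * code keeps v[0..3], v[4..7], v[8..11], v[12..15] in four xmm registers,
 * so each group of four G's collapses into a single G over all lanes. */
static void blake2s_round(uint32_t v[16], const uint32_t m[16], const uint8_t s[16])
{
	g(v, 0, 4,  8, 12, m[s[ 0]], m[s[ 1]]);
	g(v, 1, 5,  9, 13, m[s[ 2]], m[s[ 3]]);
	g(v, 2, 6, 10, 14, m[s[ 4]], m[s[ 5]]);
	g(v, 3, 7, 11, 15, m[s[ 6]], m[s[ 7]]);
	g(v, 0, 5, 10, 15, m[s[ 8]], m[s[ 9]]);
	g(v, 1, 6, 11, 12, m[s[10]], m[s[11]]);
	g(v, 2, 7,  8, 13, m[s[12]], m[s[13]]);
	g(v, 3, 4,  9, 14, m[s[14]], m[s[15]]);
}
```

Note that the SIGMA table added in this commit is not laid out in the RFC's per-G order: each row groups the message indices so that a half-round's words are contiguous, which lets the AVX-512 variants fetch a whole half-round at once with vpermi2d (ymm) or vpgatherdd (zmm) instead of the blend/shuffle sequences used in the plain AVX path.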
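As a usage sketch of how the module drives these entry points (assuming the x86-64 SysV calling convention, which the kernel uses for asmlinkage functions and which matches the register use visible in the assembly), the three routines share the prototype declared by declare_it() in main.c:

```c
#include <linux/linkage.h>
#include <linux/types.h>

/* state -> %rdi, block -> %rsi, nblocks -> %rdx, inc -> %rcx.  The first
 * 32 bytes of state hold the chaining value h[0..7]; the block counter
 * lives at offset 32, which is what "addq %rcx, 32(%rdi)" advances per
 * block (the benchmark passes inc = 0, so the counter stays fixed). */
asmlinkage void blake2s_compress_avx(u8 state[128], const u8 *block,
				     const size_t nblocks, const u32 inc);
```

Because all three routines clobber SIMD registers, callers must bracket them with kernel_fpu_begin()/kernel_fpu_end(), as mod_init() does around the do_it() invocations.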