author    Samuel Neves <sneves@dei.uc.pt>        2019-05-04 17:14:09 +0100
committer Jason A. Donenfeld <Jason@zx2c4.com>   2019-05-29 01:23:24 +0200
commit    22bbac4d2ffb62f28b0483f05f24a0f41639b787 (patch)
tree      262a0864dc669ac71dd27264f119c145799c4bc0 /src/crypto/zinc/chacha20
parent    qemu: do not check for alignment with ubsan (diff)
blake2s,chacha: latency tweak
In every odd-numbered round, instead of operating over the state

    x00 x01 x02 x03
    x05 x06 x07 x04
    x10 x11 x08 x09
    x15 x12 x13 x14

we operate over the rotated state

    x03 x00 x01 x02
    x04 x05 x06 x07
    x09 x10 x11 x08
    x14 x15 x12 x13

The advantage here is that this requires no changes to the 'x04 x05 x06 x07'
row, which is in the critical path. This results in a noticeable latency
improvement of roughly R cycles, for R diagonal rounds in the primitive.

In the case of BLAKE2s, which I also moved from requiring AVX to only
requiring SSSE3, we save approximately 30 cycles per compression function
call on Haswell and Skylake. In other words, this is an improvement of
~0.6 cpb.

This idea was pointed out to me by Shunsuke Shimizu, though it appears to
have been around for longer.

Signed-off-by: Samuel Neves <sneves@dei.uc.pt>
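For illustration only (not part of the patch): a minimal scalar C sketch of one
ChaCha double round using the rotated-state diagonalization described above.
The helper names (quarter_round, rotl_lanes, double_round) are hypothetical;
the actual code performs the lane rotations on whole SIMD rows with pshufd
(x86), vext (ARM) and ext (AArch64).

    #include <stdint.h>

    #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

    static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
            *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
            *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
            *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
            *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }

    /* Rotate a four-word row left by n lanes; stands in for pshufd/vext/ext. */
    static void rotl_lanes(uint32_t row[4], int n)
    {
            uint32_t t[4];
            for (int i = 0; i < 4; ++i)
                    t[i] = row[(i + n) & 3];
            for (int i = 0; i < 4; ++i)
                    row[i] = t[i];
    }

    static void double_round(uint32_t x[16])
    {
            uint32_t *a = &x[0], *b = &x[4], *c = &x[8], *d = &x[12];

            for (int i = 0; i < 4; ++i)  /* column round */
                    quarter_round(&a[i], &b[i], &c[i], &d[i]);

            /* Diagonalize into the rotated state: rows a, c and d are shuffled,
             * while row b (x04 x05 x06 x07), the critical-path row, is untouched. */
            rotl_lanes(a, 3); rotl_lanes(c, 1); rotl_lanes(d, 2);
            for (int i = 0; i < 4; ++i)  /* diagonal round on the rotated state */
                    quarter_round(&a[i], &b[i], &c[i], &d[i]);
            rotl_lanes(a, 1); rotl_lanes(c, 3); rotl_lanes(d, 2);  /* undo */
    }

With the classical layout, the diagonal rounds shuffle b, c and d before and
after each round instead; moving those shuffles off the critical-path row b is
where the roughly-one-cycle-per-diagonal-round saving comes from.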
Diffstat (limited to 'src/crypto/zinc/chacha20')

 src/crypto/zinc/chacha20/chacha20-arm.pl    |  6 +++---
 src/crypto/zinc/chacha20/chacha20-arm64.pl  |  6 +++---
 src/crypto/zinc/chacha20/chacha20-x86_64.pl | 48 ++++++++++++++++++++++++------------------------
 3 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/src/crypto/zinc/chacha20/chacha20-arm.pl b/src/crypto/zinc/chacha20/chacha20-arm.pl
index 6a7d62c..6785383 100644
--- a/src/crypto/zinc/chacha20/chacha20-arm.pl
+++ b/src/crypto/zinc/chacha20/chacha20-arm.pl
@@ -686,9 +686,9 @@ my ($a,$b,$c,$d,$t)=@_;
"&vshr_u32 ($b,$t,25)",
"&vsli_32 ($b,$t,7)",
- "&vext_8 ($c,$c,$c,8)",
- "&vext_8 ($b,$b,$b,$odd?12:4)",
- "&vext_8 ($d,$d,$d,$odd?4:12)"
+ "&vext_8 ($a,$a,$a,$odd?4:12)",
+ "&vext_8 ($d,$d,$d,8)",
+ "&vext_8 ($c,$c,$c,$odd?12:4)"
);
}
diff --git a/src/crypto/zinc/chacha20/chacha20-arm64.pl b/src/crypto/zinc/chacha20/chacha20-arm64.pl
index fc63cc8..ac14a99 100644
--- a/src/crypto/zinc/chacha20/chacha20-arm64.pl
+++ b/src/crypto/zinc/chacha20/chacha20-arm64.pl
@@ -378,9 +378,9 @@ my ($a,$b,$c,$d,$t)=@_;
"&ushr ('$b','$t',25)",
"&sli ('$b','$t',7)",
- "&ext ('$c','$c','$c',8)",
- "&ext ('$d','$d','$d',$odd?4:12)",
- "&ext ('$b','$b','$b',$odd?12:4)"
+ "&ext ('$a','$a','$a',$odd?4:12)",
+ "&ext ('$d','$d','$d',8)",
+ "&ext ('$c','$c','$c',$odd?12:4)"
);
}
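A hedged note on the two ARM diffs above: vext/ext with a byte offset of 4, 8
or 12 rotates a 128-bit row by 1, 2 or 3 32-bit lanes, so the new code rotates
rows a, c and d and leaves row b alone. A minimal NEON intrinsics sketch of the
same permutations (helper names hypothetical, not from the patch):

    #include <arm_neon.h>

    /* vextq_u32(v, v, n) rotates the four 32-bit lanes of v left by n,
     * i.e. the same permutation as vext.8/ext with a byte offset of 4*n. */
    static inline uint32x4_t rotl_lanes1(uint32x4_t v) { return vextq_u32(v, v, 1); }
    static inline uint32x4_t rotl_lanes2(uint32x4_t v) { return vextq_u32(v, v, 2); }
    static inline uint32x4_t rotl_lanes3(uint32x4_t v) { return vextq_u32(v, v, 3); }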
diff --git a/src/crypto/zinc/chacha20/chacha20-x86_64.pl b/src/crypto/zinc/chacha20/chacha20-x86_64.pl
index 38532f8..116c16e 100644
--- a/src/crypto/zinc/chacha20/chacha20-x86_64.pl
+++ b/src/crypto/zinc/chacha20/chacha20-x86_64.pl
@@ -525,15 +525,15 @@ $code.=<<___;
1:
___
&SSSE3ROUND();
- &pshufd ($c,$c,0b01001110);
- &pshufd ($b,$b,0b00111001);
- &pshufd ($d,$d,0b10010011);
+ &pshufd ($a,$a,0b10010011);
+ &pshufd ($d,$d,0b01001110);
+ &pshufd ($c,$c,0b00111001);
&nop ();
&SSSE3ROUND();
- &pshufd ($c,$c,0b01001110);
- &pshufd ($b,$b,0b10010011);
- &pshufd ($d,$d,0b00111001);
+ &pshufd ($a,$a,0b00111001);
+ &pshufd ($d,$d,0b01001110);
+ &pshufd ($c,$c,0b10010011);
&dec ($counter);
&jnz ("1b");
@@ -600,15 +600,15 @@ $code.=<<___;
.Loop_ssse3:
___
&SSSE3ROUND();
- &pshufd ($c,$c,0b01001110);
- &pshufd ($b,$b,0b00111001);
- &pshufd ($d,$d,0b10010011);
+ &pshufd ($a,$a,0b10010011);
+ &pshufd ($d,$d,0b01001110);
+ &pshufd ($c,$c,0b00111001);
&nop ();
&SSSE3ROUND();
- &pshufd ($c,$c,0b01001110);
- &pshufd ($b,$b,0b10010011);
- &pshufd ($d,$d,0b00111001);
+ &pshufd ($a,$a,0b00111001);
+ &pshufd ($d,$d,0b01001110);
+ &pshufd ($c,$c,0b10010011);
&dec ($counter);
&jnz (".Loop_ssse3");
@@ -770,20 +770,20 @@ $code.=<<___;
.Loop_128:
___
&SSSE3ROUND_2x();
- &pshufd ($c,$c,0b01001110);
- &pshufd ($b,$b,0b00111001);
- &pshufd ($d,$d,0b10010011);
- &pshufd ($c1,$c1,0b01001110);
- &pshufd ($b1,$b1,0b00111001);
- &pshufd ($d1,$d1,0b10010011);
+ &pshufd ($a,$a,0b10010011);
+ &pshufd ($d,$d,0b01001110);
+ &pshufd ($c,$c,0b00111001);
+ &pshufd ($a1,$a1,0b10010011);
+ &pshufd ($d1,$d1,0b01001110);
+ &pshufd ($c1,$c1,0b00111001);
&SSSE3ROUND_2x();
- &pshufd ($c,$c,0b01001110);
- &pshufd ($b,$b,0b10010011);
- &pshufd ($d,$d,0b00111001);
- &pshufd ($c1,$c1,0b01001110);
- &pshufd ($b1,$b1,0b10010011);
- &pshufd ($d1,$d1,0b00111001);
+ &pshufd ($a,$a,0b00111001);
+ &pshufd ($d,$d,0b01001110);
+ &pshufd ($c,$c,0b10010011);
+ &pshufd ($a1,$a1,0b00111001);
+ &pshufd ($d1,$d1,0b01001110);
+ &pshufd ($c1,$c1,0b10010011);
&dec ($counter);
&jnz (".Loop_128");