From 22bbac4d2ffb62f28b0483f05f24a0f41639b787 Mon Sep 17 00:00:00 2001
From: Samuel Neves
Date: Sat, 4 May 2019 17:14:09 +0100
Subject: blake2s,chacha: latency tweak

In every odd-numbered round, instead of operating over the state

    x00 x01 x02 x03
    x05 x06 x07 x04
    x10 x11 x08 x09
    x15 x12 x13 x14

we operate over the rotated state

    x03 x00 x01 x02
    x04 x05 x06 x07
    x09 x10 x11 x08
    x14 x15 x12 x13

The advantage here is that this requires no changes to the 'x04 x05 x06
x07' row, which is in the critical path. This results in a noticeable
latency improvement of roughly R cycles, for R diagonal rounds in the
primitive.

In the case of BLAKE2s, which I also moved from requiring AVX to only
requiring SSSE3, we save approximately 30 cycles per compression
function call on Haswell and Skylake. In other words, this is an
improvement of ~0.6 cpb.

This idea was pointed out to me by Shunsuke Shimizu, though it appears
to have been around for longer.

Signed-off-by: Samuel Neves
---
 src/crypto/zinc/chacha20/chacha20-arm.pl    |  6 ++--
 src/crypto/zinc/chacha20/chacha20-arm64.pl  |  6 ++--
 src/crypto/zinc/chacha20/chacha20-x86_64.pl | 48 ++++++++++++++---------------
 3 files changed, 30 insertions(+), 30 deletions(-)
(limited to 'src/crypto/zinc/chacha20')

diff --git a/src/crypto/zinc/chacha20/chacha20-arm.pl b/src/crypto/zinc/chacha20/chacha20-arm.pl
index 6a7d62c..6785383 100644
--- a/src/crypto/zinc/chacha20/chacha20-arm.pl
+++ b/src/crypto/zinc/chacha20/chacha20-arm.pl
@@ -686,9 +686,9 @@ my ($a,$b,$c,$d,$t)=@_;
 	"&vshr_u32	($b,$t,25)",
 	"&vsli_32	($b,$t,7)",
 
-	"&vext_8	($c,$c,$c,8)",
-	"&vext_8	($b,$b,$b,$odd?12:4)",
-	"&vext_8	($d,$d,$d,$odd?4:12)"
+	"&vext_8	($a,$a,$a,$odd?4:12)",
+	"&vext_8	($d,$d,$d,8)",
+	"&vext_8	($c,$c,$c,$odd?12:4)"
 	);
 }
diff --git a/src/crypto/zinc/chacha20/chacha20-arm64.pl b/src/crypto/zinc/chacha20/chacha20-arm64.pl
index fc63cc8..ac14a99 100644
--- a/src/crypto/zinc/chacha20/chacha20-arm64.pl
+++ b/src/crypto/zinc/chacha20/chacha20-arm64.pl
@@ -378,9 +378,9 @@ my ($a,$b,$c,$d,$t)=@_;
 	"&ushr		('$b','$t',25)",
 	"&sli		('$b','$t',7)",
 
-	"&ext		('$c','$c','$c',8)",
-	"&ext		('$d','$d','$d',$odd?4:12)",
-	"&ext		('$b','$b','$b',$odd?12:4)"
+	"&ext		('$a','$a','$a',$odd?4:12)",
+	"&ext		('$d','$d','$d',8)",
+	"&ext		('$c','$c','$c',$odd?12:4)"
 	);
 }
diff --git a/src/crypto/zinc/chacha20/chacha20-x86_64.pl b/src/crypto/zinc/chacha20/chacha20-x86_64.pl
index 38532f8..116c16e 100644
--- a/src/crypto/zinc/chacha20/chacha20-x86_64.pl
+++ b/src/crypto/zinc/chacha20/chacha20-x86_64.pl
@@ -525,15 +525,15 @@ $code.=<<___;
 1:
 ___
 	&SSSE3ROUND();
-	&pshufd	($c,$c,0b01001110);
-	&pshufd	($b,$b,0b00111001);
-	&pshufd	($d,$d,0b10010011);
+	&pshufd	($a,$a,0b10010011);
+	&pshufd	($d,$d,0b01001110);
+	&pshufd	($c,$c,0b00111001);
 	&nop	();
 
 	&SSSE3ROUND();
-	&pshufd	($c,$c,0b01001110);
-	&pshufd	($b,$b,0b10010011);
-	&pshufd	($d,$d,0b00111001);
+	&pshufd	($a,$a,0b00111001);
+	&pshufd	($d,$d,0b01001110);
+	&pshufd	($c,$c,0b10010011);
 
 	&dec	($counter);
 	&jnz	("1b");
@@ -600,15 +600,15 @@ $code.=<<___;
 .Loop_ssse3:
 ___
 	&SSSE3ROUND();
-	&pshufd	($c,$c,0b01001110);
-	&pshufd	($b,$b,0b00111001);
-	&pshufd	($d,$d,0b10010011);
+	&pshufd	($a,$a,0b10010011);
+	&pshufd	($d,$d,0b01001110);
+	&pshufd	($c,$c,0b00111001);
 	&nop	();
 
 	&SSSE3ROUND();
-	&pshufd	($c,$c,0b01001110);
-	&pshufd	($b,$b,0b10010011);
-	&pshufd	($d,$d,0b00111001);
+	&pshufd	($a,$a,0b00111001);
+	&pshufd	($d,$d,0b01001110);
+	&pshufd	($c,$c,0b10010011);
 
 	&dec	($counter);
 	&jnz	(".Loop_ssse3");
@@ -770,20 +770,20 @@ $code.=<<___;
 .Loop_128:
 ___
 	&SSSE3ROUND_2x();
-	&pshufd	($c,$c,0b01001110);
-	&pshufd	($b,$b,0b00111001);
-	&pshufd	($d,$d,0b10010011);
-	&pshufd	($c1,$c1,0b01001110);
-	&pshufd	($b1,$b1,0b00111001);
-	&pshufd	($d1,$d1,0b10010011);
+	&pshufd	($a,$a,0b10010011);
+	&pshufd	($d,$d,0b01001110);
+	&pshufd	($c,$c,0b00111001);
+	&pshufd	($a1,$a1,0b10010011);
+	&pshufd	($d1,$d1,0b01001110);
+	&pshufd	($c1,$c1,0b00111001);
 
 	&SSSE3ROUND_2x();
-	&pshufd	($c,$c,0b01001110);
-	&pshufd	($b,$b,0b10010011);
-	&pshufd	($d,$d,0b00111001);
-	&pshufd	($c1,$c1,0b01001110);
-	&pshufd	($b1,$b1,0b10010011);
-	&pshufd	($d1,$d1,0b00111001);
+	&pshufd	($a,$a,0b00111001);
+	&pshufd	($d,$d,0b01001110);
+	&pshufd	($c,$c,0b10010011);
+	&pshufd	($a1,$a1,0b00111001);
+	&pshufd	($d1,$d1,0b01001110);
+	&pshufd	($c1,$c1,0b10010011);
 
 	&dec	($counter);
 	&jnz	(".Loop_128");
-- 
cgit v1.2.3-59-g8ed1b
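
For readers who want to convince themselves of the equivalence, below is a
minimal scalar C sketch of the two diagonalization strategies. It is not part
of the patch and not the SIMD code itself: rotl_row() stands in for the
pshufd/vext lane rotations, and all function names here are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

/* One ChaCha quarter round on four words. */
static void qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d = ROTL32(*d ^ *a, 16);
	*c += *d; *b = ROTL32(*b ^ *c, 12);
	*a += *b; *d = ROTL32(*d ^ *a, 8);
	*c += *d; *b = ROTL32(*b ^ *c, 7);
}

/* Rotate a 4-lane row left by n lanes; models pshufd/vext. */
static void rotl_row(uint32_t row[4], int n)
{
	uint32_t t[4];
	for (int i = 0; i < 4; i++)
		t[i] = row[(i + n) & 3];
	memcpy(row, t, sizeof(t));
}

/* Column round: quarter rounds down the four columns of the 4x4 state. */
static void column_round(uint32_t x[16])
{
	for (int i = 0; i < 4; i++)
		qr(&x[i], &x[4 + i], &x[8 + i], &x[12 + i]);
}

/* Classic diagonal round: rotate rows b, c, d so the diagonals become
 * columns (the first matrix in the commit message), do a column round,
 * rotate back. */
static void diagonal_round_classic(uint32_t x[16])
{
	rotl_row(&x[4], 1); rotl_row(&x[8], 2); rotl_row(&x[12], 3);
	column_round(x);
	rotl_row(&x[4], 3); rotl_row(&x[8], 2); rotl_row(&x[12], 1);
}

/* Rotated-state diagonal round from this patch (the second matrix):
 * row b (x04..x07) stays put -- it is on the critical path -- and
 * rows a, c, d rotate instead. The columns are the same four
 * diagonals, just in a different order, and quarter rounds on
 * independent columns may be applied in any order. */
static void diagonal_round_rotated(uint32_t x[16])
{
	rotl_row(&x[0], 3); rotl_row(&x[8], 1); rotl_row(&x[12], 2);
	column_round(x);
	rotl_row(&x[0], 1); rotl_row(&x[8], 3); rotl_row(&x[12], 2);
}

int main(void)
{
	uint32_t x[16], y[16];

	for (int i = 0; i < 16; i++)		/* arbitrary test input */
		x[i] = y[i] = 0x9e3779b9u * (i + 1);

	column_round(x); diagonal_round_classic(x);
	column_round(y); diagonal_round_rotated(y);

	printf("%s\n", memcmp(x, y, sizeof(x)) ? "MISMATCH" : "identical");
	return 0;
}

Both variants produce bit-identical states; the rotated one merely leaves the
b row untouched between rounds, which is what shortens the dependency chain
by one lane rotation per diagonal round.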