Diffstat:
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 297
 1 file changed, 163 insertions(+), 134 deletions(-)
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index 3f0fc7dd87d7..2402b9418cd7 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -1,71 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/*
- * Implement AES CTR mode by8 optimization with AVX instructions. (x86_64)
- *
- * This is AES128/192/256 CTR mode optimization implementation. It requires
- * the support of Intel(R) AESNI and AVX instructions.
- *
- * This work was inspired by the AES CTR mode optimization published
- * in Intel Optimized IPSEC Cryptograhpic library.
- * Additional information on it can be found at:
- * http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
+ * AES CTR mode by8 optimization with AVX instructions. (x86_64)
*
* Copyright(c) 2014 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* Contact Information:
* James Guilford <james.guilford@intel.com>
* Sean Gulley <sean.m.gulley@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
+ */
+/*
+ * This is AES128/192/256 CTR mode optimization implementation. It requires
+ * the support of Intel(R) AESNI and AVX instructions.
*
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+ * This work was inspired by the AES CTR mode optimization published
+ * in Intel Optimized IPSEC Cryptographic library.
+ * Additional information on it can be found at:
+ * https://github.com/intel/intel-ipsec-mb
*/
#include <linux/linkage.h>
#define VMOVDQ vmovdqu
+/*
+ * Note: the "x" prefix in these aliases means "this is an xmm register". The
+ * alias prefixes have no relation to XCTR where the "X" prefix means "XOR
+ * counter".
+ */
#define xdata0 %xmm0
#define xdata1 %xmm1
#define xdata2 %xmm2
@@ -74,8 +36,10 @@
#define xdata5 %xmm5
#define xdata6 %xmm6
#define xdata7 %xmm7
-#define xcounter %xmm8
-#define xbyteswap %xmm9
+#define xcounter %xmm8 // CTR mode only
+#define xiv %xmm8 // XCTR mode only
+#define xbyteswap %xmm9 // CTR mode only
+#define xtmp %xmm9 // XCTR mode only
#define xkey0 %xmm10
#define xkey4 %xmm11
#define xkey8 %xmm12
@@ -88,7 +52,7 @@
#define p_keys %rdx
#define p_out %rcx
#define num_bytes %r8
-
+#define counter %r9 // XCTR mode only
#define tmp %r10
#define DDQ_DATA 0
#define XDATA 1
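For reference, here is a rough C model (illustration only, not part of the patch; the helper names are made up) of the two keystream-block constructions that the register aliases above distinguish. CTR treats the IV as a big-endian 128-bit counter, which is why xbyteswap is needed, while XCTR computes AES_K(IV ^ le(ctr)) with a little-endian counter, so xiv stays untouched and no byteswap occurs:

#include <stdint.h>
#include <string.h>

/* CTR: the IV is the counter; increment it big-endian (simplified here to a
 * 64-bit counter in the rightmost eight bytes). */
static void ctr_counter_block(uint8_t block[16], const uint8_t iv[16],
			      uint64_t i)
{
	uint64_t lo = 0;

	memcpy(block, iv, 16);
	for (int b = 0; b < 8; b++)
		lo = (lo << 8) | iv[8 + b];
	lo += i;
	for (int b = 7; b >= 0; b--) {
		block[8 + b] = (uint8_t)lo;
		lo >>= 8;
	}
	/* block is then run through the AES rounds */
}

/* XCTR: XOR a little-endian counter into the IV; the first block of a
 * message uses counter value 1 (hence the ddq_add_1 offsets below). */
static void xctr_counter_block(uint8_t block[16], const uint8_t iv[16],
			       uint64_t ctr)
{
	memcpy(block, iv, 16);
	for (int b = 0; b < 8; b++)
		block[b] ^= (uint8_t)(ctr >> (8 * b));
	/* block is then run through the AES rounds */
}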
@@ -145,7 +109,7 @@ ddq_add_8:
* do_aes num_in_par load_keys key_len
* This increments p_in, but not p_out
*/
-.macro do_aes b, k, key_len
+.macro do_aes b, k, key_len, xctr
.set by, \b
.set load_keys, \k
.set klen, \key_len
@@ -154,29 +118,48 @@ ddq_add_8:
vmovdqa 0*16(p_keys), xkey0
.endif
- vpshufb xbyteswap, xcounter, xdata0
-
- .set i, 1
- .rept (by - 1)
- club XDATA, i
- vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
- vptest ddq_low_msk(%rip), var_xdata
- jnz 1f
- vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
- vpaddq ddq_high_add_1(%rip), xcounter, xcounter
- 1:
- vpshufb xbyteswap, var_xdata, var_xdata
- .set i, (i +1)
- .endr
+ .if \xctr
+ movq counter, xtmp
+ .set i, 0
+ .rept (by)
+ club XDATA, i
+ vpaddq (ddq_add_1 + 16 * i)(%rip), xtmp, var_xdata
+ .set i, (i +1)
+ .endr
+ .set i, 0
+ .rept (by)
+ club XDATA, i
+ vpxor xiv, var_xdata, var_xdata
+ .set i, (i +1)
+ .endr
+ .else
+ vpshufb xbyteswap, xcounter, xdata0
+ .set i, 1
+ .rept (by - 1)
+ club XDATA, i
+ vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
+ vptest ddq_low_msk(%rip), var_xdata
+ jnz 1f
+ vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
+ vpaddq ddq_high_add_1(%rip), xcounter, xcounter
+ 1:
+ vpshufb xbyteswap, var_xdata, var_xdata
+ .set i, (i +1)
+ .endr
+ .endif
vmovdqa 1*16(p_keys), xkeyA
vpxor xkey0, xdata0, xdata0
- vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
- vptest ddq_low_msk(%rip), xcounter
- jnz 1f
- vpaddq ddq_high_add_1(%rip), xcounter, xcounter
- 1:
+ .if \xctr
+ add $by, counter
+ .else
+ vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
+ vptest ddq_low_msk(%rip), xcounter
+ jnz 1f
+ vpaddq ddq_high_add_1(%rip), xcounter, xcounter
+ 1:
+ .endif
.set i, 1
.rept (by - 1)
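Condensed into C, the XCTR arm of do_aes above does the following for its `by` blocks (an illustrative model, not the patch's code; aes_encrypt_block() is a made-up stand-in for the rounds performed with the xkey* registers):

/* Illustration only -- model of one do_aes invocation in XCTR mode. */
#include <stdint.h>
#include <string.h>

void aes_encrypt_block(uint8_t block[16]);	/* hypothetical stand-in */

static void do_aes_xctr_model(uint8_t *out, const uint8_t *in,
			      const uint8_t iv[16], uint64_t *counter,
			      int by /* 1..8 */)
{
	for (int i = 0; i < by; i++) {
		uint8_t block[16];
		uint64_t ctr = *counter + 1 + i;	/* vpaddq ddq_add_{1..8} */

		memcpy(block, iv, 16);			/* vpxor xiv */
		for (int b = 0; b < 8; b++)
			block[b] ^= (uint8_t)(ctr >> (8 * b));

		aes_encrypt_block(block);

		for (int b = 0; b < 16; b++)		/* XOR keystream into data */
			out[16 * i + b] = in[16 * i + b] ^ block[b];
	}
	*counter += by;					/* add $by, counter */
}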
@@ -414,94 +397,99 @@ ddq_add_8:
.endr
.endm
-.macro do_aes_load val, key_len
- do_aes \val, 1, \key_len
+.macro do_aes_load val, key_len, xctr
+ do_aes \val, 1, \key_len, \xctr
.endm
-.macro do_aes_noload val, key_len
- do_aes \val, 0, \key_len
+.macro do_aes_noload val, key_len, xctr
+ do_aes \val, 0, \key_len, \xctr
.endm
/* main body of aes ctr load */
-.macro do_aes_ctrmain key_len
+.macro do_aes_ctrmain key_len, xctr
cmp $16, num_bytes
- jb .Ldo_return2\key_len
+ jb .Ldo_return2\xctr\key_len
- vmovdqa byteswap_const(%rip), xbyteswap
- vmovdqu (p_iv), xcounter
- vpshufb xbyteswap, xcounter, xcounter
+ .if \xctr
+ shr $4, counter
+ vmovdqu (p_iv), xiv
+ .else
+ vmovdqa byteswap_const(%rip), xbyteswap
+ vmovdqu (p_iv), xcounter
+ vpshufb xbyteswap, xcounter, xcounter
+ .endif
mov num_bytes, tmp
and $(7*16), tmp
- jz .Lmult_of_8_blks\key_len
+ jz .Lmult_of_8_blks\xctr\key_len
/* 1 <= tmp <= 7 */
cmp $(4*16), tmp
- jg .Lgt4\key_len
- je .Leq4\key_len
+ jg .Lgt4\xctr\key_len
+ je .Leq4\xctr\key_len
-.Llt4\key_len:
+.Llt4\xctr\key_len:
cmp $(2*16), tmp
- jg .Leq3\key_len
- je .Leq2\key_len
+ jg .Leq3\xctr\key_len
+ je .Leq2\xctr\key_len
-.Leq1\key_len:
- do_aes_load 1, \key_len
+.Leq1\xctr\key_len:
+ do_aes_load 1, \key_len, \xctr
add $(1*16), p_out
and $(~7*16), num_bytes
- jz .Ldo_return2\key_len
- jmp .Lmain_loop2\key_len
+ jz .Ldo_return2\xctr\key_len
+ jmp .Lmain_loop2\xctr\key_len
-.Leq2\key_len:
- do_aes_load 2, \key_len
+.Leq2\xctr\key_len:
+ do_aes_load 2, \key_len, \xctr
add $(2*16), p_out
and $(~7*16), num_bytes
- jz .Ldo_return2\key_len
- jmp .Lmain_loop2\key_len
+ jz .Ldo_return2\xctr\key_len
+ jmp .Lmain_loop2\xctr\key_len
-.Leq3\key_len:
- do_aes_load 3, \key_len
+.Leq3\xctr\key_len:
+ do_aes_load 3, \key_len, \xctr
add $(3*16), p_out
and $(~7*16), num_bytes
- jz .Ldo_return2\key_len
- jmp .Lmain_loop2\key_len
+ jz .Ldo_return2\xctr\key_len
+ jmp .Lmain_loop2\xctr\key_len
-.Leq4\key_len:
- do_aes_load 4, \key_len
+.Leq4\xctr\key_len:
+ do_aes_load 4, \key_len, \xctr
add $(4*16), p_out
and $(~7*16), num_bytes
- jz .Ldo_return2\key_len
- jmp .Lmain_loop2\key_len
+ jz .Ldo_return2\xctr\key_len
+ jmp .Lmain_loop2\xctr\key_len
-.Lgt4\key_len:
+.Lgt4\xctr\key_len:
cmp $(6*16), tmp
- jg .Leq7\key_len
- je .Leq6\key_len
+ jg .Leq7\xctr\key_len
+ je .Leq6\xctr\key_len
-.Leq5\key_len:
- do_aes_load 5, \key_len
+.Leq5\xctr\key_len:
+ do_aes_load 5, \key_len, \xctr
add $(5*16), p_out
and $(~7*16), num_bytes
- jz .Ldo_return2\key_len
- jmp .Lmain_loop2\key_len
+ jz .Ldo_return2\xctr\key_len
+ jmp .Lmain_loop2\xctr\key_len
-.Leq6\key_len:
- do_aes_load 6, \key_len
+.Leq6\xctr\key_len:
+ do_aes_load 6, \key_len, \xctr
add $(6*16), p_out
and $(~7*16), num_bytes
- jz .Ldo_return2\key_len
- jmp .Lmain_loop2\key_len
+ jz .Ldo_return2\xctr\key_len
+ jmp .Lmain_loop2\xctr\key_len
-.Leq7\key_len:
- do_aes_load 7, \key_len
+.Leq7\xctr\key_len:
+ do_aes_load 7, \key_len, \xctr
add $(7*16), p_out
and $(~7*16), num_bytes
- jz .Ldo_return2\key_len
- jmp .Lmain_loop2\key_len
+ jz .Ldo_return2\xctr\key_len
+ jmp .Lmain_loop2\xctr\key_len
-.Lmult_of_8_blks\key_len:
+.Lmult_of_8_blks\xctr\key_len:
.if (\key_len != KEY_128)
vmovdqa 0*16(p_keys), xkey0
vmovdqa 4*16(p_keys), xkey4
@@ -514,18 +502,20 @@ ddq_add_8:
vmovdqa 9*16(p_keys), xkey12
.endif
.align 16
-.Lmain_loop2\key_len:
+.Lmain_loop2\xctr\key_len:
/* num_bytes is a multiple of 8 and >0 */
- do_aes_noload 8, \key_len
+ do_aes_noload 8, \key_len, \xctr
add $(8*16), p_out
sub $(8*16), num_bytes
- jne .Lmain_loop2\key_len
+ jne .Lmain_loop2\xctr\key_len
-.Ldo_return2\key_len:
- /* return updated IV */
- vpshufb xbyteswap, xcounter, xcounter
- vmovdqu xcounter, (p_iv)
- ret
+.Ldo_return2\xctr\key_len:
+ .if !\xctr
+ /* return updated IV */
+ vpshufb xbyteswap, xcounter, xcounter
+ vmovdqu xcounter, (p_iv)
+ .endif
+ RET
.endm
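The dispatch in do_aes_ctrmain is shared by both modes; only the label suffixes (\xctr\key_len), the counter/IV setup and the return path differ. As a control-flow sketch (assuming num_bytes is a multiple of 16; ctrmain_model() and do_aes_model() are made-up stand-ins for the macro and the do_aes_load/do_aes_noload calls):

/* Illustration only -- outline of do_aes_ctrmain. */
#include <stdint.h>
#include <stddef.h>

static void do_aes_model(uint8_t *out, const uint8_t *in, uint8_t iv[16],
			 uint64_t *counter, int by, int xctr);	/* stand-in */

static void ctrmain_model(uint8_t *out, const uint8_t *in, size_t num_bytes,
			  uint8_t iv[16], uint64_t byte_ctr, int xctr)
{
	/* CTR keeps its counter inside the IV itself; XCTR converts the
	 * byte counter into a block counter up front (shr $4, counter). */
	uint64_t counter = xctr ? byte_ctr / 16 : 0;
	size_t tail;

	if (num_bytes < 16)
		return;

	tail = (num_bytes & (7 * 16)) / 16;	/* and $(7*16), tmp */
	if (tail) {				/* .Leq1 .. .Leq7 */
		do_aes_model(out, in, iv, &counter, tail, xctr);
		out += 16 * tail;
		in += 16 * tail;
		num_bytes -= 16 * tail;		/* and $(~7*16), num_bytes */
	}

	while (num_bytes >= 8 * 16) {		/* .Lmain_loop2 */
		do_aes_model(out, in, iv, &counter, 8, xctr);
		out += 8 * 16;
		in += 8 * 16;
		num_bytes -= 8 * 16;
	}

	if (!xctr) {
		/* CTR only: byteswap the updated counter and store it back
		 * through p_iv, as in .Ldo_return2 above. */
	}
}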
/*
@@ -537,7 +527,7 @@ ddq_add_8:
*/
SYM_FUNC_START(aes_ctr_enc_128_avx_by8)
/* call the aes main loop */
- do_aes_ctrmain KEY_128
+ do_aes_ctrmain KEY_128 0
SYM_FUNC_END(aes_ctr_enc_128_avx_by8)
@@ -550,7 +540,7 @@ SYM_FUNC_END(aes_ctr_enc_128_avx_by8)
*/
SYM_FUNC_START(aes_ctr_enc_192_avx_by8)
/* call the aes main loop */
- do_aes_ctrmain KEY_192
+ do_aes_ctrmain KEY_192 0
SYM_FUNC_END(aes_ctr_enc_192_avx_by8)
@@ -563,6 +553,45 @@ SYM_FUNC_END(aes_ctr_enc_192_avx_by8)
*/
SYM_FUNC_START(aes_ctr_enc_256_avx_by8)
/* call the aes main loop */
- do_aes_ctrmain KEY_256
+ do_aes_ctrmain KEY_256 0
SYM_FUNC_END(aes_ctr_enc_256_avx_by8)
+
+/*
+ * routine to do AES128 XCTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv, const void *keys,
+ * u8* out, unsigned int num_bytes, unsigned int byte_ctr)
+ */
+SYM_FUNC_START(aes_xctr_enc_128_avx_by8)
+ /* call the aes main loop */
+ do_aes_ctrmain KEY_128 1
+
+SYM_FUNC_END(aes_xctr_enc_128_avx_by8)
+
+/*
+ * routine to do AES192 XCTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv, const void *keys,
+ * u8* out, unsigned int num_bytes, unsigned int byte_ctr)
+ */
+SYM_FUNC_START(aes_xctr_enc_192_avx_by8)
+ /* call the aes main loop */
+ do_aes_ctrmain KEY_192 1
+
+SYM_FUNC_END(aes_xctr_enc_192_avx_by8)
+
+/*
+ * routine to do AES256 XCTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv, const void *keys,
+ * u8* out, unsigned int num_bytes, unsigned int byte_ctr)
+ */
+SYM_FUNC_START(aes_xctr_enc_256_avx_by8)
+ /* call the aes main loop */
+ do_aes_ctrmain KEY_256 1
+
+SYM_FUNC_END(aes_xctr_enc_256_avx_by8)
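For context, a hypothetical caller based only on the prototype given in the comments above (the wrapper name is made up; the real glue code in arch/x86/crypto/aesni-intel_glue.c additionally walks the skcipher request and handles message lengths that are not a multiple of 16 bytes):

#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/fpu/api.h>

asmlinkage void aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv,
					 const void *keys, u8 *out,
					 unsigned int num_bytes,
					 unsigned int byte_ctr);

/* Illustration only. */
static void xctr_crypt_sketch(const void *key_schedule, const u8 iv[16],
			      const u8 *src, u8 *dst, unsigned int nbytes)
{
	kernel_fpu_begin();	/* the asm clobbers AVX/XMM state */
	aes_xctr_enc_128_avx_by8(src, iv, key_schedule, dst, nbytes,
				 0 /* byte_ctr: start of the message */);
	kernel_fpu_end();
}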