From 303fd3e1c771077e32e96e5788817f025f0067e2 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 8 Dec 2020 15:34:41 +0100 Subject: crypto: tcrypt - avoid signed overflow in byte count The signed long type used for printing the number of bytes processed in tcrypt benchmarks limits the range to -/+ 2 GiB, which is not sufficient to cover the performance of common accelerated ciphers such as AES-NI when benchmarked with sec=1. So switch to u64 instead. While at it, fix up a missing printk->pr_cont conversion in the AEAD benchmark. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a647bb298fbc..a4a11d2b57bd 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -199,8 +199,8 @@ static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -471,8 +471,8 @@ static int test_aead_jiffies(struct aead_request *req, int enc, return ret; } - printk("%d operations in %d seconds (%ld bytes)\n", - bcount, secs, (long)bcount * blen); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount, secs, (u64)bcount * blen); return 0; } @@ -764,8 +764,8 @@ static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -1201,8 +1201,8 @@ static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -1441,8 +1441,8 @@ static int test_acipher_jiffies(struct skcipher_request *req, int enc, return ret; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount, secs, (long)bcount * blen); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount, secs, (u64)bcount * blen); return 0; } -- cgit v1.2.3-59-g8ed1b From 0eb76ba29d16df2951d37c54ca279c4e5630b071 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 11 Dec 2020 13:27:15 +0100 Subject: crypto: remove cipher routines from public crypto API The cipher routines in the crypto API are mostly intended for templates implementing skcipher modes generically in software, and shouldn't be used outside of the crypto subsystem. So move the prototypes and all related definitions to a new header file under include/crypto/internal. Also, let's use the new module namespace feature to move the symbol exports into a new namespace CRYPTO_INTERNAL. 
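For reference, a minimal sketch of what a legitimate in-tree user looks like after this change (hypothetical module code, error handling trimmed; the function name is made up for illustration):

    #include <linux/module.h>
    #include <crypto/internal/cipher.h>

    /* Encrypt a single cipher block with a bare "aes" transform. */
    static int example_encrypt_one_block(const u8 *key, unsigned int keylen,
                                         const u8 *in, u8 *out)
    {
            struct crypto_cipher *tfm;
            int err;

            tfm = crypto_alloc_cipher("aes", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_cipher_setkey(tfm, key, keylen);
            if (!err)
                    crypto_cipher_encrypt_one(tfm, out, in);

            crypto_free_cipher(tfm);
            return err;
    }

    /* Required now that the symbols live in a namespace. */
    MODULE_IMPORT_NS(CRYPTO_INTERNAL);

A module that references the namespaced symbols without the import triggers a modpost warning at build time and is rejected at module load time.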
Signed-off-by: Ard Biesheuvel Acked-by: Eric Biggers Signed-off-by: Herbert Xu --- Documentation/crypto/api-skcipher.rst | 4 +- arch/arm/crypto/aes-neonbs-glue.c | 3 + arch/s390/crypto/aes_s390.c | 2 + crypto/adiantum.c | 2 + crypto/ansi_cprng.c | 2 + crypto/cbc.c | 1 + crypto/ccm.c | 2 + crypto/cfb.c | 2 + crypto/cipher.c | 7 +- crypto/cmac.c | 2 + crypto/ctr.c | 2 + crypto/drbg.c | 2 + crypto/ecb.c | 1 + crypto/essiv.c | 2 + crypto/keywrap.c | 2 + crypto/ofb.c | 2 + crypto/pcbc.c | 2 + crypto/skcipher.c | 2 + crypto/testmgr.c | 3 + crypto/vmac.c | 2 + crypto/xcbc.c | 2 + crypto/xts.c | 2 + drivers/crypto/geode-aes.c | 2 + drivers/crypto/inside-secure/safexcel.c | 1 + drivers/crypto/inside-secure/safexcel_hash.c | 1 + drivers/crypto/qat/qat_common/adf_ctl_drv.c | 1 + drivers/crypto/qat/qat_common/qat_algs.c | 1 + drivers/crypto/vmx/aes.c | 1 + drivers/crypto/vmx/vmx.c | 1 + include/crypto/algapi.h | 39 ----- include/crypto/internal/cipher.h | 218 +++++++++++++++++++++++++++ include/crypto/internal/skcipher.h | 1 + include/linux/crypto.h | 163 -------------------- 33 files changed, 273 insertions(+), 207 deletions(-) create mode 100644 include/crypto/internal/cipher.h (limited to 'crypto') diff --git a/Documentation/crypto/api-skcipher.rst b/Documentation/crypto/api-skcipher.rst index 1aaf8985894b..04d6cc5357c8 100644 --- a/Documentation/crypto/api-skcipher.rst +++ b/Documentation/crypto/api-skcipher.rst @@ -28,8 +28,8 @@ Symmetric Key Cipher Request Handle Single Block Cipher API ----------------------- -.. kernel-doc:: include/linux/crypto.h +.. kernel-doc:: include/crypto/internal/cipher.h :doc: Single Block Cipher API -.. kernel-doc:: include/linux/crypto.h +.. kernel-doc:: include/crypto/internal/cipher.h :functions: crypto_alloc_cipher crypto_free_cipher crypto_has_cipher crypto_cipher_blocksize crypto_cipher_setkey crypto_cipher_encrypt_one crypto_cipher_decrypt_one diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index f70af1d0514b..5c6cd3c63cbc 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -23,6 +24,8 @@ MODULE_ALIAS_CRYPTO("cbc(aes)-all"); MODULE_ALIAS_CRYPTO("ctr(aes)"); MODULE_ALIAS_CRYPTO("xts(aes)"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); + asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds); asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 73044634d342..54c7536f2482 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -1055,3 +1056,4 @@ MODULE_ALIAS_CRYPTO("aes-all"); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/adiantum.c b/crypto/adiantum.c index ce4d5725342c..84450130cb6b 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -32,6 +32,7 @@ #include #include +#include #include #include #include @@ -616,3 +617,4 @@ MODULE_DESCRIPTION("Adiantum length-preserving encryption mode"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Eric Biggers "); MODULE_ALIAS_CRYPTO("adiantum"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index c475c1129ff2..3f512efaba3a 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -7,6 +7,7 @@ * (C) Neil Horman */ +#include #include 
#include #include @@ -470,3 +471,4 @@ subsys_initcall(prng_mod_init); module_exit(prng_mod_fini); MODULE_ALIAS_CRYPTO("stdrng"); MODULE_ALIAS_CRYPTO("ansi_cprng"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/cbc.c b/crypto/cbc.c index 0d9509dff891..6c03e96b945f 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include diff --git a/crypto/ccm.c b/crypto/ccm.c index 494d70901186..6b815ece51c6 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -954,3 +955,4 @@ MODULE_ALIAS_CRYPTO("ccm_base"); MODULE_ALIAS_CRYPTO("rfc4309"); MODULE_ALIAS_CRYPTO("ccm"); MODULE_ALIAS_CRYPTO("cbcmac"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/cfb.c b/crypto/cfb.c index 4e5219bbcd19..0d664dfb47bc 100644 --- a/crypto/cfb.c +++ b/crypto/cfb.c @@ -20,6 +20,7 @@ */ #include +#include #include #include #include @@ -250,3 +251,4 @@ module_exit(crypto_cfb_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CFB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("cfb"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/cipher.c b/crypto/cipher.c index fd78150deb1c..b47141ed4a9f 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -53,7 +54,7 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm, return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen); } -EXPORT_SYMBOL_GPL(crypto_cipher_setkey); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL); static inline void cipher_crypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src, bool enc) @@ -81,11 +82,11 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, { cipher_crypt_one(tfm, dst, src, true); } -EXPORT_SYMBOL_GPL(crypto_cipher_encrypt_one); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL); void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { cipher_crypt_one(tfm, dst, src, false); } -EXPORT_SYMBOL_GPL(crypto_cipher_decrypt_one); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL); diff --git a/crypto/cmac.c b/crypto/cmac.c index df36be1efb81..f4a5d3bfb376 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -11,6 +11,7 @@ * Author: Kazunori Miyazawa */ +#include #include #include #include @@ -313,3 +314,4 @@ module_exit(crypto_cmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CMAC keyed hash algorithm"); MODULE_ALIAS_CRYPTO("cmac"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/ctr.c b/crypto/ctr.c index c39fcffba27f..23c698b22013 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -358,3 +359,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CTR block cipher mode of operation"); MODULE_ALIAS_CRYPTO("rfc3686"); MODULE_ALIAS_CRYPTO("ctr"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/drbg.c b/crypto/drbg.c index 3132967a1749..1b4587e0ddad 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -98,6 +98,7 @@ */ #include +#include #include /*************************************************************** @@ -2161,3 +2162,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) " CRYPTO_DRBG_HMAC_STRING CRYPTO_DRBG_CTR_STRING); MODULE_ALIAS_CRYPTO("stdrng"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/ecb.c b/crypto/ecb.c index 69a687cbdf21..71fbb0543d64 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -6,6 +6,7 @@ */ #include +#include #include 
#include #include diff --git a/crypto/essiv.c b/crypto/essiv.c index d012be23d496..8bcc5bdcb2a9 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -643,3 +644,4 @@ module_exit(essiv_module_exit); MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("essiv"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/keywrap.c b/crypto/keywrap.c index 0355cce21b1e..3517773bc7f7 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c @@ -85,6 +85,7 @@ #include #include #include +#include #include struct crypto_kw_block { @@ -316,3 +317,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Stephan Mueller "); MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)"); MODULE_ALIAS_CRYPTO("kw"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/ofb.c b/crypto/ofb.c index 2ec68e3f2c55..b630fdecceee 100644 --- a/crypto/ofb.c +++ b/crypto/ofb.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -102,3 +103,4 @@ module_exit(crypto_ofb_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OFB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("ofb"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/pcbc.c b/crypto/pcbc.c index ae921fb74dc9..7030f59e46b6 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -191,3 +192,4 @@ module_exit(crypto_pcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PCBC block cipher mode of operation"); MODULE_ALIAS_CRYPTO("pcbc"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index b4dae640de9f..ff16d05644c7 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -986,3 +987,4 @@ EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 321e38eef51b..a896d77e9611 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -33,10 +33,13 @@ #include #include #include +#include #include #include "internal.h" +MODULE_IMPORT_NS(CRYPTO_INTERNAL); + static bool notests; module_param(notests, bool, 0644); MODULE_PARM_DESC(notests, "disable crypto self-tests"); diff --git a/crypto/vmac.c b/crypto/vmac.c index 9b565d1040d6..4633b2dda1e0 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -36,6 +36,7 @@ #include #include #include +#include #include /* @@ -693,3 +694,4 @@ module_exit(vmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("VMAC hash algorithm"); MODULE_ALIAS_CRYPTO("vmac64"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index af3b7eb5d7c7..6074c5c1da49 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -6,6 +6,7 @@ * Kazunori Miyazawa */ +#include #include #include #include @@ -272,3 +273,4 @@ module_exit(crypto_xcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XCBC keyed hash algorithm"); MODULE_ALIAS_CRYPTO("xcbc"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/xts.c b/crypto/xts.c index ad45b009774b..6c12f30dbdd6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -7,6 +7,7 @@ * Based on ecb.c * Copyright (c) 2006 Herbert Xu */ +#include #include #include #include @@ -464,3 +465,4 @@ module_exit(xts_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XTS block cipher mode"); MODULE_ALIAS_CRYPTO("xts"); 
+MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index f4f18bfc2247..4ee010f39912 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -434,3 +435,4 @@ module_pci_driver(geode_aes_driver); MODULE_AUTHOR("Advanced Micro Devices, Inc."); MODULE_DESCRIPTION("Geode LX Hardware AES driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 2e1562108a85..30aedfcfee7c 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1999,3 +1999,4 @@ MODULE_AUTHOR("Ofer Heifetz "); MODULE_AUTHOR("Igal Liberman "); MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 50fb6d90a2e0..bc60b5802256 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index eb9b3be9d8eb..96b437bfe3de 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -464,3 +464,4 @@ MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_ALIAS_CRYPTO("intel_qat"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 31c7a206a629..ff78c73c47e3 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index 2bc5d4e1adf4..d05c02baebcf 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "aesp8-ppc.h" diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index 3e0335fb406c..87a194455d6a 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c @@ -78,3 +78,4 @@ MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions " "support on Power 8"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 18dd7a4aaf7d..86f0748009af 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -189,45 +189,6 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst) return inst->__ctx; } -struct crypto_cipher_spawn { - struct crypto_spawn base; -}; - -static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn, - struct crypto_instance *inst, - const char *name, u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_CIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; - return crypto_grab_spawn(&spawn->base, inst, name, type, mask); -} - -static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn) -{ - crypto_drop_spawn(&spawn->base); -} - -static inline struct crypto_alg *crypto_spawn_cipher_alg( - struct crypto_cipher_spawn *spawn) -{ - return 
spawn->base.alg; -} - -static inline struct crypto_cipher *crypto_spawn_cipher( - struct crypto_cipher_spawn *spawn) -{ - u32 type = CRYPTO_ALG_TYPE_CIPHER; - u32 mask = CRYPTO_ALG_TYPE_MASK; - - return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask)); -} - -static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) -{ - return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; -} - static inline struct crypto_async_request *crypto_get_backlog( struct crypto_queue *queue) { diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h new file mode 100644 index 000000000000..a9174ba90250 --- /dev/null +++ b/include/crypto/internal/cipher.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2002 James Morris + * Copyright (c) 2002 David S. Miller (davem@redhat.com) + * Copyright (c) 2005 Herbert Xu + * + * Portions derived from Cryptoapi, by Alexander Kjeldaas + * and Nettle, by Niels Möller. + */ + +#ifndef _CRYPTO_INTERNAL_CIPHER_H +#define _CRYPTO_INTERNAL_CIPHER_H + +#include + +struct crypto_cipher { + struct crypto_tfm base; +}; + +/** + * DOC: Single Block Cipher API + * + * The single block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). + * + * Using the single block cipher API calls, operations with the basic cipher + * primitive can be implemented. These cipher primitives exclude any block + * chaining operations including IV handling. + * + * The purpose of this single block cipher API is to support the implementation + * of templates or other concepts that only need to perform the cipher operation + * on one block at a time. Templates invoke the underlying cipher primitive + * block-wise and process either the input or the output data of these cipher + * operations. + */ + +static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_cipher *)tfm; +} + +/** + * crypto_alloc_cipher() - allocate single block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a single block cipher. The returned struct + * crypto_cipher is the cipher handle that is required for any subsequent API + * invocation for that single block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ +static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_cipher() - zeroize and free the single block cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_cipher(struct crypto_cipher *tfm) +{ + crypto_free_tfm(crypto_cipher_tfm(tfm)); +} + +/** + * crypto_has_cipher() - Search for the availability of a single block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the single block cipher is known to the kernel crypto API; + * false otherwise + */ +static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * crypto_cipher_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the single block cipher referenced with the cipher handle + * tfm is returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); +} + +static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); +} + +static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) +{ + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); +} + +static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); +} + +/** + * crypto_cipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the single block cipher referenced by the + * cipher handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen); + +/** + * crypto_cipher_encrypt_one() - encrypt one block of plaintext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the ciphertext + * @src: buffer holding the plaintext to be encrypted + * + * Invoke the encryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. 
+ */ +void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); + +/** + * crypto_cipher_decrypt_one() - decrypt one block of ciphertext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the plaintext + * @src: buffer holding the ciphertext to be decrypted + * + * Invoke the decryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ +void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} + +static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct crypto_alg *crypto_spawn_cipher_alg( + struct crypto_cipher_spawn *spawn) +{ + return spawn->base.alg; +} + +static inline struct crypto_cipher *crypto_spawn_cipher( + struct crypto_cipher_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_CIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask)); +} + +static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; +} + +#endif diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 10226c12c5df..9dd6c0c17eb8 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -9,6 +9,7 @@ #define _CRYPTO_INTERNAL_SKCIPHER_H #include +#include #include #include #include diff --git a/include/linux/crypto.h b/include/linux/crypto.h index ef90e07c9635..9b55cd6b1f1b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -636,10 +636,6 @@ struct crypto_tfm { void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; -struct crypto_cipher { - struct crypto_tfm base; -}; - struct crypto_comp { struct crypto_tfm base; }; @@ -743,165 +739,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void) return __alignof__(tfm->__crt_ctx); } -/** - * DOC: Single Block Cipher API - * - * The single block cipher API is used with the ciphers of type - * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). - * - * Using the single block cipher API calls, operations with the basic cipher - * primitive can be implemented. These cipher primitives exclude any block - * chaining operations including IV handling. - * - * The purpose of this single block cipher API is to support the implementation - * of templates or other concepts that only need to perform the cipher operation - * on one block at a time. Templates invoke the underlying cipher primitive - * block-wise and process either the input or the output data of these cipher - * operations. - */ - -static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) -{ - return (struct crypto_cipher *)tfm; -} - -/** - * crypto_alloc_cipher() - allocate single block cipher handle - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * single block cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Allocate a cipher handle for a single block cipher. 
The returned struct - * crypto_cipher is the cipher handle that is required for any subsequent API - * invocation for that single block cipher. - * - * Return: allocated cipher handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. - */ -static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, - u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_CIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; - - return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); -} - -static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) -{ - return &tfm->base; -} - -/** - * crypto_free_cipher() - zeroize and free the single block cipher handle - * @tfm: cipher handle to be freed - */ -static inline void crypto_free_cipher(struct crypto_cipher *tfm) -{ - crypto_free_tfm(crypto_cipher_tfm(tfm)); -} - -/** - * crypto_has_cipher() - Search for the availability of a single block cipher - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * single block cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Return: true when the single block cipher is known to the kernel crypto API; - * false otherwise - */ -static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_CIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; - - return crypto_has_alg(alg_name, type, mask); -} - -/** - * crypto_cipher_blocksize() - obtain block size for cipher - * @tfm: cipher handle - * - * The block size for the single block cipher referenced with the cipher handle - * tfm is returned. The caller may use that information to allocate appropriate - * memory for the data returned by the encryption or decryption operation - * - * Return: block size of cipher - */ -static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) -{ - return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); -} - -static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); -} - -static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) -{ - return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); -} - -static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, - u32 flags) -{ - crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); -} - -static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, - u32 flags) -{ - crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); -} - -/** - * crypto_cipher_setkey() - set key for cipher - * @tfm: cipher handle - * @key: buffer holding the key - * @keylen: length of the key in bytes - * - * The caller provided key is set for the single block cipher referenced by the - * cipher handle. - * - * Note, the key length determines the cipher type. Many block ciphers implement - * different cipher modes depending on the key size, such as AES-128 vs AES-192 - * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 - * is performed. 
- * - * Return: 0 if the setting of the key was successful; < 0 if an error occurred - */ -int crypto_cipher_setkey(struct crypto_cipher *tfm, - const u8 *key, unsigned int keylen); - -/** - * crypto_cipher_encrypt_one() - encrypt one block of plaintext - * @tfm: cipher handle - * @dst: points to the buffer that will be filled with the ciphertext - * @src: buffer holding the plaintext to be encrypted - * - * Invoke the encryption operation of one block. The caller must ensure that - * the plaintext and ciphertext buffers are at least one block in size. - */ -void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, - u8 *dst, const u8 *src); - -/** - * crypto_cipher_decrypt_one() - decrypt one block of ciphertext - * @tfm: cipher handle - * @dst: points to the buffer that will be filled with the plaintext - * @src: buffer holding the ciphertext to be decrypted - * - * Invoke the decryption operation of one block. The caller must ensure that - * the plaintext and ciphertext buffers are at least one block in size. - */ -void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, - u8 *dst, const u8 *src); - static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) { return (struct crypto_comp *)tfm; -- cgit v1.2.3-59-g8ed1b From 0d396058f92ae7e5ac62839fed54bc2bba630ab5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 23 Dec 2020 00:09:50 -0800 Subject: crypto: blake2s - define shash_alg structs using macros The shash_alg structs for the four variants of BLAKE2s are identical except for the algorithm name, driver name, and digest size. So, avoid code duplication by using a macro to define these structs. Acked-by: Ard Biesheuvel Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/blake2s_generic.c | 88 +++++++++++++++--------------------------------- 1 file changed, 27 insertions(+), 61 deletions(-) (limited to 'crypto') diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c index 005783ff45ad..e3aa6e7ff3d8 100644 --- a/crypto/blake2s_generic.c +++ b/crypto/blake2s_generic.c @@ -83,67 +83,33 @@ static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) return 0; } -static struct shash_alg blake2s_algs[] = {{ - .base.cra_name = "blake2s-128", - .base.cra_driver_name = "blake2s-128-generic", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_128_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-160", - .base.cra_driver_name = "blake2s-160-generic", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_160_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-224", - .base.cra_driver_name = "blake2s-224-generic", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_224_HASH_SIZE, - 
.setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-256", - .base.cra_driver_name = "blake2s-256-generic", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_256_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}}; +#define BLAKE2S_ALG(name, driver_name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = driver_name, \ + .base.cra_priority = 100, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2s_setkey, \ + .init = crypto_blake2s_init, \ + .update = crypto_blake2s_update, \ + .final = crypto_blake2s_final, \ + .descsize = sizeof(struct blake2s_state), \ + } + +static struct shash_alg blake2s_algs[] = { + BLAKE2S_ALG("blake2s-128", "blake2s-128-generic", + BLAKE2S_128_HASH_SIZE), + BLAKE2S_ALG("blake2s-160", "blake2s-160-generic", + BLAKE2S_160_HASH_SIZE), + BLAKE2S_ALG("blake2s-224", "blake2s-224-generic", + BLAKE2S_224_HASH_SIZE), + BLAKE2S_ALG("blake2s-256", "blake2s-256-generic", + BLAKE2S_256_HASH_SIZE), +}; static int __init blake2s_mod_init(void) { -- cgit v1.2.3-59-g8ed1b From df412e7efda1e2c5b5fcb06701bba77434cbd1e8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 23 Dec 2020 00:09:52 -0800 Subject: crypto: blake2s - remove unneeded includes It doesn't make sense for the generic implementation of BLAKE2s to include and , as these are things that would only be useful in an architecture-specific implementation. Remove these unnecessary includes. Acked-by: Ard Biesheuvel Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/blake2s_generic.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'crypto') diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c index e3aa6e7ff3d8..b89536c3671c 100644 --- a/crypto/blake2s_generic.c +++ b/crypto/blake2s_generic.c @@ -4,11 +4,9 @@ */ #include -#include #include #include -#include #include #include -- cgit v1.2.3-59-g8ed1b From 8c4a93a1270ddffc7660ae43fa8030ecfe9c06d9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 23 Dec 2020 00:09:54 -0800 Subject: crypto: blake2s - share the "shash" API boilerplate code Add helper functions for shash implementations of BLAKE2s to include/crypto/internal/blake2s.h, taking advantage of __blake2s_update() and __blake2s_final() that were added by the previous patch to share more code between the library and shash implementations. crypto_blake2s_setkey() and crypto_blake2s_init() are usable as shash_alg::setkey and shash_alg::init directly, while crypto_blake2s_update() and crypto_blake2s_final() take an extra 'blake2s_compress_t' function pointer parameter. This allows the implementation of the compression function to be overridden, which is the only part that optimized implementations really care about. 
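Concretely, this reduces each optimized shash implementation to a pair of trivial wrappers that pass the architecture's compression function through to the shared helpers, along these lines (sketch matching the x86 conversion in the diff below):

    static int crypto_blake2s_update_x86(struct shash_desc *desc,
                                         const u8 *in, unsigned int inlen)
    {
            return crypto_blake2s_update(desc, in, inlen,
                                         blake2s_compress_arch);
    }

    static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
    {
            return crypto_blake2s_final(desc, out, blake2s_compress_arch);
    }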
The new functions are inline functions (similar to those in sha1_base.h, sha256_base.h, and sm3_base.h) because this avoids needing to add a new module blake2s_helpers.ko, they aren't *too* long, and this avoids indirect calls which are expensive these days. Note that they can't go in blake2s_generic.ko, as that would require selecting CRYPTO_BLAKE2S from CRYPTO_BLAKE2S_X86, which would cause a recursive dependency. Finally, use these new helper functions in the x86 implementation of BLAKE2s. (This part should be a separate patch, but unfortunately the x86 implementation used the exact same function names like "crypto_blake2s_update()", so it had to be updated at the same time.) Signed-off-by: Eric Biggers Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/blake2s-glue.c | 74 ++++---------------------------------- crypto/blake2s_generic.c | 76 +++++---------------------------------- include/crypto/internal/blake2s.h | 65 ++++++++++++++++++++++++++++++--- 3 files changed, 76 insertions(+), 139 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c index 4dcb2ee89efc..a40365ab301e 100644 --- a/arch/x86/crypto/blake2s-glue.c +++ b/arch/x86/crypto/blake2s-glue.c @@ -58,75 +58,15 @@ void blake2s_compress_arch(struct blake2s_state *state, } EXPORT_SYMBOL(blake2s_compress_arch); -static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +static int crypto_blake2s_update_x86(struct shash_desc *desc, + const u8 *in, unsigned int inlen) { - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) - return -EINVAL; - - memcpy(tctx->key, key, keylen); - tctx->keylen = keylen; - - return 0; -} - -static int crypto_blake2s_init(struct shash_desc *desc) -{ - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2s_state *state = shash_desc_ctx(desc); - const int outlen = crypto_shash_digestsize(desc->tfm); - - if (tctx->keylen) - blake2s_init_key(state, outlen, tctx->key, tctx->keylen); - else - blake2s_init(state, outlen); - - return 0; -} - -static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in, - unsigned int inlen) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; - - if (unlikely(!inlen)) - return 0; - if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - blake2s_compress_arch(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); - state->buflen = 0; - in += fill; - inlen -= fill; - } - if (inlen > BLAKE2S_BLOCK_SIZE) { - const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); - /* Hash one less (full) block than strictly possible */ - blake2s_compress_arch(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); - in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); - inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; - - return 0; + return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch); } -static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) +static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out) { - struct blake2s_state *state = shash_desc_ctx(desc); - - blake2s_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ - blake2s_compress_arch(state, state->buf, 1, state->buflen); - cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); - memcpy(out, state->h, state->outlen); - 
memzero_explicit(state, sizeof(*state)); - - return 0; + return crypto_blake2s_final(desc, out, blake2s_compress_arch); } #define BLAKE2S_ALG(name, driver_name, digest_size) \ @@ -141,8 +81,8 @@ static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) .digestsize = digest_size, \ .setkey = crypto_blake2s_setkey, \ .init = crypto_blake2s_init, \ - .update = crypto_blake2s_update, \ - .final = crypto_blake2s_final, \ + .update = crypto_blake2s_update_x86, \ + .final = crypto_blake2s_final_x86, \ .descsize = sizeof(struct blake2s_state), \ } diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c index b89536c3671c..72fe480f9bd6 100644 --- a/crypto/blake2s_generic.c +++ b/crypto/blake2s_generic.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /* + * shash interface to the generic implementation of BLAKE2s + * * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. */ @@ -10,75 +12,15 @@ #include #include -static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +static int crypto_blake2s_update_generic(struct shash_desc *desc, + const u8 *in, unsigned int inlen) { - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) - return -EINVAL; - - memcpy(tctx->key, key, keylen); - tctx->keylen = keylen; - - return 0; + return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic); } -static int crypto_blake2s_init(struct shash_desc *desc) +static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out) { - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2s_state *state = shash_desc_ctx(desc); - const int outlen = crypto_shash_digestsize(desc->tfm); - - if (tctx->keylen) - blake2s_init_key(state, outlen, tctx->key, tctx->keylen); - else - blake2s_init(state, outlen); - - return 0; -} - -static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in, - unsigned int inlen) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; - - if (unlikely(!inlen)) - return 0; - if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - blake2s_compress_generic(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); - state->buflen = 0; - in += fill; - inlen -= fill; - } - if (inlen > BLAKE2S_BLOCK_SIZE) { - const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); - /* Hash one less (full) block than strictly possible */ - blake2s_compress_generic(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); - in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); - inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; - - return 0; -} - -static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - - blake2s_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ - blake2s_compress_generic(state, state->buf, 1, state->buflen); - cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); - memcpy(out, state->h, state->outlen); - memzero_explicit(state, sizeof(*state)); - - return 0; + return crypto_blake2s_final(desc, out, blake2s_compress_generic); } #define BLAKE2S_ALG(name, driver_name, digest_size) \ @@ -93,8 +35,8 @@ static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) .digestsize = digest_size, \ .setkey = crypto_blake2s_setkey, \ .init = crypto_blake2s_init, \ - .update = crypto_blake2s_update, \ - 
.final = crypto_blake2s_final, \ + .update = crypto_blake2s_update_generic, \ + .final = crypto_blake2s_final_generic, \ .descsize = sizeof(struct blake2s_state), \ } diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h index 42deba4b8cee..2ea0a8f5e7f4 100644 --- a/include/crypto/internal/blake2s.h +++ b/include/crypto/internal/blake2s.h @@ -1,16 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Helper functions for BLAKE2s implementations. + * Keep this in sync with the corresponding BLAKE2b header. + */ #ifndef BLAKE2S_INTERNAL_H #define BLAKE2S_INTERNAL_H #include +#include #include -struct blake2s_tfm_ctx { - u8 key[BLAKE2S_KEY_SIZE]; - unsigned int keylen; -}; - void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, size_t nblocks, const u32 inc); @@ -27,6 +27,8 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state) typedef void (*blake2s_compress_t)(struct blake2s_state *state, const u8 *block, size_t nblocks, u32 inc); +/* Helper functions for BLAKE2s shared by the library and shash APIs */ + static inline void __blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen, blake2s_compress_t compress) @@ -64,4 +66,57 @@ static inline void __blake2s_final(struct blake2s_state *state, u8 *out, memcpy(out, state->h, state->outlen); } +/* Helper functions for shash implementations of BLAKE2s */ + +struct blake2s_tfm_ctx { + u8 key[BLAKE2S_KEY_SIZE]; + unsigned int keylen; +}; + +static inline int crypto_blake2s_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); + + if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) + return -EINVAL; + + memcpy(tctx->key, key, keylen); + tctx->keylen = keylen; + + return 0; +} + +static inline int crypto_blake2s_init(struct shash_desc *desc) +{ + const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct blake2s_state *state = shash_desc_ctx(desc); + unsigned int outlen = crypto_shash_digestsize(desc->tfm); + + if (tctx->keylen) + blake2s_init_key(state, outlen, tctx->key, tctx->keylen); + else + blake2s_init(state, outlen); + return 0; +} + +static inline int crypto_blake2s_update(struct shash_desc *desc, + const u8 *in, unsigned int inlen, + blake2s_compress_t compress) +{ + struct blake2s_state *state = shash_desc_ctx(desc); + + __blake2s_update(state, in, inlen, compress); + return 0; +} + +static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out, + blake2s_compress_t compress) +{ + struct blake2s_state *state = shash_desc_ctx(desc); + + __blake2s_final(state, out, compress); + return 0; +} + #endif /* BLAKE2S_INTERNAL_H */ -- cgit v1.2.3-59-g8ed1b From 28dcca4cc0c01e2467549a36b1b0eacfdb01236c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 23 Dec 2020 00:10:01 -0800 Subject: crypto: blake2b - sync with blake2s implementation Sync the BLAKE2b code with the BLAKE2s code as much as possible: - Move a lot of code into new headers and , and adjust it to be like the corresponding BLAKE2s code, i.e. like and . - Rename constants, e.g. BLAKE2B_*_DIGEST_SIZE => BLAKE2B_*_HASH_SIZE. - Use a macro BLAKE2B_ALG() to define the shash_alg structs. - Export blake2b_compress_generic() for use as a fallback. This makes it much easier to add optimized implementations of BLAKE2b, as optimized implementations can use the helper functions crypto_blake2b_{setkey,init,update,final}() and blake2b_compress_generic(). The ARM implementation will use these. 
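As a rough sketch of the intended usage pattern (hypothetical glue code, not part of this patch -- the symbol blake2b_compress_neon and the SIMD plumbing are assumptions about what such an ARM port might look like):

    #include <asm/neon.h>
    #include <crypto/internal/blake2b.h>
    #include <crypto/internal/simd.h>

    /* Assumed assembly entry point with the blake2b_compress_t signature. */
    asmlinkage void blake2b_compress_neon(struct blake2b_state *state,
                                          const u8 *block, size_t nblocks,
                                          u32 inc);

    static void blake2b_compress(struct blake2b_state *state,
                                 const u8 *block, size_t nblocks, u32 inc)
    {
            /* Fall back to the exported generic code outside SIMD context. */
            if (!crypto_simd_usable()) {
                    blake2b_compress_generic(state, block, nblocks, inc);
                    return;
            }
            kernel_neon_begin();
            blake2b_compress_neon(state, block, nblocks, inc);
            kernel_neon_end();
    }

    static int crypto_blake2b_update_neon(struct shash_desc *desc,
                                          const u8 *in, unsigned int inlen)
    {
            return crypto_blake2b_update(desc, in, inlen, blake2b_compress);
    }

    static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
    {
            return crypto_blake2b_final(desc, out, blake2b_compress);
    }

The shash_alg structs for such a port then differ from the generic ones only in the driver name, priority, and the two callbacks above.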
But this change is also helpful because it eliminates unnecessary differences between the BLAKE2b and BLAKE2s code, so that the same improvements can easily be made to both. (The two algorithms are basically identical, except for the word size and constants.) It also makes it straightforward to add a library API for BLAKE2b in the future if/when it's needed. This change does make the BLAKE2b code slightly more complicated than it needs to be, as it doesn't actually provide a library API yet. For example, __blake2b_update() doesn't really need to exist yet; it could just be inlined into crypto_blake2b_update(). But I believe this is outweighed by the benefits of keeping the code in sync. Signed-off-by: Eric Biggers Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/blake2b_generic.c | 226 ++++++++------------------------------ include/crypto/blake2b.h | 67 +++++++++++ include/crypto/internal/blake2b.h | 115 +++++++++++++++++++ 3 files changed, 230 insertions(+), 178 deletions(-) create mode 100644 include/crypto/blake2b.h create mode 100644 include/crypto/internal/blake2b.h (limited to 'crypto') diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index a2ffe60e06d3..963f7fe0e4ea 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -20,36 +20,11 @@ #include #include -#include #include #include +#include #include -#define BLAKE2B_160_DIGEST_SIZE (160 / 8) -#define BLAKE2B_256_DIGEST_SIZE (256 / 8) -#define BLAKE2B_384_DIGEST_SIZE (384 / 8) -#define BLAKE2B_512_DIGEST_SIZE (512 / 8) - -enum blake2b_constant { - BLAKE2B_BLOCKBYTES = 128, - BLAKE2B_KEYBYTES = 64, -}; - -struct blake2b_state { - u64 h[8]; - u64 t[2]; - u64 f[2]; - u8 buf[BLAKE2B_BLOCKBYTES]; - size_t buflen; -}; - -static const u64 blake2b_IV[8] = { - 0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, - 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL, - 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, - 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL -}; - static const u8 blake2b_sigma[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, @@ -95,8 +70,8 @@ static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc) G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \ } while (0) -static void blake2b_compress(struct blake2b_state *S, - const u8 block[BLAKE2B_BLOCKBYTES]) +static void blake2b_compress_one_generic(struct blake2b_state *S, + const u8 block[BLAKE2B_BLOCK_SIZE]) { u64 m[16]; u64 v[16]; @@ -108,14 +83,14 @@ static void blake2b_compress(struct blake2b_state *S, for (i = 0; i < 8; ++i) v[i] = S->h[i]; - v[ 8] = blake2b_IV[0]; - v[ 9] = blake2b_IV[1]; - v[10] = blake2b_IV[2]; - v[11] = blake2b_IV[3]; - v[12] = blake2b_IV[4] ^ S->t[0]; - v[13] = blake2b_IV[5] ^ S->t[1]; - v[14] = blake2b_IV[6] ^ S->f[0]; - v[15] = blake2b_IV[7] ^ S->f[1]; + v[ 8] = BLAKE2B_IV0; + v[ 9] = BLAKE2B_IV1; + v[10] = BLAKE2B_IV2; + v[11] = BLAKE2B_IV3; + v[12] = BLAKE2B_IV4 ^ S->t[0]; + v[13] = BLAKE2B_IV5 ^ S->t[1]; + v[14] = BLAKE2B_IV6 ^ S->f[0]; + v[15] = BLAKE2B_IV7 ^ S->f[1]; ROUND(0); ROUND(1); @@ -139,159 +114,54 @@ static void blake2b_compress(struct blake2b_state *S, #undef G #undef ROUND -struct blake2b_tfm_ctx { - u8 key[BLAKE2B_KEYBYTES]; - unsigned int keylen; -}; - -static int blake2b_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +void blake2b_compress_generic(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc) { - struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen == 0 
|| keylen > BLAKE2B_KEYBYTES) - return -EINVAL; - - memcpy(tctx->key, key, keylen); - tctx->keylen = keylen; - - return 0; + do { + blake2b_increment_counter(state, inc); + blake2b_compress_one_generic(state, block); + block += BLAKE2B_BLOCK_SIZE; + } while (--nblocks); } +EXPORT_SYMBOL(blake2b_compress_generic); -static int blake2b_init(struct shash_desc *desc) +static int crypto_blake2b_update_generic(struct shash_desc *desc, + const u8 *in, unsigned int inlen) { - struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2b_state *state = shash_desc_ctx(desc); - const int digestsize = crypto_shash_digestsize(desc->tfm); - - memset(state, 0, sizeof(*state)); - memcpy(state->h, blake2b_IV, sizeof(state->h)); - - /* Parameter block is all zeros except index 0, no xor for 1..7 */ - state->h[0] ^= 0x01010000 | tctx->keylen << 8 | digestsize; - - if (tctx->keylen) { - /* - * Prefill the buffer with the key, next call to _update or - * _final will process it - */ - memcpy(state->buf, tctx->key, tctx->keylen); - state->buflen = BLAKE2B_BLOCKBYTES; - } - return 0; + return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic); } -static int blake2b_update(struct shash_desc *desc, const u8 *in, - unsigned int inlen) +static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) { - struct blake2b_state *state = shash_desc_ctx(desc); - const size_t left = state->buflen; - const size_t fill = BLAKE2B_BLOCKBYTES - left; - - if (!inlen) - return 0; - - if (inlen > fill) { - state->buflen = 0; - /* Fill buffer */ - memcpy(state->buf + left, in, fill); - blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES); - /* Compress */ - blake2b_compress(state, state->buf); - in += fill; - inlen -= fill; - while (inlen > BLAKE2B_BLOCKBYTES) { - blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES); - blake2b_compress(state, in); - in += BLAKE2B_BLOCKBYTES; - inlen -= BLAKE2B_BLOCKBYTES; - } - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; - - return 0; + return crypto_blake2b_final(desc, out, blake2b_compress_generic); } -static int blake2b_final(struct shash_desc *desc, u8 *out) -{ - struct blake2b_state *state = shash_desc_ctx(desc); - const int digestsize = crypto_shash_digestsize(desc->tfm); - size_t i; - - blake2b_increment_counter(state, state->buflen); - /* Set last block */ - state->f[0] = (u64)-1; - /* Padding */ - memset(state->buf + state->buflen, 0, BLAKE2B_BLOCKBYTES - state->buflen); - blake2b_compress(state, state->buf); - - /* Avoid temporary buffer and switch the internal output to LE order */ - for (i = 0; i < ARRAY_SIZE(state->h); i++) - __cpu_to_le64s(&state->h[i]); - - memcpy(out, state->h, digestsize); - return 0; -} +#define BLAKE2B_ALG(name, driver_name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = driver_name, \ + .base.cra_priority = 100, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2b_setkey, \ + .init = crypto_blake2b_init, \ + .update = crypto_blake2b_update_generic, \ + .final = crypto_blake2b_final_generic, \ + .descsize = sizeof(struct blake2b_state), \ + } static struct shash_alg blake2b_algs[] = { - { - .base.cra_name = "blake2b-160", - .base.cra_driver_name = "blake2b-160-generic", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = 
BLAKE2B_BLOCKBYTES, - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), - .base.cra_module = THIS_MODULE, - .digestsize = BLAKE2B_160_DIGEST_SIZE, - .setkey = blake2b_setkey, - .init = blake2b_init, - .update = blake2b_update, - .final = blake2b_final, - .descsize = sizeof(struct blake2b_state), - }, { - .base.cra_name = "blake2b-256", - .base.cra_driver_name = "blake2b-256-generic", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = BLAKE2B_BLOCKBYTES, - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), - .base.cra_module = THIS_MODULE, - .digestsize = BLAKE2B_256_DIGEST_SIZE, - .setkey = blake2b_setkey, - .init = blake2b_init, - .update = blake2b_update, - .final = blake2b_final, - .descsize = sizeof(struct blake2b_state), - }, { - .base.cra_name = "blake2b-384", - .base.cra_driver_name = "blake2b-384-generic", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = BLAKE2B_BLOCKBYTES, - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), - .base.cra_module = THIS_MODULE, - .digestsize = BLAKE2B_384_DIGEST_SIZE, - .setkey = blake2b_setkey, - .init = blake2b_init, - .update = blake2b_update, - .final = blake2b_final, - .descsize = sizeof(struct blake2b_state), - }, { - .base.cra_name = "blake2b-512", - .base.cra_driver_name = "blake2b-512-generic", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = BLAKE2B_BLOCKBYTES, - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), - .base.cra_module = THIS_MODULE, - .digestsize = BLAKE2B_512_DIGEST_SIZE, - .setkey = blake2b_setkey, - .init = blake2b_init, - .update = blake2b_update, - .final = blake2b_final, - .descsize = sizeof(struct blake2b_state), - } + BLAKE2B_ALG("blake2b-160", "blake2b-160-generic", + BLAKE2B_160_HASH_SIZE), + BLAKE2B_ALG("blake2b-256", "blake2b-256-generic", + BLAKE2B_256_HASH_SIZE), + BLAKE2B_ALG("blake2b-384", "blake2b-384-generic", + BLAKE2B_384_HASH_SIZE), + BLAKE2B_ALG("blake2b-512", "blake2b-512-generic", + BLAKE2B_512_HASH_SIZE), }; static int __init blake2b_mod_init(void) diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h new file mode 100644 index 000000000000..18875f16f8ca --- /dev/null +++ b/include/crypto/blake2b.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef _CRYPTO_BLAKE2B_H +#define _CRYPTO_BLAKE2B_H + +#include +#include +#include +#include + +enum blake2b_lengths { + BLAKE2B_BLOCK_SIZE = 128, + BLAKE2B_HASH_SIZE = 64, + BLAKE2B_KEY_SIZE = 64, + + BLAKE2B_160_HASH_SIZE = 20, + BLAKE2B_256_HASH_SIZE = 32, + BLAKE2B_384_HASH_SIZE = 48, + BLAKE2B_512_HASH_SIZE = 64, +}; + +struct blake2b_state { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. 
*/ + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[BLAKE2B_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2b_iv { + BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL, + BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL, + BLAKE2B_IV2 = 0x3C6EF372FE94F82BULL, + BLAKE2B_IV3 = 0xA54FF53A5F1D36F1ULL, + BLAKE2B_IV4 = 0x510E527FADE682D1ULL, + BLAKE2B_IV5 = 0x9B05688C2B3E6C1FULL, + BLAKE2B_IV6 = 0x1F83D9ABFB41BD6BULL, + BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL, +}; + +static inline void __blake2b_init(struct blake2b_state *state, size_t outlen, + const void *key, size_t keylen) +{ + state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); + state->h[1] = BLAKE2B_IV1; + state->h[2] = BLAKE2B_IV2; + state->h[3] = BLAKE2B_IV3; + state->h[4] = BLAKE2B_IV4; + state->h[5] = BLAKE2B_IV5; + state->h[6] = BLAKE2B_IV6; + state->h[7] = BLAKE2B_IV7; + state->t[0] = 0; + state->t[1] = 0; + state->f[0] = 0; + state->f[1] = 0; + state->buflen = 0; + state->outlen = outlen; + if (keylen) { + memcpy(state->buf, key, keylen); + memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen); + state->buflen = BLAKE2B_BLOCK_SIZE; + } +} + +#endif /* _CRYPTO_BLAKE2B_H */ diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h new file mode 100644 index 000000000000..982fe5e8471c --- /dev/null +++ b/include/crypto/internal/blake2b.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Helper functions for BLAKE2b implementations. + * Keep this in sync with the corresponding BLAKE2s header. + */ + +#ifndef _CRYPTO_INTERNAL_BLAKE2B_H +#define _CRYPTO_INTERNAL_BLAKE2B_H + +#include +#include +#include + +void blake2b_compress_generic(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); + +static inline void blake2b_set_lastblock(struct blake2b_state *state) +{ + state->f[0] = -1; +} + +typedef void (*blake2b_compress_t)(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); + +static inline void __blake2b_update(struct blake2b_state *state, + const u8 *in, size_t inlen, + blake2b_compress_t compress) +{ + const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2B_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE); + /* Hash one less (full) block than strictly possible */ + (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE); + in += BLAKE2B_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; +} + +static inline void __blake2b_final(struct blake2b_state *state, u8 *out, + blake2b_compress_t compress) +{ + int i; + + blake2b_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */ + (*compress)(state, state->buf, 1, state->buflen); + for (i = 0; i < ARRAY_SIZE(state->h); i++) + __cpu_to_le64s(&state->h[i]); + memcpy(out, state->h, state->outlen); +} + +/* Helper functions for shash implementations of BLAKE2b */ + +struct blake2b_tfm_ctx { + u8 key[BLAKE2B_KEY_SIZE]; + unsigned int keylen; +}; + +static inline int crypto_blake2b_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); + + if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE) + return 
-EINVAL; + + memcpy(tctx->key, key, keylen); + tctx->keylen = keylen; + + return 0; +} + +static inline int crypto_blake2b_init(struct shash_desc *desc) +{ + const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct blake2b_state *state = shash_desc_ctx(desc); + unsigned int outlen = crypto_shash_digestsize(desc->tfm); + + __blake2b_init(state, outlen, tctx->key, tctx->keylen); + return 0; +} + +static inline int crypto_blake2b_update(struct shash_desc *desc, + const u8 *in, unsigned int inlen, + blake2b_compress_t compress) +{ + struct blake2b_state *state = shash_desc_ctx(desc); + + __blake2b_update(state, in, inlen, compress); + return 0; +} + +static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out, + blake2b_compress_t compress) +{ + struct blake2b_state *state = shash_desc_ctx(desc); + + __blake2b_final(state, out, compress); + return 0; +} + +#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */ -- cgit v1.2.3-59-g8ed1b From 0cdc438e6e13436b0190910ef7da49ce4f5a44f4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 23 Dec 2020 00:10:02 -0800 Subject: crypto: blake2b - update file comment The file comment for blake2b_generic.c makes it sound like it's the reference implementation of BLAKE2b with only minor changes. But it's actually been changed a lot. Update the comment to make this clearer. Reviewed-by: David Sterba Acked-by: Ard Biesheuvel Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/blake2b_generic.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'crypto') diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 963f7fe0e4ea..6704c0355889 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -1,21 +1,18 @@ // SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0) /* - * BLAKE2b reference source code package - reference C implementations + * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b + * reference implementation, but it has been heavily modified for use in the + * kernel. The reference implementation was: * - * Copyright 2012, Samuel Neves . You may use this under the - * terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at - * your option. The terms of these licenses can be found at: + * Copyright 2012, Samuel Neves . You may use this under + * the terms of the CC0, the OpenSSL Licence, or the Apache Public License + * 2.0, at your option. The terms of these licenses can be found at: * - * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - * - OpenSSL license : https://www.openssl.org/source/license.html - * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 + * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 + * - OpenSSL license : https://www.openssl.org/source/license.html + * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 * - * More information about the BLAKE2 hash function can be found at - * https://blake2.net. - * - * Note: the original sources have been modified for inclusion in linux kernel - * in terms of coding style, using generic helpers and simplifications of error - * handling. + * More information about BLAKE2 can be found at https://blake2.net. 
*/ #include -- cgit v1.2.3-59-g8ed1b From 2481104fe98d5b016fdd95d649b1235f21e491ba Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 31 Dec 2020 17:41:55 +0100 Subject: crypto: x86/aes-ni-xts - rewrite and drop indirections via glue helper The AES-NI driver implements XTS via the glue helper, which consumes a struct with sets of function pointers which are invoked on chunks of input data of the appropriate size, as annotated in the struct. Let's get rid of this indirection, so that we can perform direct calls to the assembler helpers. Instead, let's adopt the arm64 strategy, i.e., provide a helper which can consume inputs of any size, provided that the penultimate, full block is passed via the last call if ciphertext stealing needs to be applied. This also allows us to enable the XTS mode for i386. Tested-by: Eric Biggers # x86_64 Signed-off-by: Ard Biesheuvel Reported-by: kernel test robot Reported-by: kernel test robot Reported-by: kernel test robot Signed-off-by: Herbert Xu --- arch/x86/crypto/aesni-intel_asm.S | 280 ++++++++++++++++++++++++++++++------- arch/x86/crypto/aesni-intel_glue.c | 220 ++++++++++++++++------------- crypto/Kconfig | 1 - 3 files changed, 356 insertions(+), 145 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 84d8a156cdcd..4e3972570916 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -43,10 +43,6 @@ #ifdef __x86_64__ # constants in mergeable sections, linker can reorder and merge -.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16 -.align 16 -.Lgf128mul_x_ble_mask: - .octa 0x00000000000000010000000000000087 .section .rodata.cst16.POLY, "aM", @progbits, 16 .align 16 POLY: .octa 0xC2000000000000000000000000000001 @@ -146,7 +142,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff #define CTR %xmm11 #define INC %xmm12 -#define GF128MUL_MASK %xmm10 +#define GF128MUL_MASK %xmm7 #ifdef __x86_64__ #define AREG %rax @@ -2823,6 +2819,14 @@ SYM_FUNC_START(aesni_ctr_enc) ret SYM_FUNC_END(aesni_ctr_enc) +#endif + +.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16 +.align 16 +.Lgf128mul_x_ble_mask: + .octa 0x00000000000000010000000000000087 +.previous + /* * _aesni_gf128mul_x_ble: internal ABI * Multiply in GF(2^128) for XTS IVs @@ -2835,11 +2839,11 @@ SYM_FUNC_END(aesni_ctr_enc) * CTR: == temporary value */ #define _aesni_gf128mul_x_ble() \ - pshufd $0x13, IV, CTR; \ + pshufd $0x13, IV, KEY; \ paddq IV, IV; \ - psrad $31, CTR; \ - pand GF128MUL_MASK, CTR; \ - pxor CTR, IV; + psrad $31, KEY; \ + pand GF128MUL_MASK, KEY; \ + pxor KEY, IV; /* * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, @@ -2847,65 +2851,153 @@ SYM_FUNC_END(aesni_ctr_enc) */ SYM_FUNC_START(aesni_xts_encrypt) FRAME_BEGIN - +#ifndef __x86_64__ + pushl IVP + pushl LEN + pushl KEYP + pushl KLEN + movl (FRAME_OFFSET+20)(%esp), KEYP # ctx + movl (FRAME_OFFSET+24)(%esp), OUTP # dst + movl (FRAME_OFFSET+28)(%esp), INP # src + movl (FRAME_OFFSET+32)(%esp), LEN # len + movl (FRAME_OFFSET+36)(%esp), IVP # iv movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK +#else + movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK +#endif movups (IVP), IV mov 480(KEYP), KLEN .Lxts_enc_loop4: + sub $64, LEN + jl .Lxts_enc_1x + movdqa IV, STATE1 - movdqu 0x00(INP), INC - pxor INC, STATE1 + movdqu 0x00(INP), IN + pxor IN, STATE1 movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 - movdqu 0x10(INP), INC - pxor INC, STATE2 + movdqu 0x10(INP), IN + pxor IN, STATE2 
movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - movdqu 0x20(INP), INC - pxor INC, STATE3 + movdqu 0x20(INP), IN + pxor IN, STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - movdqu 0x30(INP), INC - pxor INC, STATE4 + movdqu 0x30(INP), IN + pxor IN, STATE4 movdqu IV, 0x30(OUTP) call _aesni_enc4 - movdqu 0x00(OUTP), INC - pxor INC, STATE1 + movdqu 0x00(OUTP), IN + pxor IN, STATE1 movdqu STATE1, 0x00(OUTP) - movdqu 0x10(OUTP), INC - pxor INC, STATE2 + movdqu 0x10(OUTP), IN + pxor IN, STATE2 movdqu STATE2, 0x10(OUTP) - movdqu 0x20(OUTP), INC - pxor INC, STATE3 + movdqu 0x20(OUTP), IN + pxor IN, STATE3 movdqu STATE3, 0x20(OUTP) - movdqu 0x30(OUTP), INC - pxor INC, STATE4 + movdqu 0x30(OUTP), IN + pxor IN, STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() add $64, INP add $64, OUTP - sub $64, LEN - ja .Lxts_enc_loop4 + test LEN, LEN + jnz .Lxts_enc_loop4 +.Lxts_enc_ret_iv: movups IV, (IVP) +.Lxts_enc_ret: +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN + popl IVP +#endif FRAME_END ret + +.Lxts_enc_1x: + add $64, LEN + jz .Lxts_enc_ret_iv + sub $16, LEN + jl .Lxts_enc_cts4 + +.Lxts_enc_loop1: + movdqu (INP), STATE + pxor IV, STATE + call _aesni_enc1 + pxor IV, STATE + _aesni_gf128mul_x_ble() + + test LEN, LEN + jz .Lxts_enc_out + + add $16, INP + sub $16, LEN + jl .Lxts_enc_cts1 + + movdqu STATE, (OUTP) + add $16, OUTP + jmp .Lxts_enc_loop1 + +.Lxts_enc_out: + movdqu STATE, (OUTP) + jmp .Lxts_enc_ret_iv + +.Lxts_enc_cts4: + movdqa STATE4, STATE + sub $16, OUTP + +.Lxts_enc_cts1: +#ifndef __x86_64__ + lea .Lcts_permute_table, T1 +#else + lea .Lcts_permute_table(%rip), T1 +#endif + add LEN, INP /* rewind input pointer */ + add $16, LEN /* # bytes in final block */ + movups (INP), IN1 + + mov T1, IVP + add $32, IVP + add LEN, T1 + sub LEN, IVP + add OUTP, LEN + + movups (T1), %xmm4 + movaps STATE, IN2 + pshufb %xmm4, STATE + movups STATE, (LEN) + + movups (IVP), %xmm0 + pshufb %xmm0, IN1 + pblendvb IN2, IN1 + movaps IN1, STATE + + pxor IV, STATE + call _aesni_enc1 + pxor IV, STATE + + movups STATE, (OUTP) + jmp .Lxts_enc_ret SYM_FUNC_END(aesni_xts_encrypt) /* @@ -2914,66 +3006,158 @@ SYM_FUNC_END(aesni_xts_encrypt) */ SYM_FUNC_START(aesni_xts_decrypt) FRAME_BEGIN - +#ifndef __x86_64__ + pushl IVP + pushl LEN + pushl KEYP + pushl KLEN + movl (FRAME_OFFSET+20)(%esp), KEYP # ctx + movl (FRAME_OFFSET+24)(%esp), OUTP # dst + movl (FRAME_OFFSET+28)(%esp), INP # src + movl (FRAME_OFFSET+32)(%esp), LEN # len + movl (FRAME_OFFSET+36)(%esp), IVP # iv movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK +#else + movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK +#endif movups (IVP), IV mov 480(KEYP), KLEN add $240, KEYP + test $15, LEN + jz .Lxts_dec_loop4 + sub $16, LEN + .Lxts_dec_loop4: + sub $64, LEN + jl .Lxts_dec_1x + movdqa IV, STATE1 - movdqu 0x00(INP), INC - pxor INC, STATE1 + movdqu 0x00(INP), IN + pxor IN, STATE1 movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 - movdqu 0x10(INP), INC - pxor INC, STATE2 + movdqu 0x10(INP), IN + pxor IN, STATE2 movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - movdqu 0x20(INP), INC - pxor INC, STATE3 + movdqu 0x20(INP), IN + pxor IN, STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - movdqu 0x30(INP), INC - pxor INC, STATE4 + movdqu 0x30(INP), IN + pxor IN, STATE4 movdqu IV, 0x30(OUTP) call _aesni_dec4 - movdqu 0x00(OUTP), INC - pxor INC, STATE1 + movdqu 0x00(OUTP), IN + pxor IN, STATE1 movdqu STATE1, 0x00(OUTP) - movdqu 0x10(OUTP), INC - pxor INC, STATE2 + movdqu 
0x10(OUTP), IN + pxor IN, STATE2 movdqu STATE2, 0x10(OUTP) - movdqu 0x20(OUTP), INC - pxor INC, STATE3 + movdqu 0x20(OUTP), IN + pxor IN, STATE3 movdqu STATE3, 0x20(OUTP) - movdqu 0x30(OUTP), INC - pxor INC, STATE4 + movdqu 0x30(OUTP), IN + pxor IN, STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() add $64, INP add $64, OUTP - sub $64, LEN - ja .Lxts_dec_loop4 + test LEN, LEN + jnz .Lxts_dec_loop4 +.Lxts_dec_ret_iv: movups IV, (IVP) +.Lxts_dec_ret: +#ifndef __x86_64__ + popl KLEN + popl KEYP + popl LEN + popl IVP +#endif FRAME_END ret -SYM_FUNC_END(aesni_xts_decrypt) +.Lxts_dec_1x: + add $64, LEN + jz .Lxts_dec_ret_iv + +.Lxts_dec_loop1: + movdqu (INP), STATE + + add $16, INP + sub $16, LEN + jl .Lxts_dec_cts1 + + pxor IV, STATE + call _aesni_dec1 + pxor IV, STATE + _aesni_gf128mul_x_ble() + + test LEN, LEN + jz .Lxts_dec_out + + movdqu STATE, (OUTP) + add $16, OUTP + jmp .Lxts_dec_loop1 + +.Lxts_dec_out: + movdqu STATE, (OUTP) + jmp .Lxts_dec_ret_iv + +.Lxts_dec_cts1: + movdqa IV, STATE4 + _aesni_gf128mul_x_ble() + + pxor IV, STATE + call _aesni_dec1 + pxor IV, STATE + +#ifndef __x86_64__ + lea .Lcts_permute_table, T1 +#else + lea .Lcts_permute_table(%rip), T1 #endif + add LEN, INP /* rewind input pointer */ + add $16, LEN /* # bytes in final block */ + movups (INP), IN1 + + mov T1, IVP + add $32, IVP + add LEN, T1 + sub LEN, IVP + add OUTP, LEN + + movups (T1), %xmm4 + movaps STATE, IN2 + pshufb %xmm4, STATE + movups STATE, (LEN) + + movups (IVP), %xmm0 + pshufb %xmm0, IN1 + pblendvb IN2, IN1 + movaps IN1, STATE + + pxor STATE4, STATE + call _aesni_dec1 + pxor STATE4, STATE + + movups STATE, (OUTP) + jmp .Lxts_dec_ret +SYM_FUNC_END(aesni_xts_decrypt) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 84e3ed49b35d..2116bc2b9507 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -33,9 +33,6 @@ #include #include #include -#ifdef CONFIG_X86_64 -#include -#endif #define AESNI_ALIGN 16 @@ -632,98 +629,6 @@ static int ctr_crypt(struct skcipher_request *req) return err; } -static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - /* first half of xts-key is for crypt */ - err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx, - key, keylen); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx, - key + keylen, keylen); -} - - -static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc); -} - -static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec); -} - -static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv); -} - -static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv); -} - -static const struct common_glue_ctx aesni_enc_xts = { - .num_funcs = 2, - .fpu_blocks_limit = 1, - - .funcs = { { - .num_blocks = 32, - .fn_u = { .xts = aesni_xts_enc32 } - }, { - .num_blocks = 1, - .fn_u = { .xts = aesni_xts_enc } - } } -}; - -static const struct common_glue_ctx aesni_dec_xts = 
{ - .num_funcs = 2, - .fpu_blocks_limit = 1, - - .funcs = { { - .num_blocks = 32, - .fn_u = { .xts = aesni_xts_dec32 } - }, { - .num_blocks = 1, - .fn_u = { .xts = aesni_xts_dec } - } } -}; - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc, - aes_ctx(ctx->raw_tweak_ctx), - aes_ctx(ctx->raw_crypt_ctx), - false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc, - aes_ctx(ctx->raw_tweak_ctx), - aes_ctx(ctx->raw_crypt_ctx), - true); -} - static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) { @@ -996,6 +901,128 @@ static int helper_rfc4106_decrypt(struct aead_request *req) } #endif +static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + /* first half of xts-key is for crypt */ + err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx, + key, keylen); + if (err) + return err; + + /* second half of xts-key is for tweak */ + return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx, + key + keylen, keylen); +} + +static int xts_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + int tail = req->cryptlen % AES_BLOCK_SIZE; + struct skcipher_request subreq; + struct skcipher_walk walk; + int err; + + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + + err = skcipher_walk_virt(&walk, req, false); + + if (unlikely(tail > 0 && walk.nbytes < walk.total)) { + int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; + + skcipher_walk_abort(&walk); + + skcipher_request_set_tfm(&subreq, tfm); + skcipher_request_set_callback(&subreq, + skcipher_request_flags(req), + NULL, NULL); + skcipher_request_set_crypt(&subreq, req->src, req->dst, + blocks * AES_BLOCK_SIZE, req->iv); + req = &subreq; + err = skcipher_walk_virt(&walk, req, false); + } else { + tail = 0; + } + + kernel_fpu_begin(); + + /* calculate first value of T */ + aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv); + + while (walk.nbytes > 0) { + int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes &= ~(AES_BLOCK_SIZE - 1); + + if (encrypt) + aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx), + walk.dst.virt.addr, walk.src.virt.addr, + nbytes, walk.iv); + else + aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx), + walk.dst.virt.addr, walk.src.virt.addr, + nbytes, walk.iv); + kernel_fpu_end(); + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + + if (walk.nbytes > 0) + kernel_fpu_begin(); + } + + if (unlikely(tail > 0 && !err)) { + struct scatterlist sg_src[2], sg_dst[2]; + struct scatterlist *src, *dst; + + dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); + + skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, + req->iv); + + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; + + kernel_fpu_begin(); + if (encrypt) + 
aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx), + walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes, walk.iv); + else + aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx), + walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes, walk.iv); + kernel_fpu_end(); + + err = skcipher_walk_done(&walk, 0); + } + return err; +} + +static int xts_encrypt(struct skcipher_request *req) +{ + return xts_crypt(req, true); +} + +static int xts_decrypt(struct skcipher_request *req) +{ + return xts_crypt(req, false); +} + static struct crypto_alg aesni_cipher_alg = { .cra_name = "aes", .cra_driver_name = "aes-aesni", @@ -1082,6 +1109,7 @@ static struct skcipher_alg aesni_skciphers[] = { .setkey = aesni_skcipher_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, +#endif }, { .base = { .cra_name = "__xts(aes)", @@ -1095,10 +1123,10 @@ static struct skcipher_alg aesni_skciphers[] = { .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, + .walksize = 2 * AES_BLOCK_SIZE, .setkey = xts_aesni_setkey, .encrypt = xts_encrypt, .decrypt = xts_decrypt, -#endif } }; diff --git a/crypto/Kconfig b/crypto/Kconfig index a367fcfeb5d4..c48ca26e2169 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1133,7 +1133,6 @@ config CRYPTO_AES_NI_INTEL select CRYPTO_LIB_AES select CRYPTO_ALGAPI select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 if 64BIT select CRYPTO_SIMD help Use Intel AES-NI instructions for AES algorithm. -- cgit v1.2.3-59-g8ed1b From 55a7e88f016873ef1717295d8460416b1ccd05a5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:49 +0100 Subject: crypto: x86/camellia - switch to XTS template Now that the XTS template can wrap accelerated ECB modes, it can be used to implement Camellia in XTS mode as well, which turns out to be at least as fast, and sometimes even faster. 
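As a usage sketch (illustrative only: the function name, key length and
buffer handling below are invented for this example, and the buffer must
be linearly mapped and at least one 16-byte block long), a caller that
allocates "xts(camellia)" through the regular skcipher API transparently
gets the XTS template layered on top of the accelerated ECB
implementation, so no caller-side changes are needed:

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>

	/* Hypothetical demo: encrypt one buffer in place via xts(camellia) */
	static int xts_camellia_demo(const u8 *key, unsigned int keylen,
				     u8 *buf, unsigned int len, u8 iv[16])
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		/*
		 * Resolves to the XTS template wrapped around the accelerated
		 * ECB driver when no native XTS implementation is registered.
		 */
		tfm = crypto_alloc_skcipher("xts(camellia)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* XTS consumes a double-length key: crypt key + tweak key */
		err = crypto_skcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg, buf, len);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}

The request flow is unchanged from any other skcipher; only the
underlying composition differs, which is what makes dropping the
dedicated XTS glue paths safe for existing users.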
Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/camellia-aesni-avx-asm_64.S | 181 ----------------------- arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 207 --------------------------- arch/x86/crypto/camellia_aesni_avx2_glue.c | 70 --------- arch/x86/crypto/camellia_aesni_avx_glue.c | 101 +------------ arch/x86/include/asm/crypto/camellia.h | 18 --- crypto/Kconfig | 2 +- 6 files changed, 2 insertions(+), 577 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index ecc0a9a905c4..471c34e6cac2 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -17,7 +17,6 @@ #include #include -#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -593,10 +592,6 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -/* For XTS mode IV generation */ -.Lxts_gf128mul_and_shl1_mask: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 - /* * pre-SubByte transform * @@ -1111,179 +1106,3 @@ SYM_FUNC_START(camellia_ctr_16way) FRAME_END ret; SYM_FUNC_END(camellia_ctr_16way) - -#define gf128mul_x_ble(iv, mask, tmp) \ - vpsrad $31, iv, tmp; \ - vpaddq iv, iv, iv; \ - vpshufd $0x13, tmp, tmp; \ - vpand mask, tmp, tmp; \ - vpxor tmp, iv, iv; - -.align 8 -SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - * %r8: index for input whitening key - * %r9: pointer to __camellia_enc_blk16 or __camellia_dec_blk16 - */ - FRAME_BEGIN - - subq $(16 * 16), %rsp; - movq %rsp, %rax; - - vmovdqa .Lxts_gf128mul_and_shl1_mask, %xmm14; - - /* load IV */ - vmovdqu (%rcx), %xmm0; - vpxor 0 * 16(%rdx), %xmm0, %xmm15; - vmovdqu %xmm15, 15 * 16(%rax); - vmovdqu %xmm0, 0 * 16(%rsi); - - /* construct IVs */ - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 1 * 16(%rdx), %xmm0, %xmm15; - vmovdqu %xmm15, 14 * 16(%rax); - vmovdqu %xmm0, 1 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 2 * 16(%rdx), %xmm0, %xmm13; - vmovdqu %xmm0, 2 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 3 * 16(%rdx), %xmm0, %xmm12; - vmovdqu %xmm0, 3 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 4 * 16(%rdx), %xmm0, %xmm11; - vmovdqu %xmm0, 4 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 5 * 16(%rdx), %xmm0, %xmm10; - vmovdqu %xmm0, 5 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 6 * 16(%rdx), %xmm0, %xmm9; - vmovdqu %xmm0, 6 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 7 * 16(%rdx), %xmm0, %xmm8; - vmovdqu %xmm0, 7 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 8 * 16(%rdx), %xmm0, %xmm7; - vmovdqu %xmm0, 8 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 9 * 16(%rdx), %xmm0, %xmm6; - vmovdqu %xmm0, 9 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 10 * 16(%rdx), %xmm0, %xmm5; - vmovdqu %xmm0, 10 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 11 * 16(%rdx), %xmm0, %xmm4; - vmovdqu %xmm0, 11 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 12 * 16(%rdx), %xmm0, %xmm3; - vmovdqu %xmm0, 12 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 13 * 16(%rdx), %xmm0, %xmm2; - vmovdqu %xmm0, 13 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 14 * 16(%rdx), %xmm0, %xmm1; - vmovdqu 
%xmm0, 14 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vpxor 15 * 16(%rdx), %xmm0, %xmm15; - vmovdqu %xmm15, 0 * 16(%rax); - vmovdqu %xmm0, 15 * 16(%rsi); - - gf128mul_x_ble(%xmm0, %xmm14, %xmm15); - vmovdqu %xmm0, (%rcx); - - /* inpack16_pre: */ - vmovq (key_table)(CTX, %r8, 8), %xmm15; - vpshufb .Lpack_bswap, %xmm15, %xmm15; - vpxor 0 * 16(%rax), %xmm15, %xmm0; - vpxor %xmm1, %xmm15, %xmm1; - vpxor %xmm2, %xmm15, %xmm2; - vpxor %xmm3, %xmm15, %xmm3; - vpxor %xmm4, %xmm15, %xmm4; - vpxor %xmm5, %xmm15, %xmm5; - vpxor %xmm6, %xmm15, %xmm6; - vpxor %xmm7, %xmm15, %xmm7; - vpxor %xmm8, %xmm15, %xmm8; - vpxor %xmm9, %xmm15, %xmm9; - vpxor %xmm10, %xmm15, %xmm10; - vpxor %xmm11, %xmm15, %xmm11; - vpxor %xmm12, %xmm15, %xmm12; - vpxor %xmm13, %xmm15, %xmm13; - vpxor 14 * 16(%rax), %xmm15, %xmm14; - vpxor 15 * 16(%rax), %xmm15, %xmm15; - - CALL_NOSPEC r9; - - addq $(16 * 16), %rsp; - - vpxor 0 * 16(%rsi), %xmm7, %xmm7; - vpxor 1 * 16(%rsi), %xmm6, %xmm6; - vpxor 2 * 16(%rsi), %xmm5, %xmm5; - vpxor 3 * 16(%rsi), %xmm4, %xmm4; - vpxor 4 * 16(%rsi), %xmm3, %xmm3; - vpxor 5 * 16(%rsi), %xmm2, %xmm2; - vpxor 6 * 16(%rsi), %xmm1, %xmm1; - vpxor 7 * 16(%rsi), %xmm0, %xmm0; - vpxor 8 * 16(%rsi), %xmm15, %xmm15; - vpxor 9 * 16(%rsi), %xmm14, %xmm14; - vpxor 10 * 16(%rsi), %xmm13, %xmm13; - vpxor 11 * 16(%rsi), %xmm12, %xmm12; - vpxor 12 * 16(%rsi), %xmm11, %xmm11; - vpxor 13 * 16(%rsi), %xmm10, %xmm10; - vpxor 14 * 16(%rsi), %xmm9, %xmm9; - vpxor 15 * 16(%rsi), %xmm8, %xmm8; - write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - - FRAME_END - ret; -SYM_FUNC_END(camellia_xts_crypt_16way) - -SYM_FUNC_START(camellia_xts_enc_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - xorl %r8d, %r8d; /* input whitening key, 0 for enc */ - - leaq __camellia_enc_blk16, %r9; - - jmp camellia_xts_crypt_16way; -SYM_FUNC_END(camellia_xts_enc_16way) - -SYM_FUNC_START(camellia_xts_dec_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - - cmpl $16, key_length(CTX); - movl $32, %r8d; - movl $24, %eax; - cmovel %eax, %r8d; /* input whitening key, last for dec */ - - leaq __camellia_dec_blk16, %r9; - - jmp camellia_xts_crypt_16way; -SYM_FUNC_END(camellia_xts_dec_16way) diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 0907243c501c..9561dee52de0 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -7,7 +7,6 @@ #include #include -#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -629,12 +628,6 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -/* For XTS mode */ -.Lxts_gf128mul_and_shl1_mask_0: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 -.Lxts_gf128mul_and_shl1_mask_1: - .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 - /* * pre-SubByte transform * @@ -1201,203 +1194,3 @@ SYM_FUNC_START(camellia_ctr_32way) FRAME_END ret; SYM_FUNC_END(camellia_ctr_32way) - -#define gf128mul_x_ble(iv, mask, tmp) \ - vpsrad $31, iv, tmp; \ - vpaddq iv, iv, iv; \ - vpshufd $0x13, tmp, tmp; \ - vpand mask, tmp, tmp; \ - vpxor tmp, iv, iv; - -#define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \ - vpsrad $31, iv, tmp0; \ - vpaddq iv, iv, tmp1; \ - vpsllq 
$2, iv, iv; \ - vpshufd $0x13, tmp0, tmp0; \ - vpsrad $31, tmp1, tmp1; \ - vpand mask2, tmp0, tmp0; \ - vpshufd $0x13, tmp1, tmp1; \ - vpxor tmp0, iv, iv; \ - vpand mask1, tmp1, tmp1; \ - vpxor tmp1, iv, iv; - -.align 8 -SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (32 blocks) - * %rdx: src (32 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - * %r8: index for input whitening key - * %r9: pointer to __camellia_enc_blk32 or __camellia_dec_blk32 - */ - FRAME_BEGIN - - vzeroupper; - - subq $(16 * 32), %rsp; - movq %rsp, %rax; - - vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_0, %ymm12; - - /* load IV and construct second IV */ - vmovdqu (%rcx), %xmm0; - vmovdqa %xmm0, %xmm15; - gf128mul_x_ble(%xmm0, %xmm12, %xmm13); - vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_1, %ymm13; - vinserti128 $1, %xmm0, %ymm15, %ymm0; - vpxor 0 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 15 * 32(%rax); - vmovdqu %ymm0, 0 * 32(%rsi); - - /* construct IVs */ - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 1 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 14 * 32(%rax); - vmovdqu %ymm0, 1 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 2 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 13 * 32(%rax); - vmovdqu %ymm0, 2 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 3 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 12 * 32(%rax); - vmovdqu %ymm0, 3 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 4 * 32(%rdx), %ymm0, %ymm11; - vmovdqu %ymm0, 4 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 5 * 32(%rdx), %ymm0, %ymm10; - vmovdqu %ymm0, 5 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 6 * 32(%rdx), %ymm0, %ymm9; - vmovdqu %ymm0, 6 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 7 * 32(%rdx), %ymm0, %ymm8; - vmovdqu %ymm0, 7 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 8 * 32(%rdx), %ymm0, %ymm7; - vmovdqu %ymm0, 8 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 9 * 32(%rdx), %ymm0, %ymm6; - vmovdqu %ymm0, 9 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 10 * 32(%rdx), %ymm0, %ymm5; - vmovdqu %ymm0, 10 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 11 * 32(%rdx), %ymm0, %ymm4; - vmovdqu %ymm0, 11 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 12 * 32(%rdx), %ymm0, %ymm3; - vmovdqu %ymm0, 12 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 13 * 32(%rdx), %ymm0, %ymm2; - vmovdqu %ymm0, 13 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 14 * 32(%rdx), %ymm0, %ymm1; - vmovdqu %ymm0, 14 * 32(%rsi); - - gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15); - vpxor 15 * 32(%rdx), %ymm0, %ymm15; - vmovdqu %ymm15, 0 * 32(%rax); - vmovdqu %ymm0, 15 * 32(%rsi); - - vextracti128 $1, %ymm0, %xmm0; - gf128mul_x_ble(%xmm0, %xmm12, %xmm15); - vmovdqu %xmm0, (%rcx); - - /* inpack32_pre: */ - vpbroadcastq (key_table)(CTX, %r8, 8), %ymm15; - vpshufb .Lpack_bswap, %ymm15, %ymm15; - vpxor 0 * 32(%rax), %ymm15, %ymm0; - vpxor %ymm1, %ymm15, %ymm1; - vpxor %ymm2, %ymm15, %ymm2; - vpxor %ymm3, %ymm15, %ymm3; - vpxor %ymm4, %ymm15, %ymm4; - vpxor %ymm5, %ymm15, %ymm5; - vpxor %ymm6, %ymm15, %ymm6; - vpxor %ymm7, %ymm15, %ymm7; - vpxor %ymm8, %ymm15, %ymm8; - vpxor %ymm9, %ymm15, 
%ymm9; - vpxor %ymm10, %ymm15, %ymm10; - vpxor %ymm11, %ymm15, %ymm11; - vpxor 12 * 32(%rax), %ymm15, %ymm12; - vpxor 13 * 32(%rax), %ymm15, %ymm13; - vpxor 14 * 32(%rax), %ymm15, %ymm14; - vpxor 15 * 32(%rax), %ymm15, %ymm15; - - CALL_NOSPEC r9; - - addq $(16 * 32), %rsp; - - vpxor 0 * 32(%rsi), %ymm7, %ymm7; - vpxor 1 * 32(%rsi), %ymm6, %ymm6; - vpxor 2 * 32(%rsi), %ymm5, %ymm5; - vpxor 3 * 32(%rsi), %ymm4, %ymm4; - vpxor 4 * 32(%rsi), %ymm3, %ymm3; - vpxor 5 * 32(%rsi), %ymm2, %ymm2; - vpxor 6 * 32(%rsi), %ymm1, %ymm1; - vpxor 7 * 32(%rsi), %ymm0, %ymm0; - vpxor 8 * 32(%rsi), %ymm15, %ymm15; - vpxor 9 * 32(%rsi), %ymm14, %ymm14; - vpxor 10 * 32(%rsi), %ymm13, %ymm13; - vpxor 11 * 32(%rsi), %ymm12, %ymm12; - vpxor 12 * 32(%rsi), %ymm11, %ymm11; - vpxor 13 * 32(%rsi), %ymm10, %ymm10; - vpxor 14 * 32(%rsi), %ymm9, %ymm9; - vpxor 15 * 32(%rsi), %ymm8, %ymm8; - write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, - %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, - %ymm8, %rsi); - - vzeroupper; - - FRAME_END - ret; -SYM_FUNC_END(camellia_xts_crypt_32way) - -SYM_FUNC_START(camellia_xts_enc_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (32 blocks) - * %rdx: src (32 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - - xorl %r8d, %r8d; /* input whitening key, 0 for enc */ - - leaq __camellia_enc_blk32, %r9; - - jmp camellia_xts_crypt_32way; -SYM_FUNC_END(camellia_xts_enc_32way) - -SYM_FUNC_START(camellia_xts_dec_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (32 blocks) - * %rdx: src (32 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - - cmpl $16, key_length(CTX); - movl $32, %r8d; - movl $24, %eax; - cmovel %eax, %r8d; /* input whitening key, last for dec */ - - leaq __camellia_dec_blk32, %r9; - - jmp camellia_xts_crypt_32way; -SYM_FUNC_END(camellia_xts_dec_32way) diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index ccda647422d6..d956d0473668 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -26,11 +25,6 @@ asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - static const struct common_glue_ctx camellia_enc = { .num_funcs = 4, .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, @@ -69,22 +63,6 @@ static const struct common_glue_ctx camellia_ctr = { } } }; -static const struct common_glue_ctx camellia_enc_xts = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_enc_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_enc_16way } - }, { - .num_blocks = 1, - .fn_u = { .xts = camellia_xts_enc } - } } -}; - static const struct common_glue_ctx camellia_dec = { .num_funcs = 4, .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, @@ -123,22 +101,6 @@ static const struct common_glue_ctx camellia_dec_cbc = { } } }; -static const struct common_glue_ctx camellia_dec_xts = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { 
.xts = camellia_xts_dec_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_dec_16way } - }, { - .num_blocks = 1, - .fn_u = { .xts = camellia_xts_dec } - } } -}; - static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -170,24 +132,6 @@ static int ctr_crypt(struct skcipher_request *req) return glue_ctr_req_128bit(&camellia_ctr, req); } -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, true); -} - static struct skcipher_alg camellia_algs[] = { { .base.cra_name = "__ecb(camellia)", @@ -231,20 +175,6 @@ static struct skcipher_alg camellia_algs[] = { .setkey = camellia_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(camellia)", - .base.cra_driver_name = "__xts-camellia-aesni-avx2", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct camellia_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE, - .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .setkey = xts_camellia_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 4e5de6ef206e..44614f8a452c 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -31,26 +30,6 @@ asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); EXPORT_SYMBOL_GPL(camellia_ctr_16way); -asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -EXPORT_SYMBOL_GPL(camellia_xts_enc_16way); - -asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -EXPORT_SYMBOL_GPL(camellia_xts_dec_16way); - -void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk); -} -EXPORT_SYMBOL_GPL(camellia_xts_enc); - -void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk); -} -EXPORT_SYMBOL_GPL(camellia_xts_dec); - static const struct common_glue_ctx camellia_enc = { .num_funcs = 3, .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, @@ -83,19 +62,6 @@ static const struct common_glue_ctx camellia_ctr = { } } }; -static const struct common_glue_ctx camellia_enc_xts = { - .num_funcs = 2, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_enc_16way } - }, { - .num_blocks = 1, - .fn_u = { .xts = camellia_xts_enc } - } } -}; - static const struct common_glue_ctx camellia_dec = { .num_funcs = 3, .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, @@ -128,19 +94,6 @@ static const struct 
common_glue_ctx camellia_dec_cbc = { } } }; -static const struct common_glue_ctx camellia_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = camellia_xts_dec_16way } - }, { - .num_blocks = 1, - .fn_u = { .xts = camellia_xts_dec } - } } -}; - static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -172,44 +125,6 @@ static int ctr_crypt(struct skcipher_request *req) return glue_ctr_req_128bit(&camellia_ctr, req); } -int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - /* first half of xts-key is for crypt */ - err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); -} -EXPORT_SYMBOL_GPL(xts_camellia_setkey); - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, true); -} - static struct skcipher_alg camellia_algs[] = { { .base.cra_name = "__ecb(camellia)", @@ -253,21 +168,7 @@ static struct skcipher_alg camellia_algs[] = { .setkey = camellia_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(camellia)", - .base.cra_driver_name = "__xts-camellia-aesni", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct camellia_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE, - .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .setkey = xts_camellia_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, - }, + } }; static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)]; diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h index f6d91861cb14..0e5f82adbaf9 100644 --- a/arch/x86/include/asm/crypto/camellia.h +++ b/arch/x86/include/asm/crypto/camellia.h @@ -19,18 +19,10 @@ struct camellia_ctx { u32 key_length; }; -struct camellia_xts_ctx { - struct camellia_ctx tweak_ctx; - struct camellia_ctx crypt_ctx; -}; - extern int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key, unsigned int key_len); -extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen); - /* regular block cipher functions */ asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, bool xor); @@ -49,11 +41,6 @@ asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void 
camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk(ctx, dst, src, false); @@ -83,9 +70,4 @@ extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - #endif /* ASM_X86_CAMELLIA_H */ diff --git a/crypto/Kconfig b/crypto/Kconfig index c48ca26e2169..b9ea4e262ebe 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1305,7 +1305,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 select CRYPTO_CAMELLIA_X86_64 select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SIMD - select CRYPTO_XTS + imply CRYPTO_XTS help Camellia cipher algorithm module (x86_64/AES-NI/AVX). -- cgit v1.2.3-59-g8ed1b From 2cc0fedb8124ac7a75d132988f1e11f5de30c61f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:50 +0100 Subject: crypto: x86/cast6 - switch to XTS template Now that the XTS template can wrap accelerated ECB modes, it can be used to implement CAST6 in XTS mode as well, which turns out to be at least as fast, and sometimes even faster Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 56 ------------------ arch/x86/crypto/cast6_avx_glue.c | 98 ------------------------------- crypto/Kconfig | 2 +- 3 files changed, 1 insertion(+), 155 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 932a3ce32a88..0c1ea836215a 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -212,8 +212,6 @@ .section .rodata.cst16, "aM", @progbits, 16 .align 16 -.Lxts_gf128mul_and_shl1_mask: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 .Lbswap_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .Lbswap128_mask: @@ -440,57 +438,3 @@ SYM_FUNC_START(cast6_ctr_8way) FRAME_END ret; SYM_FUNC_END(cast6_ctr_8way) - -SYM_FUNC_START(cast6_xts_enc_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - pushq %r15; - - movq %rdi, CTX - movq %rsi, %r11; - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask); - - call __cast6_enc_blk8; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - popq %r15; - FRAME_END - ret; -SYM_FUNC_END(cast6_xts_enc_8way) - -SYM_FUNC_START(cast6_xts_dec_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - pushq %r15; - - movq %rdi, CTX - movq %rsi, %r11; - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask); - - call __cast6_dec_blk8; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - popq %r15; - FRAME_END - ret; -SYM_FUNC_END(cast6_xts_dec_8way) diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 48e0f37796fa..5a21d3e9041c 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ 
b/arch/x86/crypto/cast6_avx_glue.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #define CAST6_PARALLEL_BLOCKS 8 @@ -27,27 +26,12 @@ asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return cast6_setkey(&tfm->base, key, keylen); } -static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt); -} - -static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt); -} - static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; @@ -87,19 +71,6 @@ static const struct common_glue_ctx cast6_ctr = { } } }; -static const struct common_glue_ctx cast6_enc_xts = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .xts = cast6_xts_enc_8way } - }, { - .num_blocks = 1, - .fn_u = { .xts = cast6_xts_enc } - } } -}; - static const struct common_glue_ctx cast6_dec = { .num_funcs = 2, .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, @@ -126,19 +97,6 @@ static const struct common_glue_ctx cast6_dec_cbc = { } } }; -static const struct common_glue_ctx cast6_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .xts = cast6_xts_dec_8way } - }, { - .num_blocks = 1, - .fn_u = { .xts = cast6_xts_dec } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { return glue_ecb_req_128bit(&cast6_enc, req); @@ -164,48 +122,6 @@ static int ctr_crypt(struct skcipher_request *req) return glue_ctr_req_128bit(&cast6_ctr, req); } -struct cast6_xts_ctx { - struct cast6_ctx tweak_ctx; - struct cast6_ctx crypt_ctx; -}; - -static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - /* first half of xts-key is for crypt */ - err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); -} - -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt, - &ctx->tweak_ctx, &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt, - &ctx->tweak_ctx, &ctx->crypt_ctx, true); -} - static struct skcipher_alg cast6_algs[] = { { .base.cra_name = "__ecb(cast6)", @@ -249,20 +165,6 @@ static struct skcipher_alg cast6_algs[] = { .setkey = cast6_setkey_skcipher, .encrypt = ctr_crypt, .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(cast6)", - .base.cra_driver_name = 
"__xts-cast6-avx", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = CAST6_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct cast6_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * CAST6_MIN_KEY_SIZE, - .max_keysize = 2 * CAST6_MAX_KEY_SIZE, - .ivsize = CAST6_BLOCK_SIZE, - .setkey = xts_cast6_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/crypto/Kconfig b/crypto/Kconfig index b9ea4e262ebe..03e8468e57df 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1394,7 +1394,7 @@ config CRYPTO_CAST6_AVX_X86_64 select CRYPTO_CAST_COMMON select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SIMD - select CRYPTO_XTS + imply CRYPTO_XTS help The CAST6 encryption algorithm (synonymous with CAST-256) is described in RFC2612. -- cgit v1.2.3-59-g8ed1b From 9ec0af8aa6038163e7cd01dea3b8e085712d19fc Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:51 +0100 Subject: crypto: x86/serpent- switch to XTS template Now that the XTS template can wrap accelerated ECB modes, it can be used to implement Serpent in XTS mode as well, which turns out to be at least as fast, and sometimes even faster Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 48 ------------- arch/x86/crypto/serpent-avx2-asm_64.S | 62 ----------------- arch/x86/crypto/serpent_avx2_glue.c | 72 -------------------- arch/x86/crypto/serpent_avx_glue.c | 101 ---------------------------- arch/x86/include/asm/crypto/serpent-avx.h | 21 ------ crypto/Kconfig | 2 +- 6 files changed, 1 insertion(+), 305 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index ba9e4c1e7f5c..6b41f46bcc76 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -18,10 +18,6 @@ .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16 -.align 16 -.Lxts_gf128mul_and_shl1_mask: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 .text @@ -735,47 +731,3 @@ SYM_FUNC_START(serpent_ctr_8way_avx) FRAME_END ret; SYM_FUNC_END(serpent_ctr_8way_avx) - -SYM_FUNC_START(serpent_xts_enc_8way_avx) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask); - - call __serpent_enc_blk8_avx; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - FRAME_END - ret; -SYM_FUNC_END(serpent_xts_enc_8way_avx) - -SYM_FUNC_START(serpent_xts_dec_8way_avx) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask); - - call __serpent_dec_blk8_avx; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - - FRAME_END - ret; -SYM_FUNC_END(serpent_xts_dec_8way_avx) diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S index c9648aeae705..a510a949f02f 100644 --- a/arch/x86/crypto/serpent-avx2-asm_64.S +++ 
b/arch/x86/crypto/serpent-avx2-asm_64.S @@ -20,16 +20,6 @@ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -.section .rodata.cst16.xts_gf128mul_and_shl1_mask_0, "aM", @progbits, 16 -.align 16 -.Lxts_gf128mul_and_shl1_mask_0: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 - -.section .rodata.cst16.xts_gf128mul_and_shl1_mask_1, "aM", @progbits, 16 -.align 16 -.Lxts_gf128mul_and_shl1_mask_1: - .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 - .text #define CTX %rdi @@ -759,55 +749,3 @@ SYM_FUNC_START(serpent_ctr_16way) FRAME_END ret; SYM_FUNC_END(serpent_ctr_16way) - -SYM_FUNC_START(serpent_xts_enc_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - vzeroupper; - - load_xts_16way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, - .Lxts_gf128mul_and_shl1_mask_0, - .Lxts_gf128mul_and_shl1_mask_1); - - call __serpent_enc_blk16; - - store_xts_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - vzeroupper; - - FRAME_END - ret; -SYM_FUNC_END(serpent_xts_enc_16way) - -SYM_FUNC_START(serpent_xts_dec_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - vzeroupper; - - load_xts_16way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, - .Lxts_gf128mul_and_shl1_mask_0, - .Lxts_gf128mul_and_shl1_mask_1); - - call __serpent_dec_blk16; - - store_xts_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - - vzeroupper; - - FRAME_END - ret; -SYM_FUNC_END(serpent_xts_dec_16way) diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index f973ace44ad3..9cdf2c078e21 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include @@ -25,11 +24,6 @@ asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -68,22 +62,6 @@ static const struct common_glue_ctx serpent_ctr = { } } }; -static const struct common_glue_ctx serpent_enc_xts = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .xts = serpent_xts_enc_16way } - }, { - .num_blocks = 8, - .fn_u = { .xts = serpent_xts_enc_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .xts = serpent_xts_enc } - } } -}; - static const struct common_glue_ctx serpent_dec = { .num_funcs = 3, .fpu_blocks_limit = 8, @@ -116,22 +94,6 @@ static const struct common_glue_ctx serpent_dec_cbc = { } } }; -static const struct common_glue_ctx serpent_dec_xts = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .xts = serpent_xts_dec_16way } - }, { - .num_blocks = 8, - .fn_u = { .xts = serpent_xts_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .xts = serpent_xts_dec } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { return glue_ecb_req_128bit(&serpent_enc, req); @@ -157,26 +119,6 @@ static int 
ctr_crypt(struct skcipher_request *req) return glue_ctr_req_128bit(&serpent_ctr, req); } -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&serpent_enc_xts, req, - __serpent_encrypt, &ctx->tweak_ctx, - &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&serpent_dec_xts, req, - __serpent_encrypt, &ctx->tweak_ctx, - &ctx->crypt_ctx, true); -} - static struct skcipher_alg serpent_algs[] = { { .base.cra_name = "__ecb(serpent)", @@ -220,20 +162,6 @@ static struct skcipher_alg serpent_algs[] = { .setkey = serpent_setkey_skcipher, .encrypt = ctr_crypt, .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(serpent)", - .base.cra_driver_name = "__xts-serpent-avx2", - .base.cra_priority = 600, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = SERPENT_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct serpent_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SERPENT_MIN_KEY_SIZE, - .max_keysize = 2 * SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .setkey = xts_serpent_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 7806d1cbe854..b17a08b57a91 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -36,14 +35,6 @@ asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, le128 *iv); EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx); -asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, - const u8 *src, le128 *iv); -EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx); - -asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, - const u8 *src, le128 *iv); -EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx); - void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; @@ -58,44 +49,12 @@ void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) } EXPORT_SYMBOL_GPL(__serpent_crypt_ctr); -void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt); -} -EXPORT_SYMBOL_GPL(serpent_xts_enc); - -void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt); -} -EXPORT_SYMBOL_GPL(serpent_xts_dec); - static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - /* first half of xts-key is for crypt */ - err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); -} -EXPORT_SYMBOL_GPL(xts_serpent_setkey); - static const struct common_glue_ctx serpent_enc = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, @@ -122,19 
+81,6 @@ static const struct common_glue_ctx serpent_ctr = { } } }; -static const struct common_glue_ctx serpent_enc_xts = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .xts = serpent_xts_enc_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .xts = serpent_xts_enc } - } } -}; - static const struct common_glue_ctx serpent_dec = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, @@ -161,19 +107,6 @@ static const struct common_glue_ctx serpent_dec_cbc = { } } }; -static const struct common_glue_ctx serpent_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .xts = serpent_xts_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .xts = serpent_xts_dec } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { return glue_ecb_req_128bit(&serpent_enc, req); @@ -199,26 +132,6 @@ static int ctr_crypt(struct skcipher_request *req) return glue_ctr_req_128bit(&serpent_ctr, req); } -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&serpent_enc_xts, req, - __serpent_encrypt, &ctx->tweak_ctx, - &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&serpent_dec_xts, req, - __serpent_encrypt, &ctx->tweak_ctx, - &ctx->crypt_ctx, true); -} - static struct skcipher_alg serpent_algs[] = { { .base.cra_name = "__ecb(serpent)", @@ -262,20 +175,6 @@ static struct skcipher_alg serpent_algs[] = { .setkey = serpent_setkey_skcipher, .encrypt = ctr_crypt, .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(serpent)", - .base.cra_driver_name = "__xts-serpent-avx", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = SERPENT_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct serpent_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SERPENT_MIN_KEY_SIZE, - .max_keysize = 2 * SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .setkey = xts_serpent_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h index 251c2c89d7cf..23f3361a0e72 100644 --- a/arch/x86/include/asm/crypto/serpent-avx.h +++ b/arch/x86/include/asm/crypto/serpent-avx.h @@ -10,11 +10,6 @@ struct crypto_skcipher; #define SERPENT_PARALLEL_BLOCKS 8 -struct serpent_xts_ctx { - struct serpent_ctx tweak_ctx; - struct serpent_ctx crypt_ctx; -}; - asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src); asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, @@ -22,21 +17,5 @@ asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, - const u8 *src, le128 *iv); - -extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - -extern void 
serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv); - -extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen); #endif diff --git a/crypto/Kconfig b/crypto/Kconfig index 03e8468e57df..ce69a5ae26b5 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1576,7 +1576,7 @@ config CRYPTO_SERPENT_AVX_X86_64 select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_SIMD - select CRYPTO_XTS + imply CRYPTO_XTS help Serpent cipher algorithm, by Anderson, Biham & Knudsen. -- cgit v1.2.3-59-g8ed1b From da4df93a94a5aa7c5a599959d79ee99cdbe4c6b7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:52 +0100 Subject: crypto: x86/twofish - switch to XTS template Now that the XTS template can wrap accelerated ECB modes, it can be used to implement Twofish in XTS mode as well, which turns out to be at least as fast, and sometimes even faster. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/twofish-avx-x86_64-asm_64.S | 53 ---------------- arch/x86/crypto/twofish_avx_glue.c | 98 ----------------------------- crypto/Kconfig | 1 + 3 files changed, 1 insertion(+), 151 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index a5151393bb2f..84e61ef03638 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -19,11 +19,6 @@ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16 -.align 16 -.Lxts_gf128mul_and_shl1_mask: - .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 - .text /* structure of crypto context */ @@ -406,51 +401,3 @@ SYM_FUNC_START(twofish_ctr_8way) FRAME_END ret; SYM_FUNC_END(twofish_ctr_8way) - -SYM_FUNC_START(twofish_xts_enc_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - movq %rsi, %r11; - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, - RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask); - - call __twofish_enc_blk8; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - - FRAME_END - ret; -SYM_FUNC_END(twofish_xts_enc_8way) - -SYM_FUNC_START(twofish_xts_dec_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) - */ - FRAME_BEGIN - - movq %rsi, %r11; - - /* regs <= src, dst <= IVs, regs <= regs xor IVs */ - load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2, - RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask); - - call __twofish_dec_blk8; - - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - FRAME_END - ret; -SYM_FUNC_END(twofish_xts_dec_8way) diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index 2dbc8ce3730e..7b539bbb108f 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -29,11 +28,6 @@ asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -asmlinkage void twofish_xts_enc_8way(const void 
*ctx, u8 *dst, const u8 *src, - le128 *iv); -asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); - static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -45,40 +39,6 @@ static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) __twofish_enc_blk_3way(ctx, dst, src, false); } -static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk); -} - -static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) -{ - glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk); -} - -struct twofish_xts_ctx { - struct twofish_ctx tweak_ctx; - struct twofish_ctx crypt_ctx; -}; - -static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - /* first half of xts-key is for crypt */ - err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2); - if (err) - return err; - - /* second half of xts-key is for tweak */ - return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); -} - static const struct common_glue_ctx twofish_enc = { .num_funcs = 3, .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, @@ -111,19 +71,6 @@ static const struct common_glue_ctx twofish_ctr = { } } }; -static const struct common_glue_ctx twofish_enc_xts = { - .num_funcs = 2, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .xts = twofish_xts_enc_8way } - }, { - .num_blocks = 1, - .fn_u = { .xts = twofish_xts_enc } - } } -}; - static const struct common_glue_ctx twofish_dec = { .num_funcs = 3, .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, @@ -156,19 +103,6 @@ static const struct common_glue_ctx twofish_dec_cbc = { } } }; -static const struct common_glue_ctx twofish_dec_xts = { - .num_funcs = 2, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .xts = twofish_xts_dec_8way } - }, { - .num_blocks = 1, - .fn_u = { .xts = twofish_xts_dec } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { return glue_ecb_req_128bit(&twofish_enc, req); @@ -194,24 +128,6 @@ static int ctr_crypt(struct skcipher_request *req) return glue_ctr_req_128bit(&twofish_ctr, req); } -static int xts_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, false); -} - -static int xts_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk, - &ctx->tweak_ctx, &ctx->crypt_ctx, true); -} - static struct skcipher_alg twofish_algs[] = { { .base.cra_name = "__ecb(twofish)", @@ -255,20 +171,6 @@ static struct skcipher_alg twofish_algs[] = { .setkey = twofish_setkey_skcipher, .encrypt = ctr_crypt, .decrypt = ctr_crypt, - }, { - .base.cra_name = "__xts(twofish)", - .base.cra_driver_name = "__xts-twofish-avx", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = TF_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct 
twofish_xts_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * TF_MIN_KEY_SIZE, - .max_keysize = 2 * TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .setkey = xts_twofish_setkey, - .encrypt = xts_encrypt, - .decrypt = xts_decrypt, }, }; diff --git a/crypto/Kconfig b/crypto/Kconfig index ce69a5ae26b5..7ad9bf84f4a0 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1731,6 +1731,7 @@ config CRYPTO_TWOFISH_AVX_X86_64 select CRYPTO_TWOFISH_COMMON select CRYPTO_TWOFISH_X86_64 select CRYPTO_TWOFISH_X86_64_3WAY + imply CRYPTO_XTS help Twofish cipher algorithm (x86_64/AVX). -- cgit v1.2.3-59-g8ed1b From a1f91ecf812ac333ee2897f3eb2d8f4f6b4ce942 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:54 +0100 Subject: crypto: x86/camellia - drop CTR mode implementation Camellia in CTR mode is never used by the kernel directly, and is highly unlikely to be relied upon by dm-crypt or algif_skcipher. So let's drop the accelerated CTR mode implementation, and instead, rely on the CTR template and the bare cipher. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/camellia-aesni-avx-asm_64.S | 117 ---------------------- arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 144 --------------------------- arch/x86/crypto/camellia_aesni_avx2_glue.c | 41 -------- arch/x86/crypto/camellia_aesni_avx_glue.c | 40 -------- arch/x86/crypto/camellia_glue.c | 68 ------------- arch/x86/include/asm/crypto/camellia.h | 6 -- crypto/Kconfig | 1 + 7 files changed, 1 insertion(+), 416 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index 471c34e6cac2..e2a0e0f4bf9d 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -588,10 +588,6 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .long 0x80808080 .long 0x80808080 -/* For CTR-mode IV byteswap */ -.Lbswap128_mask: - .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 - /* * pre-SubByte transform * @@ -993,116 +989,3 @@ SYM_FUNC_START(camellia_cbc_dec_16way) FRAME_END ret; SYM_FUNC_END(camellia_cbc_dec_16way) - -#define inc_le128(x, minus_one, tmp) \ - vpcmpeqq minus_one, x, tmp; \ - vpsubq minus_one, x, x; \ - vpslldq $8, tmp, tmp; \ - vpsubq tmp, x, x; - -SYM_FUNC_START(camellia_ctr_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - subq $(16 * 16), %rsp; - movq %rsp, %rax; - - vmovdqa .Lbswap128_mask, %xmm14; - - /* load IV and byteswap */ - vmovdqu (%rcx), %xmm0; - vpshufb %xmm14, %xmm0, %xmm15; - vmovdqu %xmm15, 15 * 16(%rax); - - vpcmpeqd %xmm15, %xmm15, %xmm15; - vpsrldq $8, %xmm15, %xmm15; /* low: -1, high: 0 */ - - /* construct IVs */ - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm13; - vmovdqu %xmm13, 14 * 16(%rax); - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm13; - vmovdqu %xmm13, 13 * 16(%rax); - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm12; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm11; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm10; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm9; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm8; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm7; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm6; - inc_le128(%xmm0, 
%xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm5; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm4; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm3; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm2; - inc_le128(%xmm0, %xmm15, %xmm13); - vpshufb %xmm14, %xmm0, %xmm1; - inc_le128(%xmm0, %xmm15, %xmm13); - vmovdqa %xmm0, %xmm13; - vpshufb %xmm14, %xmm0, %xmm0; - inc_le128(%xmm13, %xmm15, %xmm14); - vmovdqu %xmm13, (%rcx); - - /* inpack16_pre: */ - vmovq (key_table)(CTX), %xmm15; - vpshufb .Lpack_bswap, %xmm15, %xmm15; - vpxor %xmm0, %xmm15, %xmm0; - vpxor %xmm1, %xmm15, %xmm1; - vpxor %xmm2, %xmm15, %xmm2; - vpxor %xmm3, %xmm15, %xmm3; - vpxor %xmm4, %xmm15, %xmm4; - vpxor %xmm5, %xmm15, %xmm5; - vpxor %xmm6, %xmm15, %xmm6; - vpxor %xmm7, %xmm15, %xmm7; - vpxor %xmm8, %xmm15, %xmm8; - vpxor %xmm9, %xmm15, %xmm9; - vpxor %xmm10, %xmm15, %xmm10; - vpxor %xmm11, %xmm15, %xmm11; - vpxor %xmm12, %xmm15, %xmm12; - vpxor 13 * 16(%rax), %xmm15, %xmm13; - vpxor 14 * 16(%rax), %xmm15, %xmm14; - vpxor 15 * 16(%rax), %xmm15, %xmm15; - - call __camellia_enc_blk16; - - addq $(16 * 16), %rsp; - - vpxor 0 * 16(%rdx), %xmm7, %xmm7; - vpxor 1 * 16(%rdx), %xmm6, %xmm6; - vpxor 2 * 16(%rdx), %xmm5, %xmm5; - vpxor 3 * 16(%rdx), %xmm4, %xmm4; - vpxor 4 * 16(%rdx), %xmm3, %xmm3; - vpxor 5 * 16(%rdx), %xmm2, %xmm2; - vpxor 6 * 16(%rdx), %xmm1, %xmm1; - vpxor 7 * 16(%rdx), %xmm0, %xmm0; - vpxor 8 * 16(%rdx), %xmm15, %xmm15; - vpxor 9 * 16(%rdx), %xmm14, %xmm14; - vpxor 10 * 16(%rdx), %xmm13, %xmm13; - vpxor 11 * 16(%rdx), %xmm12, %xmm12; - vpxor 12 * 16(%rdx), %xmm11, %xmm11; - vpxor 13 * 16(%rdx), %xmm10, %xmm10; - vpxor 14 * 16(%rdx), %xmm9, %xmm9; - vpxor 15 * 16(%rdx), %xmm8, %xmm8; - write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - - FRAME_END - ret; -SYM_FUNC_END(camellia_ctr_16way) diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 9561dee52de0..782e9712a1ec 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -624,10 +624,6 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .section .rodata.cst16, "aM", @progbits, 16 .align 16 -/* For CTR-mode IV byteswap */ -.Lbswap128_mask: - .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 - /* * pre-SubByte transform * @@ -1054,143 +1050,3 @@ SYM_FUNC_START(camellia_cbc_dec_32way) FRAME_END ret; SYM_FUNC_END(camellia_cbc_dec_32way) - -#define inc_le128(x, minus_one, tmp) \ - vpcmpeqq minus_one, x, tmp; \ - vpsubq minus_one, x, x; \ - vpslldq $8, tmp, tmp; \ - vpsubq tmp, x, x; - -#define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \ - vpcmpeqq minus_one, x, tmp1; \ - vpcmpeqq minus_two, x, tmp2; \ - vpsubq minus_two, x, x; \ - vpor tmp2, tmp1, tmp1; \ - vpslldq $8, tmp1, tmp1; \ - vpsubq tmp1, x, x; - -SYM_FUNC_START(camellia_ctr_32way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (32 blocks) - * %rdx: src (32 blocks) - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - vzeroupper; - - movq %rsp, %r10; - cmpq %rsi, %rdx; - je .Lctr_use_stack; - - /* dst can be used as temporary storage, src is not overwritten. 
*/ - movq %rsi, %rax; - jmp .Lctr_continue; - -.Lctr_use_stack: - subq $(16 * 32), %rsp; - movq %rsp, %rax; - -.Lctr_continue: - vpcmpeqd %ymm15, %ymm15, %ymm15; - vpsrldq $8, %ymm15, %ymm15; /* ab: -1:0 ; cd: -1:0 */ - vpaddq %ymm15, %ymm15, %ymm12; /* ab: -2:0 ; cd: -2:0 */ - - /* load IV and byteswap */ - vmovdqu (%rcx), %xmm0; - vmovdqa %xmm0, %xmm1; - inc_le128(%xmm0, %xmm15, %xmm14); - vbroadcasti128 .Lbswap128_mask, %ymm14; - vinserti128 $1, %xmm0, %ymm1, %ymm0; - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 15 * 32(%rax); - - /* construct IVs */ - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); /* ab:le2 ; cd:le3 */ - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 14 * 32(%rax); - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 13 * 32(%rax); - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 12 * 32(%rax); - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm13; - vmovdqu %ymm13, 11 * 32(%rax); - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm10; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm9; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm8; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm7; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm6; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm5; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm4; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm3; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm2; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vpshufb %ymm14, %ymm0, %ymm1; - add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); - vextracti128 $1, %ymm0, %xmm13; - vpshufb %ymm14, %ymm0, %ymm0; - inc_le128(%xmm13, %xmm15, %xmm14); - vmovdqu %xmm13, (%rcx); - - /* inpack32_pre: */ - vpbroadcastq (key_table)(CTX), %ymm15; - vpshufb .Lpack_bswap, %ymm15, %ymm15; - vpxor %ymm0, %ymm15, %ymm0; - vpxor %ymm1, %ymm15, %ymm1; - vpxor %ymm2, %ymm15, %ymm2; - vpxor %ymm3, %ymm15, %ymm3; - vpxor %ymm4, %ymm15, %ymm4; - vpxor %ymm5, %ymm15, %ymm5; - vpxor %ymm6, %ymm15, %ymm6; - vpxor %ymm7, %ymm15, %ymm7; - vpxor %ymm8, %ymm15, %ymm8; - vpxor %ymm9, %ymm15, %ymm9; - vpxor %ymm10, %ymm15, %ymm10; - vpxor 11 * 32(%rax), %ymm15, %ymm11; - vpxor 12 * 32(%rax), %ymm15, %ymm12; - vpxor 13 * 32(%rax), %ymm15, %ymm13; - vpxor 14 * 32(%rax), %ymm15, %ymm14; - vpxor 15 * 32(%rax), %ymm15, %ymm15; - - call __camellia_enc_blk32; - - movq %r10, %rsp; - - vpxor 0 * 32(%rdx), %ymm7, %ymm7; - vpxor 1 * 32(%rdx), %ymm6, %ymm6; - vpxor 2 * 32(%rdx), %ymm5, %ymm5; - vpxor 3 * 32(%rdx), %ymm4, %ymm4; - vpxor 4 * 32(%rdx), %ymm3, %ymm3; - vpxor 5 * 32(%rdx), %ymm2, %ymm2; - vpxor 6 * 32(%rdx), %ymm1, %ymm1; - vpxor 7 * 32(%rdx), %ymm0, %ymm0; - vpxor 8 * 32(%rdx), %ymm15, %ymm15; - vpxor 9 * 32(%rdx), %ymm14, %ymm14; - vpxor 10 * 32(%rdx), %ymm13, %ymm13; - vpxor 11 * 32(%rdx), %ymm12, %ymm12; - vpxor 12 * 32(%rdx), %ymm11, %ymm11; - vpxor 13 * 32(%rdx), %ymm10, %ymm10; - vpxor 14 * 32(%rdx), %ymm9, %ymm9; - vpxor 15 * 32(%rdx), %ymm8, %ymm8; - write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, - %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, - %ymm8, %rsi); - - vzeroupper; - - FRAME_END - 
ret; -SYM_FUNC_END(camellia_ctr_32way) diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index d956d0473668..8f25a2a6222e 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -22,8 +22,6 @@ asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); static const struct common_glue_ctx camellia_enc = { .num_funcs = 4, @@ -44,25 +42,6 @@ static const struct common_glue_ctx camellia_enc = { } } }; -static const struct common_glue_ctx camellia_ctr = { - .num_funcs = 4, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ctr = camellia_ctr_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ctr = camellia_ctr_16way } - }, { - .num_blocks = 2, - .fn_u = { .ctr = camellia_crypt_ctr_2way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = camellia_crypt_ctr } - } } -}; - static const struct common_glue_ctx camellia_dec = { .num_funcs = 4, .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, @@ -127,11 +106,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&camellia_ctr, req); -} - static struct skcipher_alg camellia_algs[] = { { .base.cra_name = "__ecb(camellia)", @@ -160,21 +134,6 @@ static struct skcipher_alg camellia_algs[] = { .setkey = camellia_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(camellia)", - .base.cra_driver_name = "__ctr-camellia-aesni-avx2", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct camellia_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAMELLIA_MIN_KEY_SIZE, - .max_keysize = CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .chunksize = CAMELLIA_BLOCK_SIZE, - .setkey = camellia_setkey, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 44614f8a452c..22a89cdfedfb 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -26,10 +26,6 @@ EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way); asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way); -asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -EXPORT_SYMBOL_GPL(camellia_ctr_16way); - static const struct common_glue_ctx camellia_enc = { .num_funcs = 3, .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, @@ -46,22 +42,6 @@ static const struct common_glue_ctx camellia_enc = { } } }; -static const struct common_glue_ctx camellia_ctr = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ctr = camellia_ctr_16way } - }, { - .num_blocks = 2, - .fn_u = { .ctr = camellia_crypt_ctr_2way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = camellia_crypt_ctr } - } } -}; - static const struct common_glue_ctx camellia_dec = { 
.num_funcs = 3, .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, @@ -120,11 +100,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&camellia_ctr, req); -} - static struct skcipher_alg camellia_algs[] = { { .base.cra_name = "__ecb(camellia)", @@ -153,21 +128,6 @@ static struct skcipher_alg camellia_algs[] = { .setkey = camellia_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(camellia)", - .base.cra_driver_name = "__ctr-camellia-aesni", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct camellia_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAMELLIA_MIN_KEY_SIZE, - .max_keysize = CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .chunksize = CAMELLIA_BLOCK_SIZE, - .setkey = camellia_setkey, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, } }; diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index 242c056e5fa8..fefeedf2b33d 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -1274,42 +1274,6 @@ void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s) } EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way); -void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - if (dst != src) - *dst = *src; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - camellia_enc_blk_xor(ctx, (u8 *)dst, (u8 *)&ctrblk); -} -EXPORT_SYMBOL_GPL(camellia_crypt_ctr); - -void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblks[2]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - if (dst != src) { - dst[0] = src[0]; - dst[1] = src[1]; - } - - le128_to_be128(&ctrblks[0], iv); - le128_inc(iv); - le128_to_be128(&ctrblks[1], iv); - le128_inc(iv); - - camellia_enc_blk_xor_2way(ctx, (u8 *)dst, (u8 *)ctrblks); -} -EXPORT_SYMBOL_GPL(camellia_crypt_ctr_2way); - static const struct common_glue_ctx camellia_enc = { .num_funcs = 2, .fpu_blocks_limit = -1, @@ -1323,19 +1287,6 @@ static const struct common_glue_ctx camellia_enc = { } } }; -static const struct common_glue_ctx camellia_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 2, - .fn_u = { .ctr = camellia_crypt_ctr_2way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = camellia_crypt_ctr } - } } -}; - static const struct common_glue_ctx camellia_dec = { .num_funcs = 2, .fpu_blocks_limit = -1, @@ -1382,11 +1333,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&camellia_ctr, req); -} - static struct crypto_alg camellia_cipher_alg = { .cra_name = "camellia", .cra_driver_name = "camellia-asm", @@ -1433,20 +1379,6 @@ static struct skcipher_alg camellia_skcipher_algs[] = { .setkey = camellia_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ctr(camellia)", - .base.cra_driver_name = "ctr-camellia-asm", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct camellia_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAMELLIA_MIN_KEY_SIZE, - .max_keysize = CAMELLIA_MAX_KEY_SIZE, - .ivsize = 
CAMELLIA_BLOCK_SIZE, - .chunksize = CAMELLIA_BLOCK_SIZE, - .setkey = camellia_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, } }; diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h index 0e5f82adbaf9..1dcea79e8f8e 100644 --- a/arch/x86/include/asm/crypto/camellia.h +++ b/arch/x86/include/asm/crypto/camellia.h @@ -38,8 +38,6 @@ asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src) { @@ -65,9 +63,5 @@ static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst, /* glue helpers */ extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src); -extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); #endif /* ASM_X86_CAMELLIA_H */ diff --git a/crypto/Kconfig b/crypto/Kconfig index 7ad9bf84f4a0..ea788cab8c7d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1286,6 +1286,7 @@ config CRYPTO_CAMELLIA_X86_64 depends on CRYPTO select CRYPTO_SKCIPHER select CRYPTO_GLUE_HELPER_X86 + imply CRYPTO_CTR help Camellia cipher algorithm module (x86_64). -- cgit v1.2.3-59-g8ed1b From 2e9440ae6eab492572463d8cb266381264867723 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:55 +0100 Subject: crypto: x86/serpent - drop CTR mode implementation Serpent in CTR mode is never used by the kernel directly, and is highly unlikely to be relied upon by dm-crypt or algif_skcipher. So let's drop the accelerated CTR mode implementation, and instead, rely on the CTR template and the bare cipher. 
Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 20 --------- arch/x86/crypto/serpent-avx2-asm_64.S | 25 ----------- arch/x86/crypto/serpent_avx2_glue.c | 38 ---------------- arch/x86/crypto/serpent_avx_glue.c | 51 ---------------------- arch/x86/crypto/serpent_sse2_glue.c | 67 ----------------------------- crypto/Kconfig | 3 ++ 6 files changed, 3 insertions(+), 201 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index 6b41f46bcc76..b7ee24df7fba 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -711,23 +711,3 @@ SYM_FUNC_START(serpent_cbc_dec_8way_avx) FRAME_END ret; SYM_FUNC_END(serpent_cbc_dec_8way_avx) - -SYM_FUNC_START(serpent_ctr_8way_avx) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RK0, RK1, RK2); - - call __serpent_enc_blk8_avx; - - store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - FRAME_END - ret; -SYM_FUNC_END(serpent_ctr_8way_avx) diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S index a510a949f02f..9161b6e441f3 100644 --- a/arch/x86/crypto/serpent-avx2-asm_64.S +++ b/arch/x86/crypto/serpent-avx2-asm_64.S @@ -724,28 +724,3 @@ SYM_FUNC_START(serpent_cbc_dec_16way) FRAME_END ret; SYM_FUNC_END(serpent_cbc_dec_16way) - -SYM_FUNC_START(serpent_ctr_16way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - vzeroupper; - - load_ctr_16way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, - tp); - - call __serpent_enc_blk16; - - store_ctr_16way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - vzeroupper; - - FRAME_END - ret; -SYM_FUNC_END(serpent_ctr_16way) diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 9cdf2c078e21..28e542c6512a 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -22,8 +22,6 @@ asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -46,22 +44,6 @@ static const struct common_glue_ctx serpent_enc = { } } }; -static const struct common_glue_ctx serpent_ctr = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .ctr = serpent_ctr_16way } - }, { - .num_blocks = 8, - .fn_u = { .ctr = serpent_ctr_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ctr = __serpent_crypt_ctr } - } } -}; - static const struct common_glue_ctx serpent_dec = { .num_funcs = 3, .fpu_blocks_limit = 8, @@ -114,11 +96,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&serpent_ctr, req); -} - static struct skcipher_alg serpent_algs[] = { { .base.cra_name = 
"__ecb(serpent)", @@ -147,21 +124,6 @@ static struct skcipher_alg serpent_algs[] = { .setkey = serpent_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(serpent)", - .base.cra_driver_name = "__ctr-serpent-avx2", - .base.cra_priority = 600, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct serpent_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = SERPENT_MIN_KEY_SIZE, - .max_keysize = SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .chunksize = SERPENT_BLOCK_SIZE, - .setkey = serpent_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index b17a08b57a91..aa4605baf9d4 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -31,24 +31,6 @@ asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx); -asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx); - -void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - u128_xor(dst, src, (u128 *)&ctrblk); -} -EXPORT_SYMBOL_GPL(__serpent_crypt_ctr); - static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -68,19 +50,6 @@ static const struct common_glue_ctx serpent_enc = { } } }; -static const struct common_glue_ctx serpent_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ctr = serpent_ctr_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ctr = __serpent_crypt_ctr } - } } -}; - static const struct common_glue_ctx serpent_dec = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, @@ -127,11 +96,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&serpent_ctr, req); -} - static struct skcipher_alg serpent_algs[] = { { .base.cra_name = "__ecb(serpent)", @@ -160,21 +124,6 @@ static struct skcipher_alg serpent_algs[] = { .setkey = serpent_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(serpent)", - .base.cra_driver_name = "__ctr-serpent-avx", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct serpent_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = SERPENT_MIN_KEY_SIZE, - .max_keysize = SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .chunksize = SERPENT_BLOCK_SIZE, - .setkey = serpent_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 4fed8d26b91a..9acb3bf28feb 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -10,8 +10,6 @@ * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu - * CTR part based on code (crypto/ctr.c) by: - * (C) Copyright IBM Corp. 
2007 - Joy Latten */ #include @@ -47,38 +45,6 @@ static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s) u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); } -static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - u128_xor(dst, src, (u128 *)&ctrblk); -} - -static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s, - le128 *iv) -{ - be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - unsigned int i; - - for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { - if (dst != src) - dst[i] = src[i]; - - le128_to_be128(&ctrblks[i], iv); - le128_inc(iv); - } - - serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks); -} - static const struct common_glue_ctx serpent_enc = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, @@ -92,19 +58,6 @@ static const struct common_glue_ctx serpent_enc = { } } }; -static const struct common_glue_ctx serpent_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ctr = serpent_crypt_ctr_xway } - }, { - .num_blocks = 1, - .fn_u = { .ctr = serpent_crypt_ctr } - } } -}; - static const struct common_glue_ctx serpent_dec = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, @@ -152,11 +105,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&serpent_ctr, req); -} - static struct skcipher_alg serpent_algs[] = { { .base.cra_name = "__ecb(serpent)", @@ -185,21 +133,6 @@ static struct skcipher_alg serpent_algs[] = { .setkey = serpent_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(serpent)", - .base.cra_driver_name = "__ctr-serpent-sse2", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct serpent_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = SERPENT_MIN_KEY_SIZE, - .max_keysize = SERPENT_MAX_KEY_SIZE, - .ivsize = SERPENT_BLOCK_SIZE, - .chunksize = SERPENT_BLOCK_SIZE, - .setkey = serpent_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/crypto/Kconfig b/crypto/Kconfig index ea788cab8c7d..dd48c3bab3f5 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1539,6 +1539,7 @@ config CRYPTO_SERPENT_SSE2_X86_64 select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_SIMD + imply CRYPTO_CTR help Serpent cipher algorithm, by Anderson, Biham & Knudsen. @@ -1558,6 +1559,7 @@ config CRYPTO_SERPENT_SSE2_586 select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_SIMD + imply CRYPTO_CTR help Serpent cipher algorithm, by Anderson, Biham & Knudsen. @@ -1578,6 +1580,7 @@ config CRYPTO_SERPENT_AVX_X86_64 select CRYPTO_SERPENT select CRYPTO_SIMD imply CRYPTO_XTS + imply CRYPTO_CTR help Serpent cipher algorithm, by Anderson, Biham & Knudsen. 
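For the run of "drop CTR mode implementation" patches in this series, the practical effect is easiest to see from the caller's side: with the dedicated glue code gone, a request for "ctr(serpent)" (and likewise ctr(camellia), ctr(cast5), ctr(cast6), ctr(twofish)) is satisfied by instantiating the generic CTR template from crypto/ctr.c around the bare cipher, which is why the Kconfig entries gain "imply CRYPTO_CTR" to keep the template available in configurations that enable these drivers. The sketch below is illustrative only and not part of the series; the function name and buffer handling are invented for this note, but the calls are the standard synchronous skcipher pattern, and such a caller is unaffected by the change:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: encrypt @len bytes of @buf in place with
 * Serpent in CTR mode, using the 16-byte counter block in @iv. */
static int demo_ctr_serpent(const u8 *key, unsigned int keylen,
			    u8 *buf, unsigned int len, u8 iv[16])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* With this series applied, "ctr(serpent)" resolves to the
	 * generic CTR template wrapped around the bare serpent cipher. */
	tfm = crypto_alloc_skcipher("ctr(serpent)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* In CTR mode, encryption and decryption are the same operation. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

The generic composition feeds the keystream blocks through the cipher's single-block encrypt hook, so it gives up the 8- and 16-way parallel SIMD paths deleted here; the trade-off is acceptable because, as the commit messages note, CTR mode on these algorithms has no known in-kernel users.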
-- cgit v1.2.3-59-g8ed1b From e2d60e2f597a5b2a0a8724989742784bb83ada5d Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:56 +0100 Subject: crypto: x86/cast5 - drop CTR mode implementation CAST5 in CTR mode is never used by the kernel directly, and is highly unlikely to be relied upon by dm-crypt or algif_skcipher. So let's drop the accelerated CTR mode implementation, and instead, rely on the CTR template and the bare cipher. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/cast5_avx_glue.c | 103 --------------------------------------- crypto/Kconfig | 1 + 2 files changed, 1 insertion(+), 103 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 384ccb00f9e1..e0d1c7903b29 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -23,8 +23,6 @@ asmlinkage void cast5_ecb_dec_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src); asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src); -asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src, - __be64 *iv); static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -214,92 +212,6 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static void ctr_crypt_final(struct skcipher_walk *walk, struct cast5_ctx *ctx) -{ - u8 *ctrblk = walk->iv; - u8 keystream[CAST5_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - __cast5_encrypt(ctx, keystream, ctrblk); - crypto_xor_cpy(dst, keystream, src, nbytes); - - crypto_inc(ctrblk, CAST5_BLOCK_SIZE); -} - -static unsigned int __ctr_crypt(struct skcipher_walk *walk, - struct cast5_ctx *ctx) -{ - const unsigned int bsize = CAST5_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - - /* Process multi-block batch */ - if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { - do { - cast5_ctr_16way(ctx, (u8 *)dst, (u8 *)src, - (__be64 *)walk->iv); - - src += CAST5_PARALLEL_BLOCKS; - dst += CAST5_PARALLEL_BLOCKS; - nbytes -= bsize * CAST5_PARALLEL_BLOCKS; - } while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - u64 ctrblk; - - if (dst != src) - *dst = *src; - - ctrblk = *(u64 *)walk->iv; - be64_add_cpu((__be64 *)walk->iv, 1); - - __cast5_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - *dst ^= ctrblk; - - src += 1; - dst += 1; - nbytes -= bsize; - } while (nbytes >= bsize); - -done: - return nbytes; -} - -static int ctr_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); - bool fpu_enabled = false; - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); - nbytes = __ctr_crypt(&walk, ctx); - err = skcipher_walk_done(&walk, nbytes); - } - - cast5_fpu_end(fpu_enabled); - - if (walk.nbytes) { - ctr_crypt_final(&walk, ctx); - err = skcipher_walk_done(&walk, 0); - } - - return err; -} - static struct skcipher_alg cast5_algs[] = { { .base.cra_name = "__ecb(cast5)", @@ -328,21 +240,6 @@ static struct skcipher_alg cast5_algs[] = { .setkey = cast5_setkey_skcipher, .encrypt 
= cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(cast5)", - .base.cra_driver_name = "__ctr-cast5-avx", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct cast5_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAST5_MIN_KEY_SIZE, - .max_keysize = CAST5_MAX_KEY_SIZE, - .ivsize = CAST5_BLOCK_SIZE, - .chunksize = CAST5_BLOCK_SIZE, - .setkey = cast5_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, } }; diff --git a/crypto/Kconfig b/crypto/Kconfig index dd48c3bab3f5..fed73fff5a65 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1372,6 +1372,7 @@ config CRYPTO_CAST5_AVX_X86_64 select CRYPTO_CAST5 select CRYPTO_CAST_COMMON select CRYPTO_SIMD + imply CRYPTO_CTR help The CAST5 encryption algorithm (synonymous with CAST-128) is described in RFC2144. -- cgit v1.2.3-59-g8ed1b From 7a6623cc6867b5f24f750a7c16b996b0cbbc63b5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:57 +0100 Subject: crypto: x86/cast6 - drop CTR mode implementation CAST6 in CTR mode is never used by the kernel directly, and is highly unlikely to be relied upon by dm-crypt or algif_skcipher. So let's drop the accelerated CTR mode implementation, and instead, rely on the CTR template and the bare cipher. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 28 ------------------ arch/x86/crypto/cast6_avx_glue.c | 48 ------------------------------- crypto/Kconfig | 1 + 3 files changed, 1 insertion(+), 76 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 0c1ea836215a..fbddcecc3e3f 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -410,31 +410,3 @@ SYM_FUNC_START(cast6_cbc_dec_8way) FRAME_END ret; SYM_FUNC_END(cast6_cbc_dec_8way) - -SYM_FUNC_START(cast6_ctr_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - pushq %r12; - pushq %r15 - - movq %rdi, CTX; - movq %rsi, %r11; - movq %rdx, %r12; - - load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RX, RKR, RKM); - - call __cast6_enc_blk8; - - store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - popq %r15; - popq %r12; - FRAME_END - ret; -SYM_FUNC_END(cast6_ctr_8way) diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 5a21d3e9041c..790efcb6df3b 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -23,8 +23,6 @@ asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -32,19 +30,6 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, return cast6_setkey(&tfm->base, key, keylen); } -static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - __cast6_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - u128_xor(dst, src, (u128 
*)&ctrblk); -} - static const struct common_glue_ctx cast6_enc = { .num_funcs = 2, .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, @@ -58,19 +43,6 @@ static const struct common_glue_ctx cast6_enc = { } } }; -static const struct common_glue_ctx cast6_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ctr = cast6_ctr_8way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = cast6_crypt_ctr } - } } -}; - static const struct common_glue_ctx cast6_dec = { .num_funcs = 2, .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, @@ -117,11 +89,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&cast6_ctr, req); -} - static struct skcipher_alg cast6_algs[] = { { .base.cra_name = "__ecb(cast6)", @@ -150,21 +117,6 @@ static struct skcipher_alg cast6_algs[] = { .setkey = cast6_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(cast6)", - .base.cra_driver_name = "__ctr-cast6-avx", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct cast6_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = CAST6_MIN_KEY_SIZE, - .max_keysize = CAST6_MAX_KEY_SIZE, - .ivsize = CAST6_BLOCK_SIZE, - .chunksize = CAST6_BLOCK_SIZE, - .setkey = cast6_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/crypto/Kconfig b/crypto/Kconfig index fed73fff5a65..3f51c5dfc2a9 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1397,6 +1397,7 @@ config CRYPTO_CAST6_AVX_X86_64 select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SIMD imply CRYPTO_XTS + imply CRYPTO_CTR help The CAST6 encryption algorithm (synonymous with CAST-256) is described in RFC2612. -- cgit v1.2.3-59-g8ed1b From f43dcaf2c97eae986378f12c46b27fe21f8a885b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:47:58 +0100 Subject: crypto: x86/twofish - drop CTR mode implementation Twofish in CTR mode is never used by the kernel directly, and is highly unlikely to be relied upon by dm-crypt or algif_skcipher. So let's drop the accelerated CTR mode implementation, and instead, rely on the CTR template and the bare cipher. 
Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/twofish-avx-x86_64-asm_64.S | 27 ---------- arch/x86/crypto/twofish_avx_glue.c | 38 -------------- arch/x86/crypto/twofish_glue_3way.c | 78 ----------------------------- arch/x86/include/asm/crypto/twofish.h | 4 -- crypto/Kconfig | 2 + 5 files changed, 2 insertions(+), 147 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index 84e61ef03638..37e63b3c664e 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -374,30 +374,3 @@ SYM_FUNC_START(twofish_cbc_dec_8way) FRAME_END ret; SYM_FUNC_END(twofish_cbc_dec_8way) - -SYM_FUNC_START(twofish_ctr_8way) - /* input: - * %rdi: ctx, CTX - * %rsi: dst - * %rdx: src - * %rcx: iv (little endian, 128bit) - */ - FRAME_BEGIN - - pushq %r12; - - movq %rsi, %r11; - movq %rdx, %r12; - - load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RX0, RX1, RY0); - - call __twofish_enc_blk8; - - store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - - popq %r12; - - FRAME_END - ret; -SYM_FUNC_END(twofish_ctr_8way) diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index 7b539bbb108f..13f810b61034 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -25,8 +25,6 @@ asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -55,22 +53,6 @@ static const struct common_glue_ctx twofish_enc = { } } }; -static const struct common_glue_ctx twofish_ctr = { - .num_funcs = 3, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ctr = twofish_ctr_8way } - }, { - .num_blocks = 3, - .fn_u = { .ctr = twofish_enc_blk_ctr_3way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = twofish_enc_blk_ctr } - } } -}; - static const struct common_glue_ctx twofish_dec = { .num_funcs = 3, .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, @@ -123,11 +105,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&twofish_ctr, req); -} - static struct skcipher_alg twofish_algs[] = { { .base.cra_name = "__ecb(twofish)", @@ -156,21 +133,6 @@ static struct skcipher_alg twofish_algs[] = { .setkey = twofish_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "__ctr(twofish)", - .base.cra_driver_name = "__ctr-twofish-avx", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct twofish_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .chunksize = TF_BLOCK_SIZE, - .setkey = twofish_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 768af6075479..88252370db0a 100644 --- 
a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -30,12 +30,6 @@ static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) __twofish_enc_blk_3way(ctx, dst, src, false); } -static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst, - const u8 *src) -{ - __twofish_enc_blk_3way(ctx, dst, src, true); -} - void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s) { u128 ivs[2]; @@ -52,46 +46,6 @@ void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s) } EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); -void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblk; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - if (dst != src) - *dst = *src; - - le128_to_be128(&ctrblk, iv); - le128_inc(iv); - - twofish_enc_blk(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); - u128_xor(dst, dst, (u128 *)&ctrblk); -} -EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr); - -void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv) -{ - be128 ctrblks[3]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - if (dst != src) { - dst[0] = src[0]; - dst[1] = src[1]; - dst[2] = src[2]; - } - - le128_to_be128(&ctrblks[0], iv); - le128_inc(iv); - le128_to_be128(&ctrblks[1], iv); - le128_inc(iv); - le128_to_be128(&ctrblks[2], iv); - le128_inc(iv); - - twofish_enc_blk_xor_3way(ctx, (u8 *)dst, (u8 *)ctrblks); -} -EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr_3way); - static const struct common_glue_ctx twofish_enc = { .num_funcs = 2, .fpu_blocks_limit = -1, @@ -105,19 +59,6 @@ static const struct common_glue_ctx twofish_enc = { } } }; -static const struct common_glue_ctx twofish_ctr = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 3, - .fn_u = { .ctr = twofish_enc_blk_ctr_3way } - }, { - .num_blocks = 1, - .fn_u = { .ctr = twofish_enc_blk_ctr } - } } -}; - static const struct common_glue_ctx twofish_dec = { .num_funcs = 2, .fpu_blocks_limit = -1, @@ -164,11 +105,6 @@ static int cbc_decrypt(struct skcipher_request *req) return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req); } -static int ctr_crypt(struct skcipher_request *req) -{ - return glue_ctr_req_128bit(&twofish_ctr, req); -} - static struct skcipher_alg tf_skciphers[] = { { .base.cra_name = "ecb(twofish)", @@ -195,20 +131,6 @@ static struct skcipher_alg tf_skciphers[] = { .setkey = twofish_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ctr(twofish)", - .base.cra_driver_name = "ctr-twofish-3way", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct twofish_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = TF_MIN_KEY_SIZE, - .max_keysize = TF_MAX_KEY_SIZE, - .ivsize = TF_BLOCK_SIZE, - .chunksize = TF_BLOCK_SIZE, - .setkey = twofish_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h index 2c377a8042e1..12df400e6d53 100644 --- a/arch/x86/include/asm/crypto/twofish.h +++ b/arch/x86/include/asm/crypto/twofish.h @@ -17,9 +17,5 @@ asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src); /* helpers from twofish_x86_64-3way module */ extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src); -extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); -extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src, - le128 *iv); #endif /* 
ASM_X86_TWOFISH_H */ diff --git a/crypto/Kconfig b/crypto/Kconfig index 3f51c5dfc2a9..606f94079f05 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1680,6 +1680,7 @@ config CRYPTO_TWOFISH_586 depends on (X86 || UML_X86) && !64BIT select CRYPTO_ALGAPI select CRYPTO_TWOFISH_COMMON + imply CRYPTO_CTR help Twofish cipher algorithm. @@ -1696,6 +1697,7 @@ config CRYPTO_TWOFISH_X86_64 depends on (X86 || UML_X86) && 64BIT select CRYPTO_ALGAPI select CRYPTO_TWOFISH_COMMON + imply CRYPTO_CTR help Twofish cipher algorithm (x86_64). -- cgit v1.2.3-59-g8ed1b From 768db5fee3bb338174cd078878d3c4ff815a7fcf Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:48:00 +0100 Subject: crypto: x86/des - drop CTR mode implementation DES or Triple DES in counter mode is never used in the kernel, so there is no point in keeping an accelerated implementation around. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/des3_ede_glue.c | 104 ---------------------------------------- crypto/Kconfig | 1 + 2 files changed, 1 insertion(+), 104 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c index 89830e531350..e7cb68a3db3b 100644 --- a/arch/x86/crypto/des3_ede_glue.c +++ b/arch/x86/crypto/des3_ede_glue.c @@ -6,8 +6,6 @@ * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu - * CTR part based on code (crypto/ctr.c) by: - * (C) Copyright IBM Corp. 2007 - Joy Latten */ #include @@ -253,94 +251,6 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx, - struct skcipher_walk *walk) -{ - u8 *ctrblk = walk->iv; - u8 keystream[DES3_EDE_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - des3_ede_enc_blk(ctx, keystream, ctrblk); - crypto_xor_cpy(dst, keystream, src, nbytes); - - crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE); -} - -static unsigned int __ctr_crypt(struct des3_ede_x86_ctx *ctx, - struct skcipher_walk *walk) -{ - unsigned int bsize = DES3_EDE_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - __be64 *src = (__be64 *)walk->src.virt.addr; - __be64 *dst = (__be64 *)walk->dst.virt.addr; - u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); - __be64 ctrblocks[3]; - - /* Process four block batch */ - if (nbytes >= bsize * 3) { - do { - /* create ctrblks for parallel encrypt */ - ctrblocks[0] = cpu_to_be64(ctrblk++); - ctrblocks[1] = cpu_to_be64(ctrblk++); - ctrblocks[2] = cpu_to_be64(ctrblk++); - - des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks, - (u8 *)ctrblocks); - - dst[0] = src[0] ^ ctrblocks[0]; - dst[1] = src[1] ^ ctrblocks[1]; - dst[2] = src[2] ^ ctrblocks[2]; - - src += 3; - dst += 3; - } while ((nbytes -= bsize * 3) >= bsize * 3); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - ctrblocks[0] = cpu_to_be64(ctrblk++); - - des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); - - dst[0] = src[0] ^ ctrblocks[0]; - - src += 1; - dst += 1; - } while ((nbytes -= bsize) >= bsize); - -done: - *(__be64 *)walk->iv = cpu_to_be64(ctrblk); - return nbytes; -} - -static int ctr_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) { - 
nbytes = __ctr_crypt(ctx, &walk); - err = skcipher_walk_done(&walk, nbytes); - } - - if (nbytes) { - ctr_crypt_final(ctx, &walk); - err = skcipher_walk_done(&walk, 0); - } - - return err; -} - static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { @@ -428,20 +338,6 @@ static struct skcipher_alg des3_ede_skciphers[] = { .setkey = des3_ede_x86_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ctr(des3_ede)", - .base.cra_driver_name = "ctr-des3_ede-asm", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .chunksize = DES3_EDE_BLOCK_SIZE, - .setkey = des3_ede_x86_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, } }; diff --git a/crypto/Kconfig b/crypto/Kconfig index 606f94079f05..5e820a57d138 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1427,6 +1427,7 @@ config CRYPTO_DES3_EDE_X86_64 depends on X86 && 64BIT select CRYPTO_SKCIPHER select CRYPTO_LIB_DES + imply CRYPTO_CTR help Triple DES EDE (FIPS 46-3) algorithm. -- cgit v1.2.3-59-g8ed1b From c0a64926c53e05fc6f69c7d632967606defe5f61 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:48:01 +0100 Subject: crypto: x86/blowfish - drop CTR mode implementation Blowfish in counter mode is never used in the kernel, so there is no point in keeping an accelerated implementation around. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/blowfish_glue.c | 107 ---------------------------------------- crypto/Kconfig | 1 + 2 files changed, 1 insertion(+), 107 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c index cedfdba69ce3..a880e0b1c255 100644 --- a/arch/x86/crypto/blowfish_glue.c +++ b/arch/x86/crypto/blowfish_glue.c @@ -6,8 +6,6 @@ * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu - * CTR part based on code (crypto/ctr.c) by: - * (C) Copyright IBM Corp. 
2007 - Joy Latten */ #include @@ -247,97 +245,6 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk) -{ - u8 *ctrblk = walk->iv; - u8 keystream[BF_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - blowfish_enc_blk(ctx, keystream, ctrblk); - crypto_xor_cpy(dst, keystream, src, nbytes); - - crypto_inc(ctrblk, BF_BLOCK_SIZE); -} - -static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk) -{ - unsigned int bsize = BF_BLOCK_SIZE; - unsigned int nbytes = walk->nbytes; - u64 *src = (u64 *)walk->src.virt.addr; - u64 *dst = (u64 *)walk->dst.virt.addr; - u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); - __be64 ctrblocks[4]; - - /* Process four block batch */ - if (nbytes >= bsize * 4) { - do { - if (dst != src) { - dst[0] = src[0]; - dst[1] = src[1]; - dst[2] = src[2]; - dst[3] = src[3]; - } - - /* create ctrblks for parallel encrypt */ - ctrblocks[0] = cpu_to_be64(ctrblk++); - ctrblocks[1] = cpu_to_be64(ctrblk++); - ctrblocks[2] = cpu_to_be64(ctrblk++); - ctrblocks[3] = cpu_to_be64(ctrblk++); - - blowfish_enc_blk_xor_4way(ctx, (u8 *)dst, - (u8 *)ctrblocks); - - src += 4; - dst += 4; - } while ((nbytes -= bsize * 4) >= bsize * 4); - - if (nbytes < bsize) - goto done; - } - - /* Handle leftovers */ - do { - if (dst != src) - *dst = *src; - - ctrblocks[0] = cpu_to_be64(ctrblk++); - - blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks); - - src += 1; - dst += 1; - } while ((nbytes -= bsize) >= bsize); - -done: - *(__be64 *)walk->iv = cpu_to_be64(ctrblk); - return nbytes; -} - -static int ctr_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct bf_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) { - nbytes = __ctr_crypt(ctx, &walk); - err = skcipher_walk_done(&walk, nbytes); - } - - if (nbytes) { - ctr_crypt_final(ctx, &walk); - err = skcipher_walk_done(&walk, 0); - } - - return err; -} - static struct crypto_alg bf_cipher_alg = { .cra_name = "blowfish", .cra_driver_name = "blowfish-asm", @@ -384,20 +291,6 @@ static struct skcipher_alg bf_skcipher_algs[] = { .setkey = blowfish_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base.cra_name = "ctr(blowfish)", - .base.cra_driver_name = "ctr-blowfish-asm", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct bf_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = BF_MIN_KEY_SIZE, - .max_keysize = BF_MAX_KEY_SIZE, - .ivsize = BF_BLOCK_SIZE, - .chunksize = BF_BLOCK_SIZE, - .setkey = blowfish_setkey_skcipher, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, }, }; diff --git a/crypto/Kconfig b/crypto/Kconfig index 5e820a57d138..24c0e001d06d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1255,6 +1255,7 @@ config CRYPTO_BLOWFISH_X86_64 depends on X86 && 64BIT select CRYPTO_SKCIPHER select CRYPTO_BLOWFISH_COMMON + imply CRYPTO_CTR help Blowfish cipher algorithm (x86_64), by Bruce Schneier. 
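A note on the "imply CRYPTO_CTR" hunks in this series: "imply" defaults CRYPTO_CTR to the value of the implying symbol while still allowing it to be switched off, so configurations that enable the accelerated cipher keep "ctr(blowfish)" instantiable via the template. A minimal sketch of such an instantiation using the standard skcipher allocation API follows; the self-test function name is made up for illustration, but crypto_alloc_skcipher() and crypto_free_skcipher() are the real kernel API.

	#include <crypto/skcipher.h>
	#include <linux/err.h>

	/* "ctr(blowfish)" now resolves to the ctr template wrapped
	 * around the bare blowfish cipher instead of the removed
	 * ctr-blowfish-asm implementation. */
	static int ctr_blowfish_selftest(void)
	{
		struct crypto_skcipher *tfm;

		tfm = crypto_alloc_skcipher("ctr(blowfish)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_free_skcipher(tfm);
		return 0;
	}
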
-- cgit v1.2.3-59-g8ed1b From 407d409a8102a5ba042215aed7b2ef2d6e6c67a8 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:48:03 +0100 Subject: crypto: x86/camellia - drop dependency on glue helper Replace the glue helper dependency with implementations of ECB and CBC based on the new CPP macros, which avoid the need for indirect calls. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/camellia_aesni_avx2_glue.c | 85 ++++++++---------------------- arch/x86/crypto/camellia_aesni_avx_glue.c | 73 +++++++------------------ arch/x86/crypto/camellia_glue.c | 75 +++++++++----------------- crypto/Kconfig | 2 - 4 files changed, 67 insertions(+), 168 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index 8f25a2a6222e..ef5c0f094584 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -6,7 +6,6 @@ */ #include -#include #include #include #include @@ -14,6 +13,8 @@ #include #include +#include "ecb_cbc_helpers.h" + #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32 @@ -23,63 +24,6 @@ asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src); -static const struct common_glue_ctx camellia_enc = { - .num_funcs = 4, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_enc_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_enc_16way } - }, { - .num_blocks = 2, - .fn_u = { .ecb = camellia_enc_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_enc_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec = { - .num_funcs = 4, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_dec_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_dec_16way } - }, { - .num_blocks = 2, - .fn_u = { .ecb = camellia_dec_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_dec_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec_cbc = { - .num_funcs = 4, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .cbc = camellia_cbc_dec_32way } - }, { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .cbc = camellia_cbc_dec_16way } - }, { - .num_blocks = 2, - .fn_u = { .cbc = camellia_decrypt_cbc_2way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = camellia_dec_blk } - } } -}; - static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -88,22 +32,39 @@ static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_enc, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + ECB_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_ecb_enc_32way); + ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_enc_16way); + ECB_BLOCK(2, camellia_enc_blk_2way); + ECB_BLOCK(1, camellia_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_dec, 
req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + ECB_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_ecb_dec_32way); + ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_dec_16way); + ECB_BLOCK(2, camellia_dec_blk_2way); + ECB_BLOCK(1, camellia_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(camellia_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_cbc_dec_32way); + CBC_DEC_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_cbc_dec_16way); + CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way); + CBC_DEC_BLOCK(1, camellia_dec_blk); + CBC_WALK_END(); } static struct skcipher_alg camellia_algs[] = { diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 22a89cdfedfb..68fed0a79889 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -6,7 +6,6 @@ */ #include -#include #include #include #include @@ -14,6 +13,8 @@ #include #include +#include "ecb_cbc_helpers.h" + #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 /* 16-way parallel cipher functions (avx/aes-ni) */ @@ -26,54 +27,6 @@ EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way); asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way); -static const struct common_glue_ctx camellia_enc = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_enc_16way } - }, { - .num_blocks = 2, - .fn_u = { .ecb = camellia_enc_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_enc_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = camellia_ecb_dec_16way } - }, { - .num_blocks = 2, - .fn_u = { .ecb = camellia_dec_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_dec_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec_cbc = { - .num_funcs = 3, - .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .cbc = camellia_cbc_dec_16way } - }, { - .num_blocks = 2, - .fn_u = { .cbc = camellia_decrypt_cbc_2way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = camellia_dec_blk } - } } -}; - static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -82,22 +35,36 @@ static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_enc, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_enc_16way); + ECB_BLOCK(2, camellia_enc_blk_2way); + ECB_BLOCK(1, camellia_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_dec, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + 
ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_dec_16way); + ECB_BLOCK(2, camellia_dec_blk_2way); + ECB_BLOCK(1, camellia_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(camellia_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_cbc_dec_16way); + CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way); + CBC_DEC_BLOCK(1, camellia_dec_blk); + CBC_WALK_END(); } static struct skcipher_alg camellia_algs[] = { diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index fefeedf2b33d..0bc00ce68484 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -15,7 +15,8 @@ #include #include #include -#include + +#include "ecb_cbc_helpers.h" /* regular block cipher functions */ asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, @@ -1262,75 +1263,47 @@ static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, return camellia_setkey(&tfm->base, key, key_len); } -void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s) +void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src) { - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - u128 iv = *src; - - camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); + u8 buf[CAMELLIA_BLOCK_SIZE]; + const u8 *iv = src; - u128_xor(&dst[1], &dst[1], &iv); + if (dst == src) + iv = memcpy(buf, iv, sizeof(buf)); + camellia_dec_blk_2way(ctx, dst, src); + crypto_xor(dst + CAMELLIA_BLOCK_SIZE, iv, CAMELLIA_BLOCK_SIZE); } EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way); -static const struct common_glue_ctx camellia_enc = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 2, - .fn_u = { .ecb = camellia_enc_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_enc_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 2, - .fn_u = { .ecb = camellia_dec_blk_2way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = camellia_dec_blk } - } } -}; - -static const struct common_glue_ctx camellia_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 2, - .fn_u = { .cbc = camellia_decrypt_cbc_2way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = camellia_dec_blk } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_enc, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + ECB_BLOCK(2, camellia_enc_blk_2way); + ECB_BLOCK(1, camellia_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&camellia_dec, req); + ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + ECB_BLOCK(2, camellia_dec_blk_2way); + ECB_BLOCK(1, camellia_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); + CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(camellia_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req); + CBC_WALK_START(req, 
CAMELLIA_BLOCK_SIZE, -1); + CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way); + CBC_DEC_BLOCK(1, camellia_dec_blk); + CBC_WALK_END(); } static struct crypto_alg camellia_cipher_alg = { diff --git a/crypto/Kconfig b/crypto/Kconfig index 24c0e001d06d..f8518ff389bb 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1286,7 +1286,6 @@ config CRYPTO_CAMELLIA_X86_64 depends on X86 && 64BIT depends on CRYPTO select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 imply CRYPTO_CTR help Camellia cipher algorithm module (x86_64). @@ -1305,7 +1304,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 depends on CRYPTO select CRYPTO_SKCIPHER select CRYPTO_CAMELLIA_X86_64 - select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SIMD imply CRYPTO_XTS help -- cgit v1.2.3-59-g8ed1b From 9ad58b46f814edd5b8b288b66f94cf57c97eaea3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:48:04 +0100 Subject: crypto: x86/serpent - drop dependency on glue helper Replace the glue helper dependency with implementations of ECB and CBC based on the new CPP macros, which avoid the need for indirect calls. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/serpent_avx2_glue.c | 73 +++++++++------------------------ arch/x86/crypto/serpent_avx_glue.c | 61 ++++++++-------------------- arch/x86/crypto/serpent_sse2_glue.c | 81 +++++++++++-------------------------- crypto/Kconfig | 3 -- 4 files changed, 61 insertions(+), 157 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 28e542c6512a..261c9ac2d762 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -12,9 +12,10 @@ #include #include #include -#include #include +#include "ecb_cbc_helpers.h" + #define SERPENT_AVX2_PARALLEL_BLOCKS 16 /* 16-way AVX2 parallel cipher functions */ @@ -28,72 +29,38 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -static const struct common_glue_ctx serpent_enc = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .ecb = serpent_ecb_enc_16way } - }, { - .num_blocks = 8, - .fn_u = { .ecb = serpent_ecb_enc_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_encrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .ecb = serpent_ecb_dec_16way } - }, { - .num_blocks = 8, - .fn_u = { .ecb = serpent_ecb_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_decrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec_cbc = { - .num_funcs = 3, - .fpu_blocks_limit = 8, - - .funcs = { { - .num_blocks = 16, - .fn_u = { .cbc = serpent_cbc_dec_16way } - }, { - .num_blocks = 8, - .fn_u = { .cbc = serpent_cbc_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .cbc = __serpent_decrypt } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_enc, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_enc_16way); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx); + ECB_BLOCK(1, __serpent_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_dec, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + 
ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_dec_16way); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx); + ECB_BLOCK(1, __serpent_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__serpent_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_cbc_dec_16way); + CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx); + CBC_DEC_BLOCK(1, __serpent_decrypt); + CBC_WALK_END(); } static struct skcipher_alg serpent_algs[] = { diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index aa4605baf9d4..5fe01d2a5b1d 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -15,9 +15,10 @@ #include #include #include -#include #include +#include "ecb_cbc_helpers.h" + /* 8-way parallel cipher functions */ asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src); @@ -37,63 +38,35 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -static const struct common_glue_ctx serpent_enc = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = serpent_ecb_enc_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_encrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = serpent_ecb_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_decrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .cbc = serpent_cbc_dec_8way_avx } - }, { - .num_blocks = 1, - .fn_u = { .cbc = __serpent_decrypt } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_enc, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx); + ECB_BLOCK(1, __serpent_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_dec, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx); + ECB_BLOCK(1, __serpent_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__serpent_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx); + CBC_DEC_BLOCK(1, __serpent_decrypt); + CBC_WALK_END(); } static struct skcipher_alg serpent_algs[] = { diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c 
index 9acb3bf28feb..e28d60949c16 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -21,7 +21,8 @@ #include #include #include -#include + +#include "ecb_cbc_helpers.h" static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -29,80 +30,46 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s) +static void serpent_decrypt_cbc_xway(const void *ctx, u8 *dst, const u8 *src) { - u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - unsigned int j; - - for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) - ivs[j] = src[j]; + u8 buf[SERPENT_PARALLEL_BLOCKS - 1][SERPENT_BLOCK_SIZE]; + const u8 *s = src; - serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src); - - for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) - u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); + if (dst == src) + s = memcpy(buf, src, sizeof(buf)); + serpent_dec_blk_xway(ctx, dst, src); + crypto_xor(dst + SERPENT_BLOCK_SIZE, s, sizeof(buf)); } -static const struct common_glue_ctx serpent_enc = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = serpent_enc_blk_xway } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_encrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = serpent_dec_blk_xway } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __serpent_decrypt } - } } -}; - -static const struct common_glue_ctx serpent_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .cbc = serpent_decrypt_cbc_xway } - }, { - .num_blocks = 1, - .fn_u = { .cbc = __serpent_decrypt } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_enc, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_enc_blk_xway); + ECB_BLOCK(1, __serpent_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&serpent_dec, req); + ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_dec_blk_xway); + ECB_BLOCK(1, __serpent_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(__serpent_encrypt, - req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__serpent_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req); + CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_decrypt_cbc_xway); + CBC_DEC_BLOCK(1, __serpent_decrypt); + CBC_WALK_END(); } static struct skcipher_alg serpent_algs[] = { diff --git a/crypto/Kconfig b/crypto/Kconfig index f8518ff389bb..29dce7efc443 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1538,7 +1538,6 @@ config CRYPTO_SERPENT_SSE2_X86_64 tristate "Serpent cipher algorithm (x86_64/SSE2)" depends on X86 && 64BIT select CRYPTO_SKCIPHER - select 
CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_SIMD imply CRYPTO_CTR @@ -1558,7 +1557,6 @@ config CRYPTO_SERPENT_SSE2_586 tristate "Serpent cipher algorithm (i586/SSE2)" depends on X86 && !64BIT select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_SIMD imply CRYPTO_CTR @@ -1578,7 +1576,6 @@ config CRYPTO_SERPENT_AVX_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX)" depends on X86 && 64BIT select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_SIMD imply CRYPTO_XTS -- cgit v1.2.3-59-g8ed1b From ea55cfc3f920c95ee8d01ddc51e586b09a1194ee Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:48:06 +0100 Subject: crypto: x86/cast6 - drop dependency on glue helper Replace the glue helper dependency with implementations of ECB and CBC based on the new CPP macros, which avoid the need for indirect calls. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/cast6_avx_glue.c | 61 +++++++++++----------------------------- crypto/Kconfig | 1 - 2 files changed, 17 insertions(+), 45 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 790efcb6df3b..7e2aea372349 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -15,7 +15,8 @@ #include #include #include -#include + +#include "ecb_cbc_helpers.h" #define CAST6_PARALLEL_BLOCKS 8 @@ -30,63 +31,35 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, return cast6_setkey(&tfm->base, key, keylen); } -static const struct common_glue_ctx cast6_enc = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ecb = cast6_ecb_enc_8way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __cast6_encrypt } - } } -}; - -static const struct common_glue_ctx cast6_dec = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ecb = cast6_ecb_dec_8way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = __cast6_decrypt } - } } -}; - -static const struct common_glue_ctx cast6_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .cbc = cast6_cbc_dec_8way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = __cast6_decrypt } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&cast6_enc, req); + ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); + ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_enc_8way); + ECB_BLOCK(1, __cast6_encrypt); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&cast6_dec, req); + ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); + ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_dec_8way); + ECB_BLOCK(1, __cast6_decrypt); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req); + CBC_WALK_START(req, CAST6_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(__cast6_encrypt); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req); + CBC_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_cbc_dec_8way); + CBC_DEC_BLOCK(1, __cast6_decrypt); + CBC_WALK_END(); } static struct 
skcipher_alg cast6_algs[] = { diff --git a/crypto/Kconfig b/crypto/Kconfig index 29dce7efc443..25101558acb5 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1393,7 +1393,6 @@ config CRYPTO_CAST6_AVX_X86_64 select CRYPTO_SKCIPHER select CRYPTO_CAST6 select CRYPTO_CAST_COMMON - select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SIMD imply CRYPTO_XTS imply CRYPTO_CTR -- cgit v1.2.3-59-g8ed1b From 165f357334cc92435aa9b5c9161567e0d0ab8f2a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:48:07 +0100 Subject: crypto: x86/twofish - drop dependency on glue helper Replace the glue helper dependency with implementations of ECB and CBC based on the new CPP macros, which avoid the need for indirect calls. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/twofish_avx_glue.c | 73 ++++++++++----------------------- arch/x86/crypto/twofish_glue_3way.c | 80 +++++++++++-------------------------- crypto/Kconfig | 2 - 3 files changed, 44 insertions(+), 111 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index 13f810b61034..6ce198f808a5 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -15,9 +15,10 @@ #include #include #include -#include #include +#include "ecb_cbc_helpers.h" + #define TWOFISH_PARALLEL_BLOCKS 8 /* 8-way parallel cipher functions */ @@ -37,72 +38,38 @@ static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) __twofish_enc_blk_3way(ctx, dst, src, false); } -static const struct common_glue_ctx twofish_enc = { - .num_funcs = 3, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ecb = twofish_ecb_enc_8way } - }, { - .num_blocks = 3, - .fn_u = { .ecb = twofish_enc_blk_3way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = twofish_enc_blk } - } } -}; - -static const struct common_glue_ctx twofish_dec = { - .num_funcs = 3, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ecb = twofish_ecb_dec_8way } - }, { - .num_blocks = 3, - .fn_u = { .ecb = twofish_dec_blk_3way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = twofish_dec_blk } - } } -}; - -static const struct common_glue_ctx twofish_dec_cbc = { - .num_funcs = 3, - .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, - - .funcs = { { - .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .cbc = twofish_cbc_dec_8way } - }, { - .num_blocks = 3, - .fn_u = { .cbc = twofish_dec_blk_cbc_3way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = twofish_dec_blk } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&twofish_enc, req); + ECB_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS); + ECB_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_ecb_enc_8way); + ECB_BLOCK(3, twofish_enc_blk_3way); + ECB_BLOCK(1, twofish_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&twofish_dec, req); + ECB_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS); + ECB_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_ecb_dec_8way); + ECB_BLOCK(3, twofish_dec_blk_3way); + ECB_BLOCK(1, twofish_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); + CBC_WALK_START(req, TF_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(twofish_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct 
skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req); + CBC_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS); + CBC_DEC_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_cbc_dec_8way); + CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way); + CBC_DEC_BLOCK(1, twofish_dec_blk); + CBC_WALK_END(); } static struct skcipher_alg twofish_algs[] = { diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 88252370db0a..d1fdefa5195a 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -5,17 +5,16 @@ * Copyright (c) 2011 Jussi Kivilinna */ -#include #include #include -#include -#include #include #include #include #include #include +#include "ecb_cbc_helpers.h" + EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way); EXPORT_SYMBOL_GPL(twofish_dec_blk_3way); @@ -30,79 +29,48 @@ static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) __twofish_enc_blk_3way(ctx, dst, src, false); } -void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s) +void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src) { - u128 ivs[2]; - u128 *dst = (u128 *)d; - const u128 *src = (const u128 *)s; - - ivs[0] = src[0]; - ivs[1] = src[1]; + u8 buf[2][TF_BLOCK_SIZE]; + const u8 *s = src; - twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); + if (dst == src) + s = memcpy(buf, src, sizeof(buf)); + twofish_dec_blk_3way(ctx, dst, src); + crypto_xor(dst + TF_BLOCK_SIZE, s, sizeof(buf)); - u128_xor(&dst[1], &dst[1], &ivs[0]); - u128_xor(&dst[2], &dst[2], &ivs[1]); } EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); -static const struct common_glue_ctx twofish_enc = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 3, - .fn_u = { .ecb = twofish_enc_blk_3way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = twofish_enc_blk } - } } -}; - -static const struct common_glue_ctx twofish_dec = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 3, - .fn_u = { .ecb = twofish_dec_blk_3way } - }, { - .num_blocks = 1, - .fn_u = { .ecb = twofish_dec_blk } - } } -}; - -static const struct common_glue_ctx twofish_dec_cbc = { - .num_funcs = 2, - .fpu_blocks_limit = -1, - - .funcs = { { - .num_blocks = 3, - .fn_u = { .cbc = twofish_dec_blk_cbc_3way } - }, { - .num_blocks = 1, - .fn_u = { .cbc = twofish_dec_blk } - } } -}; - static int ecb_encrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&twofish_enc, req); + ECB_WALK_START(req, TF_BLOCK_SIZE, -1); + ECB_BLOCK(3, twofish_enc_blk_3way); + ECB_BLOCK(1, twofish_enc_blk); + ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { - return glue_ecb_req_128bit(&twofish_dec, req); + ECB_WALK_START(req, TF_BLOCK_SIZE, -1); + ECB_BLOCK(3, twofish_dec_blk_3way); + ECB_BLOCK(1, twofish_dec_blk); + ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); + CBC_WALK_START(req, TF_BLOCK_SIZE, -1); + CBC_ENC_BLOCK(twofish_enc_blk); + CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { - return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req); + CBC_WALK_START(req, TF_BLOCK_SIZE, -1); + CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way); + CBC_DEC_BLOCK(1, twofish_dec_blk); + CBC_WALK_END(); } static struct skcipher_alg tf_skciphers[] = { diff --git a/crypto/Kconfig b/crypto/Kconfig index 25101558acb5..b2182658c55e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1711,7 +1711,6 @@ config CRYPTO_TWOFISH_X86_64_3WAY 
select CRYPTO_SKCIPHER select CRYPTO_TWOFISH_COMMON select CRYPTO_TWOFISH_X86_64 - select CRYPTO_GLUE_HELPER_X86 help Twofish cipher algorithm (x86_64, 3-way parallel). @@ -1730,7 +1729,6 @@ config CRYPTO_TWOFISH_AVX_X86_64 tristate "Twofish cipher algorithm (x86_64/AVX)" depends on X86 && 64BIT select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SIMD select CRYPTO_TWOFISH_COMMON select CRYPTO_TWOFISH_X86_64 -- cgit v1.2.3-59-g8ed1b From 64ca771cd6bf48bd01f630ad1440ab151d1d19d5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 5 Jan 2021 17:48:08 +0100 Subject: crypto: x86 - remove glue helper module All dependencies on the x86 glue helper module have been replaced by local instantiations of the new ECB/CBC preprocessor helper macros, so the glue helper module can be retired. Acked-by: Eric Biggers Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 2 - arch/x86/crypto/glue_helper.c | 155 ------------------------------ arch/x86/include/asm/crypto/glue_helper.h | 74 -------------- crypto/Kconfig | 5 - crypto/skcipher.c | 6 -- include/crypto/internal/skcipher.h | 1 - 6 files changed, 243 deletions(-) delete mode 100644 arch/x86/crypto/glue_helper.c delete mode 100644 arch/x86/include/asm/crypto/glue_helper.h (limited to 'crypto') diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index a31de0c6ccde..b28e36b7c96b 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -4,8 +4,6 @@ OBJECT_FILES_NON_STANDARD := y -obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o - obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c deleted file mode 100644 index 895d34150c3f..000000000000 --- a/arch/x86/crypto/glue_helper.c +++ /dev/null @@ -1,155 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Shared glue code for 128bit block ciphers - * - * Copyright © 2012-2013 Jussi Kivilinna - * - * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: - * Copyright (c) 2006 Herbert Xu - */ - -#include -#include -#include -#include -#include - -int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req) -{ - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; - bool fpu_enabled = false; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - unsigned int func_bytes; - unsigned int i; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, nbytes); - for (i = 0; i < gctx->num_funcs; i++) { - func_bytes = bsize * gctx->funcs[i].num_blocks; - - if (nbytes < func_bytes) - continue; - - /* Process multi-block batch */ - do { - gctx->funcs[i].fn_u.ecb(ctx, dst, src); - src += func_bytes; - dst += func_bytes; - nbytes -= func_bytes; - } while (nbytes >= func_bytes); - - if (nbytes < bsize) - break; - } - err = skcipher_walk_done(&walk, nbytes); - } - - glue_fpu_end(fpu_enabled); - return err; -} -EXPORT_SYMBOL_GPL(glue_ecb_req_128bit); - -int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn, - struct skcipher_request *req) -{ - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; - 
unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - const u128 *src = (u128 *)walk.src.virt.addr; - u128 *dst = (u128 *)walk.dst.virt.addr; - u128 *iv = (u128 *)walk.iv; - - do { - u128_xor(dst, src, iv); - fn(ctx, (u8 *)dst, (u8 *)dst); - iv = dst; - src++; - dst++; - nbytes -= bsize; - } while (nbytes >= bsize); - - *(u128 *)walk.iv = *iv; - err = skcipher_walk_done(&walk, nbytes); - } - return err; -} -EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit); - -int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req) -{ - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; - bool fpu_enabled = false; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { - const u128 *src = walk.src.virt.addr; - u128 *dst = walk.dst.virt.addr; - unsigned int func_bytes, num_blocks; - unsigned int i; - u128 last_iv; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, nbytes); - /* Start of the last block. */ - src += nbytes / bsize - 1; - dst += nbytes / bsize - 1; - - last_iv = *src; - - for (i = 0; i < gctx->num_funcs; i++) { - num_blocks = gctx->funcs[i].num_blocks; - func_bytes = bsize * num_blocks; - - if (nbytes < func_bytes) - continue; - - /* Process multi-block batch */ - do { - src -= num_blocks - 1; - dst -= num_blocks - 1; - - gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst, - (const u8 *)src); - - nbytes -= func_bytes; - if (nbytes < bsize) - goto done; - - u128_xor(dst, dst, --src); - dst--; - } while (nbytes >= func_bytes); - } -done: - u128_xor(dst, dst, (u128 *)walk.iv); - *(u128 *)walk.iv = last_iv; - err = skcipher_walk_done(&walk, nbytes); - } - - glue_fpu_end(fpu_enabled); - return err; -} -EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit); - -MODULE_LICENSE("GPL"); diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h deleted file mode 100644 index 23e09efd2aa6..000000000000 --- a/arch/x86/include/asm/crypto/glue_helper.h +++ /dev/null @@ -1,74 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Shared glue code for 128bit block ciphers - */ - -#ifndef _CRYPTO_GLUE_HELPER_H -#define _CRYPTO_GLUE_HELPER_H - -#include -#include -#include - -typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src); -typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src); - -struct common_glue_func_entry { - unsigned int num_blocks; /* number of blocks that @fn will process */ - union { - common_glue_func_t ecb; - common_glue_cbc_func_t cbc; - } fn_u; -}; - -struct common_glue_ctx { - unsigned int num_funcs; - int fpu_blocks_limit; /* -1 means fpu not needed at all */ - - /* - * First funcs entry must have largest num_blocks and last funcs entry - * must have num_blocks == 1! - */ - struct common_glue_func_entry funcs[]; -}; - -static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit, - struct skcipher_walk *walk, - bool fpu_enabled, unsigned int nbytes) -{ - if (likely(fpu_blocks_limit < 0)) - return false; - - if (fpu_enabled) - return true; - - /* - * Vector-registers are only used when chunk to be processed is large - * enough, so do not enable FPU until it is necessary. 
- */ - if (nbytes < bsize * (unsigned int)fpu_blocks_limit) - return false; - - /* prevent sleeping if FPU is in use */ - skcipher_walk_atomise(walk); - - kernel_fpu_begin(); - return true; -} - -static inline void glue_fpu_end(bool fpu_enabled) -{ - if (fpu_enabled) - kernel_fpu_end(); -} - -extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req); - -extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn, - struct skcipher_request *req); - -extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, - struct skcipher_request *req); - -#endif /* _CRYPTO_GLUE_HELPER_H */ diff --git a/crypto/Kconfig b/crypto/Kconfig index b2182658c55e..94f0fde06b94 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -210,11 +210,6 @@ config CRYPTO_SIMD tristate select CRYPTO_CRYPTD -config CRYPTO_GLUE_HELPER_X86 - tristate - depends on X86 - select CRYPTO_SKCIPHER - config CRYPTO_ENGINE tristate diff --git a/crypto/skcipher.c b/crypto/skcipher.c index ff16d05644c7..a15376245416 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -491,12 +491,6 @@ int skcipher_walk_virt(struct skcipher_walk *walk, } EXPORT_SYMBOL_GPL(skcipher_walk_virt); -void skcipher_walk_atomise(struct skcipher_walk *walk) -{ - walk->flags &= ~SKCIPHER_WALK_SLEEP; -} -EXPORT_SYMBOL_GPL(skcipher_walk_atomise); - int skcipher_walk_async(struct skcipher_walk *walk, struct skcipher_request *req) { diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 9dd6c0c17eb8..a2339f80a615 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -133,7 +133,6 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err); int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, bool atomic); -void skcipher_walk_atomise(struct skcipher_walk *walk); int skcipher_walk_async(struct skcipher_walk *walk, struct skcipher_request *req); int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, -- cgit v1.2.3-59-g8ed1b From b21b9a5e0aef025aafd2c57622a5f0cb9562c886 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 21 Jan 2021 14:07:29 +0100 Subject: crypto: rmd128 - remove RIPE-MD 128 hash algorithm RIPE-MD 128 is never referenced anywhere in the kernel, and unlikely to be depended upon by userspace via AF_ALG. So let's remove it. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 13 --- crypto/Makefile | 1 - crypto/ripemd.h | 3 - crypto/rmd128.c | 323 ------------------------------------------------------- crypto/tcrypt.c | 18 +--- crypto/testmgr.c | 12 --- crypto/testmgr.h | 137 ----------------------- 7 files changed, 1 insertion(+), 506 deletions(-) delete mode 100644 crypto/rmd128.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 94f0fde06b94..a14da8290abb 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -817,19 +817,6 @@ config CRYPTO_MICHAEL_MIC should not be used for other purposes because of the weakness of the algorithm. -config CRYPTO_RMD128 - tristate "RIPEMD-128 digest algorithm" - select CRYPTO_HASH - help - RIPEMD-128 (ISO/IEC 10118-3:2004). - - RIPEMD-128 is a 128-bit cryptographic hash function. It should only - be used as a secure replacement for RIPEMD. For other use cases, - RIPEMD-160 should be used. - - Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 
- See - config CRYPTO_RMD160 tristate "RIPEMD-160 digest algorithm" select CRYPTO_HASH diff --git a/crypto/Makefile b/crypto/Makefile index b279483fba50..c4d8f86a106c 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -67,7 +67,6 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD5) += md5.o -obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o diff --git a/crypto/ripemd.h b/crypto/ripemd.h index 93edbf52197d..0f66e3c86a2b 100644 --- a/crypto/ripemd.h +++ b/crypto/ripemd.h @@ -6,9 +6,6 @@ #ifndef _CRYPTO_RMD_H #define _CRYPTO_RMD_H -#define RMD128_DIGEST_SIZE 16 -#define RMD128_BLOCK_SIZE 64 - #define RMD160_DIGEST_SIZE 20 #define RMD160_BLOCK_SIZE 64 diff --git a/crypto/rmd128.c b/crypto/rmd128.c deleted file mode 100644 index 29308fb97e7e..000000000000 --- a/crypto/rmd128.c +++ /dev/null @@ -1,323 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest. - * - * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC - * - * Copyright (c) 2008 Adrian-Ken Rueegsegger - */ -#include -#include -#include -#include -#include -#include - -#include "ripemd.h" - -struct rmd128_ctx { - u64 byte_count; - u32 state[4]; - __le32 buffer[16]; -}; - -#define K1 RMD_K1 -#define K2 RMD_K2 -#define K3 RMD_K3 -#define K4 RMD_K4 -#define KK1 RMD_K6 -#define KK2 RMD_K7 -#define KK3 RMD_K8 -#define KK4 RMD_K1 - -#define F1(x, y, z) (x ^ y ^ z) /* XOR */ -#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ -#define F3(x, y, z) ((x | ~y) ^ z) -#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? 
x : y */ - -#define ROUND(a, b, c, d, f, k, x, s) { \ - (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ - (a) = rol32((a), (s)); \ -} - -static void rmd128_transform(u32 *state, const __le32 *in) -{ - u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd; - - /* Initialize left lane */ - aa = state[0]; - bb = state[1]; - cc = state[2]; - dd = state[3]; - - /* Initialize right lane */ - aaa = state[0]; - bbb = state[1]; - ccc = state[2]; - ddd = state[3]; - - /* round 1: left lane */ - ROUND(aa, bb, cc, dd, F1, K1, in[0], 11); - ROUND(dd, aa, bb, cc, F1, K1, in[1], 14); - ROUND(cc, dd, aa, bb, F1, K1, in[2], 15); - ROUND(bb, cc, dd, aa, F1, K1, in[3], 12); - ROUND(aa, bb, cc, dd, F1, K1, in[4], 5); - ROUND(dd, aa, bb, cc, F1, K1, in[5], 8); - ROUND(cc, dd, aa, bb, F1, K1, in[6], 7); - ROUND(bb, cc, dd, aa, F1, K1, in[7], 9); - ROUND(aa, bb, cc, dd, F1, K1, in[8], 11); - ROUND(dd, aa, bb, cc, F1, K1, in[9], 13); - ROUND(cc, dd, aa, bb, F1, K1, in[10], 14); - ROUND(bb, cc, dd, aa, F1, K1, in[11], 15); - ROUND(aa, bb, cc, dd, F1, K1, in[12], 6); - ROUND(dd, aa, bb, cc, F1, K1, in[13], 7); - ROUND(cc, dd, aa, bb, F1, K1, in[14], 9); - ROUND(bb, cc, dd, aa, F1, K1, in[15], 8); - - /* round 2: left lane */ - ROUND(aa, bb, cc, dd, F2, K2, in[7], 7); - ROUND(dd, aa, bb, cc, F2, K2, in[4], 6); - ROUND(cc, dd, aa, bb, F2, K2, in[13], 8); - ROUND(bb, cc, dd, aa, F2, K2, in[1], 13); - ROUND(aa, bb, cc, dd, F2, K2, in[10], 11); - ROUND(dd, aa, bb, cc, F2, K2, in[6], 9); - ROUND(cc, dd, aa, bb, F2, K2, in[15], 7); - ROUND(bb, cc, dd, aa, F2, K2, in[3], 15); - ROUND(aa, bb, cc, dd, F2, K2, in[12], 7); - ROUND(dd, aa, bb, cc, F2, K2, in[0], 12); - ROUND(cc, dd, aa, bb, F2, K2, in[9], 15); - ROUND(bb, cc, dd, aa, F2, K2, in[5], 9); - ROUND(aa, bb, cc, dd, F2, K2, in[2], 11); - ROUND(dd, aa, bb, cc, F2, K2, in[14], 7); - ROUND(cc, dd, aa, bb, F2, K2, in[11], 13); - ROUND(bb, cc, dd, aa, F2, K2, in[8], 12); - - /* round 3: left lane */ - ROUND(aa, bb, cc, dd, F3, K3, in[3], 11); - ROUND(dd, aa, bb, cc, F3, K3, in[10], 13); - ROUND(cc, dd, aa, bb, F3, K3, in[14], 6); - ROUND(bb, cc, dd, aa, F3, K3, in[4], 7); - ROUND(aa, bb, cc, dd, F3, K3, in[9], 14); - ROUND(dd, aa, bb, cc, F3, K3, in[15], 9); - ROUND(cc, dd, aa, bb, F3, K3, in[8], 13); - ROUND(bb, cc, dd, aa, F3, K3, in[1], 15); - ROUND(aa, bb, cc, dd, F3, K3, in[2], 14); - ROUND(dd, aa, bb, cc, F3, K3, in[7], 8); - ROUND(cc, dd, aa, bb, F3, K3, in[0], 13); - ROUND(bb, cc, dd, aa, F3, K3, in[6], 6); - ROUND(aa, bb, cc, dd, F3, K3, in[13], 5); - ROUND(dd, aa, bb, cc, F3, K3, in[11], 12); - ROUND(cc, dd, aa, bb, F3, K3, in[5], 7); - ROUND(bb, cc, dd, aa, F3, K3, in[12], 5); - - /* round 4: left lane */ - ROUND(aa, bb, cc, dd, F4, K4, in[1], 11); - ROUND(dd, aa, bb, cc, F4, K4, in[9], 12); - ROUND(cc, dd, aa, bb, F4, K4, in[11], 14); - ROUND(bb, cc, dd, aa, F4, K4, in[10], 15); - ROUND(aa, bb, cc, dd, F4, K4, in[0], 14); - ROUND(dd, aa, bb, cc, F4, K4, in[8], 15); - ROUND(cc, dd, aa, bb, F4, K4, in[12], 9); - ROUND(bb, cc, dd, aa, F4, K4, in[4], 8); - ROUND(aa, bb, cc, dd, F4, K4, in[13], 9); - ROUND(dd, aa, bb, cc, F4, K4, in[3], 14); - ROUND(cc, dd, aa, bb, F4, K4, in[7], 5); - ROUND(bb, cc, dd, aa, F4, K4, in[15], 6); - ROUND(aa, bb, cc, dd, F4, K4, in[14], 8); - ROUND(dd, aa, bb, cc, F4, K4, in[5], 6); - ROUND(cc, dd, aa, bb, F4, K4, in[6], 5); - ROUND(bb, cc, dd, aa, F4, K4, in[2], 12); - - /* round 1: right lane */ - ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8); - ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9); - ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9); - 
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11); - ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13); - ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15); - ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15); - ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5); - ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7); - ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7); - ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8); - ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11); - ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14); - ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14); - ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12); - ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6); - - /* round 2: right lane */ - ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9); - ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13); - ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15); - ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7); - ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12); - ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8); - ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9); - ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11); - ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7); - ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7); - ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12); - ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7); - ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6); - ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15); - ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13); - ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11); - - /* round 3: right lane */ - ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9); - ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7); - ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15); - ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11); - ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8); - ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6); - ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6); - ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14); - ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12); - ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13); - ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5); - ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14); - ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13); - ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13); - ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7); - ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5); - - /* round 4: right lane */ - ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15); - ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5); - ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8); - ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11); - ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14); - ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14); - ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6); - ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14); - ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6); - ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9); - ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12); - ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9); - ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12); - ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5); - ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15); - ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8); - - /* combine results */ - ddd += cc + state[1]; /* final result for state[0] */ - state[1] = state[2] + dd + aaa; - state[2] = state[3] + aa + bbb; - state[3] = state[0] + bb + ccc; - state[0] = ddd; -} - -static int rmd128_init(struct shash_desc *desc) -{ - struct rmd128_ctx *rctx = shash_desc_ctx(desc); - - rctx->byte_count = 0; - - rctx->state[0] = RMD_H0; - rctx->state[1] = RMD_H1; - rctx->state[2] = RMD_H2; - rctx->state[3] = RMD_H3; - - memset(rctx->buffer, 0, 
sizeof(rctx->buffer)); - - return 0; -} - -static int rmd128_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct rmd128_ctx *rctx = shash_desc_ctx(desc); - const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); - - rctx->byte_count += len; - - /* Enough space in buffer? If so copy and we're done */ - if (avail > len) { - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, len); - goto out; - } - - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, avail); - - rmd128_transform(rctx->state, rctx->buffer); - data += avail; - len -= avail; - - while (len >= sizeof(rctx->buffer)) { - memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd128_transform(rctx->state, rctx->buffer); - data += sizeof(rctx->buffer); - len -= sizeof(rctx->buffer); - } - - memcpy(rctx->buffer, data, len); - -out: - return 0; -} - -/* Add padding and return the message digest. */ -static int rmd128_final(struct shash_desc *desc, u8 *out) -{ - struct rmd128_ctx *rctx = shash_desc_ctx(desc); - u32 i, index, padlen; - __le64 bits; - __le32 *dst = (__le32 *)out; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_le64(rctx->byte_count << 3); - - /* Pad out to 56 mod 64 */ - index = rctx->byte_count & 0x3f; - padlen = (index < 56) ? (56 - index) : ((64+56) - index); - rmd128_update(desc, padding, padlen); - - /* Append length */ - rmd128_update(desc, (const u8 *)&bits, sizeof(bits)); - - /* Store state in digest */ - for (i = 0; i < 4; i++) - dst[i] = cpu_to_le32p(&rctx->state[i]); - - /* Wipe context */ - memset(rctx, 0, sizeof(*rctx)); - - return 0; -} - -static struct shash_alg alg = { - .digestsize = RMD128_DIGEST_SIZE, - .init = rmd128_init, - .update = rmd128_update, - .final = rmd128_final, - .descsize = sizeof(struct rmd128_ctx), - .base = { - .cra_name = "rmd128", - .cra_driver_name = "rmd128-generic", - .cra_blocksize = RMD128_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init rmd128_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit rmd128_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -subsys_initcall(rmd128_mod_init); -module_exit(rmd128_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Adrian-Ken Rueegsegger "); -MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); -MODULE_ALIAS_CRYPTO("rmd128"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a4a11d2b57bd..bc9e2910f5c3 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -71,7 +71,7 @@ static const char *check[] = { "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", + "camellia", "seed", "salsa20", "rmd160", "rmd256", "rmd320", "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", "streebog256", "streebog512", NULL @@ -1867,10 +1867,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("cts(cbc(aes))"); break; - case 39: - ret += tcrypt_test("rmd128"); - break; - case 40: ret += tcrypt_test("rmd160"); break; @@ -1955,10 +1951,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("xcbc(aes)"); break; - case 107: - ret += tcrypt_test("hmac(rmd128)"); - break; - case 108: ret += tcrypt_test("hmac(rmd160)"); break; @@ -2409,10 +2401,6 @@ static int do_test(const char *alg, u32 type, u32 
mask, int m, u32 num_mb) test_hash_speed("sha224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; fallthrough; - case 314: - test_hash_speed("rmd128", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; case 315: test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; @@ -2533,10 +2521,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed("sha224", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; - case 414: - test_ahash_speed("rmd128", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; case 415: test_ahash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index a896d77e9611..f8a5cec614d6 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4957,12 +4957,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(hmac_md5_tv_template) } - }, { - .alg = "hmac(rmd128)", - .test = alg_test_hash, - .suite = { - .hash = __VECS(hmac_rmd128_tv_template) - } }, { .alg = "hmac(rmd160)", .test = alg_test_hash, @@ -5275,12 +5269,6 @@ static const struct alg_test_desc alg_test_descs[] = { .aad_iv = 1, } } - }, { - .alg = "rmd128", - .test = alg_test_hash, - .suite = { - .hash = __VECS(rmd128_tv_template) - } }, { .alg = "rmd160", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 8c83811c0e35..05807872846c 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3140,66 +3140,6 @@ static const struct hash_testvec md5_tv_template[] = { }; -/* - * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E) - */ -static const struct hash_testvec rmd128_tv_template[] = { - { - .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e" - "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46", - }, { - .plaintext = "a", - .psize = 1, - .digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7" - "\xcf\xc7\x85\xe7\x2f\x57\x8d\x33", - }, { - .plaintext = "abc", - .psize = 3, - .digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba" - "\x84\x63\x6b\x0f\x69\x14\x4c\x77", - }, { - .plaintext = "message digest", - .psize = 14, - .digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62" - "\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8", - }, { - .plaintext = "abcdefghijklmnopqrstuvwxyz", - .psize = 26, - .digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5" - "\x10\x71\x49\x22\xb3\x71\x83\x4e", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" - "fghijklmnopqrstuvwxyz0123456789", - .psize = 62, - .digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f" - "\xae\xa4\x62\x4c\x60\xc5\xc7\x02", - }, { - .plaintext = "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890", - .psize = 80, - .digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb" - "\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3", - }, { - .plaintext = "abcdbcdecdefdefgefghfghighij" - "hijkijkljklmklmnlmnomnopnopq", - .psize = 56, - .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d" - "\xdc\x22\xe8\x8b\x49\x13\x3a\x06", - }, { - .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi" - "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr" - "lmnopqrsmnopqrstnopqrstu", - .psize = 112, - .digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b" - "\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46", - }, { - .plaintext = "abcdbcdecdefdefgefghfghighijhijk", - .psize = 32, - .digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d" - "\xe1\x93\xff\x46\xdb\xac\xcf\xd4", - } -}; - /* * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E) */ @@ -5452,83 
+5392,6 @@ static const struct hash_testvec hmac_md5_tv_template[] = }, }; -/* - * HMAC-RIPEMD128 test vectors from RFC2286 - */ -static const struct hash_testvec hmac_rmd128_tv_template[] = { - { - .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", - .ksize = 16, - .plaintext = "Hi There", - .psize = 8, - .digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf" - "\x81\xc1\x72\xe8\x4e\x07\x34\xdb", - }, { - .key = "Jefe", - .ksize = 4, - .plaintext = "what do ya want for nothing?", - .psize = 28, - .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34" - "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b", - }, { - .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", - .ksize = 16, - .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" - "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" - "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" - "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd", - .psize = 50, - .digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d" - "\xa3\x63\xcb\xec\x8d\x62\xa3\x8d", - }, { - .key = "\x01\x02\x03\x04\x05\x06\x07\x08" - "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" - "\x11\x12\x13\x14\x15\x16\x17\x18\x19", - .ksize = 25, - .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" - "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" - "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" - "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd", - .psize = 50, - .digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a" - "\xa6\x0a\xf8\x15\xbe\x4d\x22\x94", - }, { - .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c", - .ksize = 16, - .plaintext = "Test With Truncation", - .psize = 20, - .digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03" - "\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a", - }, { - .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa", - .ksize = 80, - .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First", - .psize = 54, - .digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a" - "\x1f\x59\xd3\x73\xc1\x50\xac\xbb", - }, { - .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa", - .ksize = 80, - .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One " - "Block-Size Data", - .psize = 73, - .digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4" - "\x06\x90\xc2\x37\x63\x5f\x30\xc5", - }, -}; - /* * HMAC-RIPEMD160 test vectors from RFC2286 */ -- cgit v1.2.3-59-g8ed1b From c15d4167f0b0465b71c0619dc30b122f1b0e5b7a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 21 Jan 2021 14:07:30 +0100 Subject: crypto: rmd256 - remove RIPE-MD 256 hash algorithm RIPE-MD 256 is never referenced anywhere in the kernel, and unlikely to be depended upon by userspace via AF_ALG. 
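For illustration, such a userspace consumer would have to request the algorithm by name through the algif_hash interface; a minimal, purely hypothetical sketch (not taken from any in-tree or known program, error handling trimmed) looks like the following, and once this patch is applied the bind() fails with ENOENT:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "rmd256",	/* removed by this patch */
		};
		unsigned char digest[32];	/* RMD256_DIGEST_SIZE */
		int tfm, op, i;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
			perror("rmd256");	/* now: No such file or directory */
			return 1;
		}

		/* one operation socket per hash invocation */
		op = accept(tfm, NULL, 0);
		send(op, "abc", 3, 0);
		read(op, digest, sizeof(digest));

		for (i = 0; i < (int)sizeof(digest); i++)
			printf("%02x", digest[i]);
		printf("\n");

		close(op);
		close(tfm);
		return 0;
	}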
So let's remove it. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 12 -- crypto/Makefile | 1 - crypto/ripemd.h | 3 - crypto/rmd256.c | 342 ------------------------------------------------------- crypto/tcrypt.c | 14 +-- crypto/testmgr.c | 6 - crypto/testmgr.h | 64 ----------- 7 files changed, 1 insertion(+), 441 deletions(-) delete mode 100644 crypto/rmd256.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index a14da8290abb..8e93dce161b0 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -834,18 +834,6 @@ config CRYPTO_RMD160 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. See -config CRYPTO_RMD256 - tristate "RIPEMD-256 digest algorithm" - select CRYPTO_HASH - help - RIPEMD-256 is an optional extension of RIPEMD-128 with a - 256 bit hash. It is intended for applications that require - longer hash-results, without needing a larger security level - (than RIPEMD-128). - - Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See - config CRYPTO_RMD320 tristate "RIPEMD-320 digest algorithm" select CRYPTO_HASH diff --git a/crypto/Makefile b/crypto/Makefile index c4d8f86a106c..946e821f1874 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -68,7 +68,6 @@ obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD5) += md5.o obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o -obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o diff --git a/crypto/ripemd.h b/crypto/ripemd.h index 0f66e3c86a2b..a19c3c27a466 100644 --- a/crypto/ripemd.h +++ b/crypto/ripemd.h @@ -9,9 +9,6 @@ #define RMD160_DIGEST_SIZE 20 #define RMD160_BLOCK_SIZE 64 -#define RMD256_DIGEST_SIZE 32 -#define RMD256_BLOCK_SIZE 64 - #define RMD320_DIGEST_SIZE 40 #define RMD320_BLOCK_SIZE 64 diff --git a/crypto/rmd256.c b/crypto/rmd256.c deleted file mode 100644 index 3c730e9de5fd..000000000000 --- a/crypto/rmd256.c +++ /dev/null @@ -1,342 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest. - * - * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC - * - * Copyright (c) 2008 Adrian-Ken Rueegsegger - */ -#include -#include -#include -#include -#include -#include - -#include "ripemd.h" - -struct rmd256_ctx { - u64 byte_count; - u32 state[8]; - __le32 buffer[16]; -}; - -#define K1 RMD_K1 -#define K2 RMD_K2 -#define K3 RMD_K3 -#define K4 RMD_K4 -#define KK1 RMD_K6 -#define KK2 RMD_K7 -#define KK3 RMD_K8 -#define KK4 RMD_K1 - -#define F1(x, y, z) (x ^ y ^ z) /* XOR */ -#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ -#define F3(x, y, z) ((x | ~y) ^ z) -#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? 
x : y */ - -#define ROUND(a, b, c, d, f, k, x, s) { \ - (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ - (a) = rol32((a), (s)); \ -} - -static void rmd256_transform(u32 *state, const __le32 *in) -{ - u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd; - - /* Initialize left lane */ - aa = state[0]; - bb = state[1]; - cc = state[2]; - dd = state[3]; - - /* Initialize right lane */ - aaa = state[4]; - bbb = state[5]; - ccc = state[6]; - ddd = state[7]; - - /* round 1: left lane */ - ROUND(aa, bb, cc, dd, F1, K1, in[0], 11); - ROUND(dd, aa, bb, cc, F1, K1, in[1], 14); - ROUND(cc, dd, aa, bb, F1, K1, in[2], 15); - ROUND(bb, cc, dd, aa, F1, K1, in[3], 12); - ROUND(aa, bb, cc, dd, F1, K1, in[4], 5); - ROUND(dd, aa, bb, cc, F1, K1, in[5], 8); - ROUND(cc, dd, aa, bb, F1, K1, in[6], 7); - ROUND(bb, cc, dd, aa, F1, K1, in[7], 9); - ROUND(aa, bb, cc, dd, F1, K1, in[8], 11); - ROUND(dd, aa, bb, cc, F1, K1, in[9], 13); - ROUND(cc, dd, aa, bb, F1, K1, in[10], 14); - ROUND(bb, cc, dd, aa, F1, K1, in[11], 15); - ROUND(aa, bb, cc, dd, F1, K1, in[12], 6); - ROUND(dd, aa, bb, cc, F1, K1, in[13], 7); - ROUND(cc, dd, aa, bb, F1, K1, in[14], 9); - ROUND(bb, cc, dd, aa, F1, K1, in[15], 8); - - /* round 1: right lane */ - ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8); - ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9); - ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9); - ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11); - ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13); - ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15); - ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15); - ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5); - ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7); - ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7); - ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8); - ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11); - ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14); - ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14); - ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12); - ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6); - - /* Swap contents of "a" registers */ - swap(aa, aaa); - - /* round 2: left lane */ - ROUND(aa, bb, cc, dd, F2, K2, in[7], 7); - ROUND(dd, aa, bb, cc, F2, K2, in[4], 6); - ROUND(cc, dd, aa, bb, F2, K2, in[13], 8); - ROUND(bb, cc, dd, aa, F2, K2, in[1], 13); - ROUND(aa, bb, cc, dd, F2, K2, in[10], 11); - ROUND(dd, aa, bb, cc, F2, K2, in[6], 9); - ROUND(cc, dd, aa, bb, F2, K2, in[15], 7); - ROUND(bb, cc, dd, aa, F2, K2, in[3], 15); - ROUND(aa, bb, cc, dd, F2, K2, in[12], 7); - ROUND(dd, aa, bb, cc, F2, K2, in[0], 12); - ROUND(cc, dd, aa, bb, F2, K2, in[9], 15); - ROUND(bb, cc, dd, aa, F2, K2, in[5], 9); - ROUND(aa, bb, cc, dd, F2, K2, in[2], 11); - ROUND(dd, aa, bb, cc, F2, K2, in[14], 7); - ROUND(cc, dd, aa, bb, F2, K2, in[11], 13); - ROUND(bb, cc, dd, aa, F2, K2, in[8], 12); - - /* round 2: right lane */ - ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9); - ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13); - ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15); - ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7); - ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12); - ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8); - ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9); - ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11); - ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7); - ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7); - ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12); - ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7); - ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6); - ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15); - ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13); - 
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11); - - /* Swap contents of "b" registers */ - swap(bb, bbb); - - /* round 3: left lane */ - ROUND(aa, bb, cc, dd, F3, K3, in[3], 11); - ROUND(dd, aa, bb, cc, F3, K3, in[10], 13); - ROUND(cc, dd, aa, bb, F3, K3, in[14], 6); - ROUND(bb, cc, dd, aa, F3, K3, in[4], 7); - ROUND(aa, bb, cc, dd, F3, K3, in[9], 14); - ROUND(dd, aa, bb, cc, F3, K3, in[15], 9); - ROUND(cc, dd, aa, bb, F3, K3, in[8], 13); - ROUND(bb, cc, dd, aa, F3, K3, in[1], 15); - ROUND(aa, bb, cc, dd, F3, K3, in[2], 14); - ROUND(dd, aa, bb, cc, F3, K3, in[7], 8); - ROUND(cc, dd, aa, bb, F3, K3, in[0], 13); - ROUND(bb, cc, dd, aa, F3, K3, in[6], 6); - ROUND(aa, bb, cc, dd, F3, K3, in[13], 5); - ROUND(dd, aa, bb, cc, F3, K3, in[11], 12); - ROUND(cc, dd, aa, bb, F3, K3, in[5], 7); - ROUND(bb, cc, dd, aa, F3, K3, in[12], 5); - - /* round 3: right lane */ - ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9); - ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7); - ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15); - ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11); - ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8); - ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6); - ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6); - ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14); - ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12); - ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13); - ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5); - ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14); - ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13); - ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13); - ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7); - ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5); - - /* Swap contents of "c" registers */ - swap(cc, ccc); - - /* round 4: left lane */ - ROUND(aa, bb, cc, dd, F4, K4, in[1], 11); - ROUND(dd, aa, bb, cc, F4, K4, in[9], 12); - ROUND(cc, dd, aa, bb, F4, K4, in[11], 14); - ROUND(bb, cc, dd, aa, F4, K4, in[10], 15); - ROUND(aa, bb, cc, dd, F4, K4, in[0], 14); - ROUND(dd, aa, bb, cc, F4, K4, in[8], 15); - ROUND(cc, dd, aa, bb, F4, K4, in[12], 9); - ROUND(bb, cc, dd, aa, F4, K4, in[4], 8); - ROUND(aa, bb, cc, dd, F4, K4, in[13], 9); - ROUND(dd, aa, bb, cc, F4, K4, in[3], 14); - ROUND(cc, dd, aa, bb, F4, K4, in[7], 5); - ROUND(bb, cc, dd, aa, F4, K4, in[15], 6); - ROUND(aa, bb, cc, dd, F4, K4, in[14], 8); - ROUND(dd, aa, bb, cc, F4, K4, in[5], 6); - ROUND(cc, dd, aa, bb, F4, K4, in[6], 5); - ROUND(bb, cc, dd, aa, F4, K4, in[2], 12); - - /* round 4: right lane */ - ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15); - ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5); - ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8); - ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11); - ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14); - ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14); - ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6); - ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14); - ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6); - ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9); - ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12); - ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9); - ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12); - ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5); - ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15); - ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8); - - /* Swap contents of "d" registers */ - swap(dd, ddd); - - /* combine results */ - state[0] += aa; - state[1] += bb; - state[2] += cc; - state[3] += dd; - state[4] += aaa; - state[5] += bbb; - state[6] += ccc; - state[7] += ddd; -} - -static int rmd256_init(struct shash_desc *desc) -{ - struct 
rmd256_ctx *rctx = shash_desc_ctx(desc); - - rctx->byte_count = 0; - - rctx->state[0] = RMD_H0; - rctx->state[1] = RMD_H1; - rctx->state[2] = RMD_H2; - rctx->state[3] = RMD_H3; - rctx->state[4] = RMD_H5; - rctx->state[5] = RMD_H6; - rctx->state[6] = RMD_H7; - rctx->state[7] = RMD_H8; - - memset(rctx->buffer, 0, sizeof(rctx->buffer)); - - return 0; -} - -static int rmd256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct rmd256_ctx *rctx = shash_desc_ctx(desc); - const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); - - rctx->byte_count += len; - - /* Enough space in buffer? If so copy and we're done */ - if (avail > len) { - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, len); - goto out; - } - - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, avail); - - rmd256_transform(rctx->state, rctx->buffer); - data += avail; - len -= avail; - - while (len >= sizeof(rctx->buffer)) { - memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd256_transform(rctx->state, rctx->buffer); - data += sizeof(rctx->buffer); - len -= sizeof(rctx->buffer); - } - - memcpy(rctx->buffer, data, len); - -out: - return 0; -} - -/* Add padding and return the message digest. */ -static int rmd256_final(struct shash_desc *desc, u8 *out) -{ - struct rmd256_ctx *rctx = shash_desc_ctx(desc); - u32 i, index, padlen; - __le64 bits; - __le32 *dst = (__le32 *)out; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_le64(rctx->byte_count << 3); - - /* Pad out to 56 mod 64 */ - index = rctx->byte_count & 0x3f; - padlen = (index < 56) ? (56 - index) : ((64+56) - index); - rmd256_update(desc, padding, padlen); - - /* Append length */ - rmd256_update(desc, (const u8 *)&bits, sizeof(bits)); - - /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_le32p(&rctx->state[i]); - - /* Wipe context */ - memset(rctx, 0, sizeof(*rctx)); - - return 0; -} - -static struct shash_alg alg = { - .digestsize = RMD256_DIGEST_SIZE, - .init = rmd256_init, - .update = rmd256_update, - .final = rmd256_final, - .descsize = sizeof(struct rmd256_ctx), - .base = { - .cra_name = "rmd256", - .cra_driver_name = "rmd256-generic", - .cra_blocksize = RMD256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init rmd256_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit rmd256_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -subsys_initcall(rmd256_mod_init); -module_exit(rmd256_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Adrian-Ken Rueegsegger "); -MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); -MODULE_ALIAS_CRYPTO("rmd256"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index bc9e2910f5c3..3fb842cb2c67 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -71,7 +71,7 @@ static const char *check[] = { "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", "seed", "salsa20", "rmd160", "rmd256", "rmd320", + "camellia", "seed", "salsa20", "rmd160", "rmd320", "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", "streebog256", "streebog512", NULL @@ -1871,10 +1871,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("rmd160"); break; - case 41: - ret += tcrypt_test("rmd256"); - break; - case 42: ret += tcrypt_test("rmd320"); break; @@ -2405,10 +2401,6 @@ static int 
do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; fallthrough; - case 316: - test_hash_speed("rmd256", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; case 317: test_hash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; @@ -2525,10 +2517,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; - case 416: - test_ahash_speed("rmd256", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; case 417: test_ahash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f8a5cec614d6..c35de56fc25a 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5275,12 +5275,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(rmd160_tv_template) } - }, { - .alg = "rmd256", - .test = alg_test_hash, - .suite = { - .hash = __VECS(rmd256_tv_template) - } }, { .alg = "rmd320", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 05807872846c..86abd1f79aab 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3200,70 +3200,6 @@ static const struct hash_testvec rmd160_tv_template[] = { } }; -/* - * RIPEMD-256 test vectors - */ -static const struct hash_testvec rmd256_tv_template[] = { - { - .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18" - "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a" - "\x2d\x97\x74\xfb\x1e\x5d\x02\x63" - "\x80\xae\x01\x68\xe3\xc5\x52\x2d", - }, { - .plaintext = "a", - .psize = 1, - .digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9" - "\x0a\x91\xba\xb7\x0a\x1e\xba\x0c" - "\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf" - "\xcd\x88\x3a\x91\x34\x69\x29\x25", - }, { - .plaintext = "abc", - .psize = 3, - .digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb" - "\xce\xf5\xca\x2d\x03\xe6\xdb\xa1" - "\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e" - "\x1e\x42\xd2\xe9\x75\x45\x9b\x65", - }, { - .plaintext = "message digest", - .psize = 14, - .digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a" - "\x51\x4d\x5c\x91\x4c\x39\x2c\x90" - "\x18\xc7\xc4\x6b\xc1\x44\x65\x55" - "\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e", - }, { - .plaintext = "abcdefghijklmnopqrstuvwxyz", - .psize = 26, - .digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16" - "\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc" - "\x78\x96\x11\x8a\x51\x97\x96\x87" - "\x82\xdd\x1f\xd9\x7d\x8d\x51\x33", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" - "fghijklmnopqrstuvwxyz0123456789", - .psize = 62, - .digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20" - "\xb8\x44\x24\xae\x93\x1c\xbb\x1f" - "\xe3\x63\xd1\xd0\xbf\x40\x17\xf1" - "\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8", - }, { - .plaintext = "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890", - .psize = 80, - .digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa" - "\xf9\x13\x68\xc0\x6a\x62\x75\xb5" - "\x53\xe3\xf0\x99\xbf\x0e\xa4\xed" - "\xfd\x67\x78\xdf\x89\xa8\x90\xdd", - }, { - .plaintext = "abcdbcdecdefdefgefghfghighij" - "hijkijkljklmklmnlmnomnopnopq", - .psize = 56, - .digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8" - "\xc8\xd9\x12\x85\x73\xe7\xa9\x80" - "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e" - "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f", - } -}; - /* * RIPEMD-320 test vectors */ -- cgit v1.2.3-59-g8ed1b From 93f64202926f606d67b1095b59137f903c6ab304 Mon Sep 17 00:00:00 2001 From: Ard 
Biesheuvel Date: Thu, 21 Jan 2021 14:07:31 +0100 Subject: crypto: rmd320 - remove RIPE-MD 320 hash algorithm RIPE-MD 320 is never referenced anywhere in the kernel, and unlikely to be depended upon by userspace via AF_ALG. So let's remove it. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 12 -- crypto/ripemd.h | 8 -- crypto/rmd320.c | 391 ------------------------------------------------------- crypto/tcrypt.c | 14 +- crypto/testmgr.c | 6 - crypto/testmgr.h | 64 --------- 6 files changed, 1 insertion(+), 494 deletions(-) delete mode 100644 crypto/rmd320.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 8e93dce161b0..a32e25cca2b4 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -834,18 +834,6 @@ config CRYPTO_RMD160 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. See -config CRYPTO_RMD320 - tristate "RIPEMD-320 digest algorithm" - select CRYPTO_HASH - help - RIPEMD-320 is an optional extension of RIPEMD-160 with a - 320 bit hash. It is intended for applications that require - longer hash-results, without needing a larger security level - (than RIPEMD-160). - - Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See - config CRYPTO_SHA1 tristate "SHA1 digest algorithm" select CRYPTO_HASH diff --git a/crypto/ripemd.h b/crypto/ripemd.h index a19c3c27a466..b977785e2a62 100644 --- a/crypto/ripemd.h +++ b/crypto/ripemd.h @@ -9,20 +9,12 @@ #define RMD160_DIGEST_SIZE 20 #define RMD160_BLOCK_SIZE 64 -#define RMD320_DIGEST_SIZE 40 -#define RMD320_BLOCK_SIZE 64 - /* initial values */ #define RMD_H0 0x67452301UL #define RMD_H1 0xefcdab89UL #define RMD_H2 0x98badcfeUL #define RMD_H3 0x10325476UL #define RMD_H4 0xc3d2e1f0UL -#define RMD_H5 0x76543210UL -#define RMD_H6 0xfedcba98UL -#define RMD_H7 0x89abcdefUL -#define RMD_H8 0x01234567UL -#define RMD_H9 0x3c2d1e0fUL /* constants */ #define RMD_K1 0x00000000UL diff --git a/crypto/rmd320.c b/crypto/rmd320.c deleted file mode 100644 index c919ad6c4705..000000000000 --- a/crypto/rmd320.c +++ /dev/null @@ -1,391 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest. - * - * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC - * - * Copyright (c) 2008 Adrian-Ken Rueegsegger - */ -#include -#include -#include -#include -#include -#include - -#include "ripemd.h" - -struct rmd320_ctx { - u64 byte_count; - u32 state[10]; - __le32 buffer[16]; -}; - -#define K1 RMD_K1 -#define K2 RMD_K2 -#define K3 RMD_K3 -#define K4 RMD_K4 -#define K5 RMD_K5 -#define KK1 RMD_K6 -#define KK2 RMD_K7 -#define KK3 RMD_K8 -#define KK4 RMD_K9 -#define KK5 RMD_K1 - -#define F1(x, y, z) (x ^ y ^ z) /* XOR */ -#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ -#define F3(x, y, z) ((x | ~y) ^ z) -#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? 
x : y */ -#define F5(x, y, z) (x ^ (y | ~z)) - -#define ROUND(a, b, c, d, e, f, k, x, s) { \ - (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ - (a) = rol32((a), (s)) + (e); \ - (c) = rol32((c), 10); \ -} - -static void rmd320_transform(u32 *state, const __le32 *in) -{ - u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee; - - /* Initialize left lane */ - aa = state[0]; - bb = state[1]; - cc = state[2]; - dd = state[3]; - ee = state[4]; - - /* Initialize right lane */ - aaa = state[5]; - bbb = state[6]; - ccc = state[7]; - ddd = state[8]; - eee = state[9]; - - /* round 1: left lane */ - ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11); - ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14); - ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15); - ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12); - ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5); - ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8); - ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7); - ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9); - ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11); - ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13); - ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14); - ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15); - ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6); - ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7); - ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9); - ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8); - - /* round 1: right lane */ - ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8); - ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9); - ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9); - ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11); - ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13); - ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15); - ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15); - ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5); - ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7); - ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7); - ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8); - ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11); - ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14); - ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14); - ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12); - ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6); - - /* Swap contents of "a" registers */ - swap(aa, aaa); - - /* round 2: left lane" */ - ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7); - ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6); - ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8); - ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13); - ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11); - ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9); - ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7); - ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15); - ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7); - ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12); - ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15); - ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9); - ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11); - ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7); - ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13); - ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12); - - /* round 2: right lane */ - ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9); - ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13); - ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15); - ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7); - ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12); - ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8); - ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9); - ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, 
in[10], 11); - ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7); - ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7); - ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12); - ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7); - ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6); - ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15); - ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13); - ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11); - - /* Swap contents of "b" registers */ - swap(bb, bbb); - - /* round 3: left lane" */ - ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11); - ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13); - ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6); - ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7); - ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14); - ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9); - ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13); - ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15); - ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14); - ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8); - ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13); - ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6); - ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5); - ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12); - ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7); - ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5); - - /* round 3: right lane */ - ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9); - ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7); - ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15); - ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11); - ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8); - ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6); - ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6); - ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14); - ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12); - ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13); - ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5); - ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14); - ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13); - ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13); - ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7); - ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5); - - /* Swap contents of "c" registers */ - swap(cc, ccc); - - /* round 4: left lane" */ - ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11); - ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12); - ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14); - ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15); - ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14); - ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15); - ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9); - ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8); - ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9); - ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14); - ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5); - ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6); - ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8); - ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6); - ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5); - ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12); - - /* round 4: right lane */ - ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15); - ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5); - ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8); - ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11); - ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14); - ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14); - ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6); - ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14); - ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 
6); - ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9); - ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12); - ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9); - ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12); - ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5); - ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15); - ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8); - - /* Swap contents of "d" registers */ - swap(dd, ddd); - - /* round 5: left lane" */ - ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9); - ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15); - ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5); - ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11); - ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6); - ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8); - ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13); - ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12); - ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5); - ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12); - ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13); - ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14); - ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11); - ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8); - ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5); - ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6); - - /* round 5: right lane */ - ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8); - ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5); - ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12); - ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9); - ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12); - ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5); - ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14); - ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6); - ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8); - ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13); - ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6); - ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5); - ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15); - ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13); - ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11); - ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11); - - /* Swap contents of "e" registers */ - swap(ee, eee); - - /* combine results */ - state[0] += aa; - state[1] += bb; - state[2] += cc; - state[3] += dd; - state[4] += ee; - state[5] += aaa; - state[6] += bbb; - state[7] += ccc; - state[8] += ddd; - state[9] += eee; -} - -static int rmd320_init(struct shash_desc *desc) -{ - struct rmd320_ctx *rctx = shash_desc_ctx(desc); - - rctx->byte_count = 0; - - rctx->state[0] = RMD_H0; - rctx->state[1] = RMD_H1; - rctx->state[2] = RMD_H2; - rctx->state[3] = RMD_H3; - rctx->state[4] = RMD_H4; - rctx->state[5] = RMD_H5; - rctx->state[6] = RMD_H6; - rctx->state[7] = RMD_H7; - rctx->state[8] = RMD_H8; - rctx->state[9] = RMD_H9; - - memset(rctx->buffer, 0, sizeof(rctx->buffer)); - - return 0; -} - -static int rmd320_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct rmd320_ctx *rctx = shash_desc_ctx(desc); - const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); - - rctx->byte_count += len; - - /* Enough space in buffer? 
If so copy and we're done */ - if (avail > len) { - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, len); - goto out; - } - - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, avail); - - rmd320_transform(rctx->state, rctx->buffer); - data += avail; - len -= avail; - - while (len >= sizeof(rctx->buffer)) { - memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd320_transform(rctx->state, rctx->buffer); - data += sizeof(rctx->buffer); - len -= sizeof(rctx->buffer); - } - - memcpy(rctx->buffer, data, len); - -out: - return 0; -} - -/* Add padding and return the message digest. */ -static int rmd320_final(struct shash_desc *desc, u8 *out) -{ - struct rmd320_ctx *rctx = shash_desc_ctx(desc); - u32 i, index, padlen; - __le64 bits; - __le32 *dst = (__le32 *)out; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_le64(rctx->byte_count << 3); - - /* Pad out to 56 mod 64 */ - index = rctx->byte_count & 0x3f; - padlen = (index < 56) ? (56 - index) : ((64+56) - index); - rmd320_update(desc, padding, padlen); - - /* Append length */ - rmd320_update(desc, (const u8 *)&bits, sizeof(bits)); - - /* Store state in digest */ - for (i = 0; i < 10; i++) - dst[i] = cpu_to_le32p(&rctx->state[i]); - - /* Wipe context */ - memset(rctx, 0, sizeof(*rctx)); - - return 0; -} - -static struct shash_alg alg = { - .digestsize = RMD320_DIGEST_SIZE, - .init = rmd320_init, - .update = rmd320_update, - .final = rmd320_final, - .descsize = sizeof(struct rmd320_ctx), - .base = { - .cra_name = "rmd320", - .cra_driver_name = "rmd320-generic", - .cra_blocksize = RMD320_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init rmd320_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit rmd320_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -subsys_initcall(rmd320_mod_init); -module_exit(rmd320_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Adrian-Ken Rueegsegger "); -MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); -MODULE_ALIAS_CRYPTO("rmd320"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 3fb842cb2c67..a231df72ca7d 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -71,7 +71,7 @@ static const char *check[] = { "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", "seed", "salsa20", "rmd160", "rmd320", + "camellia", "seed", "salsa20", "rmd160", "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", "streebog256", "streebog512", NULL @@ -1871,10 +1871,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("rmd160"); break; - case 42: - ret += tcrypt_test("rmd320"); - break; - case 43: ret += tcrypt_test("ecb(seed)"); break; @@ -2401,10 +2397,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; fallthrough; - case 317: - test_hash_speed("rmd320", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; case 318: klen = 16; test_hash_speed("ghash", sec, generic_hash_speed_template); @@ -2517,10 +2509,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; - case 417: - test_ahash_speed("rmd320", sec, 
generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; case 418: test_ahash_speed("sha3-224", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index c35de56fc25a..d12cec6ab003 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5275,12 +5275,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(rmd160_tv_template) } - }, { - .alg = "rmd320", - .test = alg_test_hash, - .suite = { - .hash = __VECS(rmd320_tv_template) - } }, { .alg = "rsa", .test = alg_test_akcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 86abd1f79aab..5625164cda54 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3200,70 +3200,6 @@ static const struct hash_testvec rmd160_tv_template[] = { } }; -/* - * RIPEMD-320 test vectors - */ -static const struct hash_testvec rmd320_tv_template[] = { - { - .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1" - "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25" - "\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e" - "\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8", - }, { - .plaintext = "a", - .psize = 1, - .digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5" - "\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57" - "\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54" - "\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d", - }, { - .plaintext = "abc", - .psize = 3, - .digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d" - "\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08" - "\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74" - "\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d", - }, { - .plaintext = "message digest", - .psize = 14, - .digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68" - "\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa" - "\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d" - "\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97", - }, { - .plaintext = "abcdefghijklmnopqrstuvwxyz", - .psize = 26, - .digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93" - "\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4" - "\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed" - "\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" - "fghijklmnopqrstuvwxyz0123456789", - .psize = 62, - .digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2" - "\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c" - "\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9" - "\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4", - }, { - .plaintext = "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890", - .psize = 80, - .digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6" - "\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41" - "\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f" - "\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42", - }, { - .plaintext = "abcdbcdecdefdefgefghfghighij" - "hijkijkljklmklmnlmnomnopnopq", - .psize = 56, - .digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4" - "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59" - "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b" - "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac", - } -}; - static const struct hash_testvec crct10dif_tv_template[] = { { .plaintext = "abc", -- cgit v1.2.3-59-g8ed1b From 87cd723f8978c59bc4e28593da45d09ebf5d92a2 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 21 Jan 2021 14:07:32 +0100 Subject: crypto: tgr192 - remove Tiger 128/160/192 hash algorithms Tiger is never referenced anywhere in the kernel, and unlikely to be depended upon by userspace via AF_ALG. So let's remove it. 
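Note that this drops one implementation rather than three independent primitives: tgr160 and tgr128 were defined as the leading 20 and 16 bytes of the tgr192 digest, which is also visible in the removed test vectors below (the shorter digests are prefixes of the tgr192 ones). The deleted tgr192.c derived them roughly as in this illustrative sketch (not the verbatim kernel code):

	#include <string.h>

	#define TGR192_DIGEST_SIZE	24
	#define TGR160_DIGEST_SIZE	20
	#define TGR128_DIGEST_SIZE	16

	/* tgr160/tgr128 output is a plain prefix of the tgr192 digest */
	static void tgr_truncate(const unsigned char full[TGR192_DIGEST_SIZE],
				 unsigned char t160[TGR160_DIGEST_SIZE],
				 unsigned char t128[TGR128_DIGEST_SIZE])
	{
		memcpy(t160, full, TGR160_DIGEST_SIZE);
		memcpy(t128, full, TGR128_DIGEST_SIZE);
	}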
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 13 -- crypto/Makefile | 1 - crypto/tcrypt.c | 36 --- crypto/testmgr.c | 18 -- crypto/testmgr.h | 126 ---------- crypto/tgr192.c | 682 ------------------------------------------------------- 6 files changed, 876 deletions(-) delete mode 100644 crypto/tgr192.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index a32e25cca2b4..8d25d689a705 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1009,19 +1009,6 @@ config CRYPTO_STREEBOG https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf https://tools.ietf.org/html/rfc6986 -config CRYPTO_TGR192 - tristate "Tiger digest algorithms" - select CRYPTO_HASH - help - Tiger hash algorithm 192, 160 and 128-bit hashes - - Tiger is a hash function optimized for 64-bit processors while - still having decent performance on 32-bit processors. - Tiger was developed by Ross Anderson and Eli Biham. - - See also: - . - config CRYPTO_WP512 tristate "Whirlpool digest algorithms" select CRYPTO_HASH diff --git a/crypto/Makefile b/crypto/Makefile index 946e821f1874..6b9622f21f7f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -77,7 +77,6 @@ obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 -obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a231df72ca7d..696c44ef465e 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1815,18 +1815,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("cbc(anubis)"); break; - case 27: - ret += tcrypt_test("tgr192"); - break; - - case 28: - ret += tcrypt_test("tgr160"); - break; - - case 29: - ret += tcrypt_test("tgr128"); - break; - case 30: ret += tcrypt_test("ecb(xeta)"); break; @@ -2377,18 +2365,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed("wp512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; fallthrough; - case 310: - test_hash_speed("tgr128", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; - case 311: - test_hash_speed("tgr160", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; - case 312: - test_hash_speed("tgr192", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; case 313: test_hash_speed("sha224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; @@ -2489,18 +2465,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed("wp512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; - case 410: - test_ahash_speed("tgr128", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; - case 411: - test_ahash_speed("tgr160", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; - case 412: - test_ahash_speed("tgr192", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; case 413: test_ahash_speed("sha224", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; diff --git a/crypto/testmgr.c 
b/crypto/testmgr.c index d12cec6ab003..b87802ffb554 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5375,24 +5375,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(streebog512_tv_template) } - }, { - .alg = "tgr128", - .test = alg_test_hash, - .suite = { - .hash = __VECS(tgr128_tv_template) - } - }, { - .alg = "tgr160", - .test = alg_test_hash, - .suite = { - .hash = __VECS(tgr160_tv_template) - } - }, { - .alg = "tgr192", - .test = alg_test_hash, - .suite = { - .hash = __VECS(tgr192_tv_template) - } }, { .alg = "vmac64(aes)", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 5625164cda54..851c107a5584 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -4950,132 +4950,6 @@ static const struct hash_testvec wp256_tv_template[] = { }, }; -/* - * TIGER test vectors from Tiger website - */ -static const struct hash_testvec tgr192_tv_template[] = { - { - .plaintext = "", - .psize = 0, - .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32" - "\x16\x16\x6e\x76\xb1\xbb\x92\x5f" - "\xf3\x73\xde\x2d\x49\x58\x4e\x7a", - }, { - .plaintext = "abc", - .psize = 3, - .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a" - "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf" - "\x93\x5f\x7b\x95\x1c\x13\x29\x51", - }, { - .plaintext = "Tiger", - .psize = 5, - .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd" - "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec" - "\x37\x79\x0c\x11\x6f\x9d\x2b\xdf", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", - .psize = 64, - .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7" - "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e" - "\xb5\x86\x44\x50\x34\xa5\xa3\x86", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789", - .psize = 64, - .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48" - "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9" - "\x57\x89\x65\x65\x97\x5f\x91\x97", - }, { - .plaintext = "Tiger - A Fast New Hash Function, " - "by Ross Anderson and Eli Biham, " - "proceedings of Fast Software Encryption 3, " - "Cambridge, 1996.", - .psize = 125, - .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63" - "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24" - "\xdd\x68\x15\x1d\x50\x39\x74\xfc", - }, -}; - -static const struct hash_testvec tgr160_tv_template[] = { - { - .plaintext = "", - .psize = 0, - .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32" - "\x16\x16\x6e\x76\xb1\xbb\x92\x5f" - "\xf3\x73\xde\x2d", - }, { - .plaintext = "abc", - .psize = 3, - .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a" - "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf" - "\x93\x5f\x7b\x95", - }, { - .plaintext = "Tiger", - .psize = 5, - .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd" - "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec" - "\x37\x79\x0c\x11", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", - .psize = 64, - .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7" - "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e" - "\xb5\x86\x44\x50", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789", - .psize = 64, - .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48" - "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9" - "\x57\x89\x65\x65", - }, { - .plaintext = "Tiger - A Fast New Hash Function, " - "by Ross Anderson and Eli Biham, " - "proceedings of Fast Software Encryption 3, " - "Cambridge, 1996.", - .psize = 125, - .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63" - "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24" - "\xdd\x68\x15\x1d", - }, -}; - -static const struct hash_testvec tgr128_tv_template[] = { - { - .plaintext = "", - .psize = 0, - .digest = 
"\x24\xf0\x13\x0c\x63\xac\x93\x32" - "\x16\x16\x6e\x76\xb1\xbb\x92\x5f", - }, { - .plaintext = "abc", - .psize = 3, - .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a" - "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf", - }, { - .plaintext = "Tiger", - .psize = 5, - .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd" - "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", - .psize = 64, - .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7" - "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e", - }, { - .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789", - .psize = 64, - .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48" - "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9", - }, { - .plaintext = "Tiger - A Fast New Hash Function, " - "by Ross Anderson and Eli Biham, " - "proceedings of Fast Software Encryption 3, " - "Cambridge, 1996.", - .psize = 125, - .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63" - "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24", - }, -}; - static const struct hash_testvec ghash_tv_template[] = { { diff --git a/crypto/tgr192.c b/crypto/tgr192.c deleted file mode 100644 index aa29c529b44e..000000000000 --- a/crypto/tgr192.c +++ /dev/null @@ -1,682 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * Tiger hashing Algorithm - * - * Copyright (C) 1998 Free Software Foundation, Inc. - * - * The Tiger algorithm was developed by Ross Anderson and Eli Biham. - * It was optimized for 64-bit processors while still delievering - * decent performance on 32 and 16-bit processors. - * - * This version is derived from the GnuPG implementation and the - * Tiger-Perl interface written by Rafael Sevilla - * - * Adapted for Linux Kernel Crypto by Aaron Grothe - * ajgrothe@yahoo.com, February 22, 2005 - */ -#include -#include -#include -#include -#include -#include -#include - -#define TGR192_DIGEST_SIZE 24 -#define TGR160_DIGEST_SIZE 20 -#define TGR128_DIGEST_SIZE 16 - -#define TGR192_BLOCK_SIZE 64 - -struct tgr192_ctx { - u64 a, b, c; - u8 hash[64]; - int count; - u32 nblocks; -}; - -static const u64 sbox1[256] = { - 0x02aab17cf7e90c5eULL, 0xac424b03e243a8ecULL, 0x72cd5be30dd5fcd3ULL, - 0x6d019b93f6f97f3aULL, 0xcd9978ffd21f9193ULL, 0x7573a1c9708029e2ULL, - 0xb164326b922a83c3ULL, 0x46883eee04915870ULL, 0xeaace3057103ece6ULL, - 0xc54169b808a3535cULL, 0x4ce754918ddec47cULL, 0x0aa2f4dfdc0df40cULL, - 0x10b76f18a74dbefaULL, 0xc6ccb6235ad1ab6aULL, 0x13726121572fe2ffULL, - 0x1a488c6f199d921eULL, 0x4bc9f9f4da0007caULL, 0x26f5e6f6e85241c7ULL, - 0x859079dbea5947b6ULL, 0x4f1885c5c99e8c92ULL, 0xd78e761ea96f864bULL, - 0x8e36428c52b5c17dULL, 0x69cf6827373063c1ULL, 0xb607c93d9bb4c56eULL, - 0x7d820e760e76b5eaULL, 0x645c9cc6f07fdc42ULL, 0xbf38a078243342e0ULL, - 0x5f6b343c9d2e7d04ULL, 0xf2c28aeb600b0ec6ULL, 0x6c0ed85f7254bcacULL, - 0x71592281a4db4fe5ULL, 0x1967fa69ce0fed9fULL, 0xfd5293f8b96545dbULL, - 0xc879e9d7f2a7600bULL, 0x860248920193194eULL, 0xa4f9533b2d9cc0b3ULL, - 0x9053836c15957613ULL, 0xdb6dcf8afc357bf1ULL, 0x18beea7a7a370f57ULL, - 0x037117ca50b99066ULL, 0x6ab30a9774424a35ULL, 0xf4e92f02e325249bULL, - 0x7739db07061ccae1ULL, 0xd8f3b49ceca42a05ULL, 0xbd56be3f51382f73ULL, - 0x45faed5843b0bb28ULL, 0x1c813d5c11bf1f83ULL, 0x8af0e4b6d75fa169ULL, - 0x33ee18a487ad9999ULL, 0x3c26e8eab1c94410ULL, 0xb510102bc0a822f9ULL, - 0x141eef310ce6123bULL, 0xfc65b90059ddb154ULL, 0xe0158640c5e0e607ULL, - 0x884e079826c3a3cfULL, 0x930d0d9523c535fdULL, 0x35638d754e9a2b00ULL, - 0x4085fccf40469dd5ULL, 0xc4b17ad28be23a4cULL, 0xcab2f0fc6a3e6a2eULL, - 0x2860971a6b943fcdULL, 
0x3dde6ee212e30446ULL, 0x6222f32ae01765aeULL, - 0x5d550bb5478308feULL, 0xa9efa98da0eda22aULL, 0xc351a71686c40da7ULL, - 0x1105586d9c867c84ULL, 0xdcffee85fda22853ULL, 0xccfbd0262c5eef76ULL, - 0xbaf294cb8990d201ULL, 0xe69464f52afad975ULL, 0x94b013afdf133e14ULL, - 0x06a7d1a32823c958ULL, 0x6f95fe5130f61119ULL, 0xd92ab34e462c06c0ULL, - 0xed7bde33887c71d2ULL, 0x79746d6e6518393eULL, 0x5ba419385d713329ULL, - 0x7c1ba6b948a97564ULL, 0x31987c197bfdac67ULL, 0xde6c23c44b053d02ULL, - 0x581c49fed002d64dULL, 0xdd474d6338261571ULL, 0xaa4546c3e473d062ULL, - 0x928fce349455f860ULL, 0x48161bbacaab94d9ULL, 0x63912430770e6f68ULL, - 0x6ec8a5e602c6641cULL, 0x87282515337ddd2bULL, 0x2cda6b42034b701bULL, - 0xb03d37c181cb096dULL, 0xe108438266c71c6fULL, 0x2b3180c7eb51b255ULL, - 0xdf92b82f96c08bbcULL, 0x5c68c8c0a632f3baULL, 0x5504cc861c3d0556ULL, - 0xabbfa4e55fb26b8fULL, 0x41848b0ab3baceb4ULL, 0xb334a273aa445d32ULL, - 0xbca696f0a85ad881ULL, 0x24f6ec65b528d56cULL, 0x0ce1512e90f4524aULL, - 0x4e9dd79d5506d35aULL, 0x258905fac6ce9779ULL, 0x2019295b3e109b33ULL, - 0xf8a9478b73a054ccULL, 0x2924f2f934417eb0ULL, 0x3993357d536d1bc4ULL, - 0x38a81ac21db6ff8bULL, 0x47c4fbf17d6016bfULL, 0x1e0faadd7667e3f5ULL, - 0x7abcff62938beb96ULL, 0xa78dad948fc179c9ULL, 0x8f1f98b72911e50dULL, - 0x61e48eae27121a91ULL, 0x4d62f7ad31859808ULL, 0xeceba345ef5ceaebULL, - 0xf5ceb25ebc9684ceULL, 0xf633e20cb7f76221ULL, 0xa32cdf06ab8293e4ULL, - 0x985a202ca5ee2ca4ULL, 0xcf0b8447cc8a8fb1ULL, 0x9f765244979859a3ULL, - 0xa8d516b1a1240017ULL, 0x0bd7ba3ebb5dc726ULL, 0xe54bca55b86adb39ULL, - 0x1d7a3afd6c478063ULL, 0x519ec608e7669eddULL, 0x0e5715a2d149aa23ULL, - 0x177d4571848ff194ULL, 0xeeb55f3241014c22ULL, 0x0f5e5ca13a6e2ec2ULL, - 0x8029927b75f5c361ULL, 0xad139fabc3d6e436ULL, 0x0d5df1a94ccf402fULL, - 0x3e8bd948bea5dfc8ULL, 0xa5a0d357bd3ff77eULL, 0xa2d12e251f74f645ULL, - 0x66fd9e525e81a082ULL, 0x2e0c90ce7f687a49ULL, 0xc2e8bcbeba973bc5ULL, - 0x000001bce509745fULL, 0x423777bbe6dab3d6ULL, 0xd1661c7eaef06eb5ULL, - 0xa1781f354daacfd8ULL, 0x2d11284a2b16affcULL, 0xf1fc4f67fa891d1fULL, - 0x73ecc25dcb920adaULL, 0xae610c22c2a12651ULL, 0x96e0a810d356b78aULL, - 0x5a9a381f2fe7870fULL, 0xd5ad62ede94e5530ULL, 0xd225e5e8368d1427ULL, - 0x65977b70c7af4631ULL, 0x99f889b2de39d74fULL, 0x233f30bf54e1d143ULL, - 0x9a9675d3d9a63c97ULL, 0x5470554ff334f9a8ULL, 0x166acb744a4f5688ULL, - 0x70c74caab2e4aeadULL, 0xf0d091646f294d12ULL, 0x57b82a89684031d1ULL, - 0xefd95a5a61be0b6bULL, 0x2fbd12e969f2f29aULL, 0x9bd37013feff9fe8ULL, - 0x3f9b0404d6085a06ULL, 0x4940c1f3166cfe15ULL, 0x09542c4dcdf3defbULL, - 0xb4c5218385cd5ce3ULL, 0xc935b7dc4462a641ULL, 0x3417f8a68ed3b63fULL, - 0xb80959295b215b40ULL, 0xf99cdaef3b8c8572ULL, 0x018c0614f8fcb95dULL, - 0x1b14accd1a3acdf3ULL, 0x84d471f200bb732dULL, 0xc1a3110e95e8da16ULL, - 0x430a7220bf1a82b8ULL, 0xb77e090d39df210eULL, 0x5ef4bd9f3cd05e9dULL, - 0x9d4ff6da7e57a444ULL, 0xda1d60e183d4a5f8ULL, 0xb287c38417998e47ULL, - 0xfe3edc121bb31886ULL, 0xc7fe3ccc980ccbefULL, 0xe46fb590189bfd03ULL, - 0x3732fd469a4c57dcULL, 0x7ef700a07cf1ad65ULL, 0x59c64468a31d8859ULL, - 0x762fb0b4d45b61f6ULL, 0x155baed099047718ULL, 0x68755e4c3d50baa6ULL, - 0xe9214e7f22d8b4dfULL, 0x2addbf532eac95f4ULL, 0x32ae3909b4bd0109ULL, - 0x834df537b08e3450ULL, 0xfa209da84220728dULL, 0x9e691d9b9efe23f7ULL, - 0x0446d288c4ae8d7fULL, 0x7b4cc524e169785bULL, 0x21d87f0135ca1385ULL, - 0xcebb400f137b8aa5ULL, 0x272e2b66580796beULL, 0x3612264125c2b0deULL, - 0x057702bdad1efbb2ULL, 0xd4babb8eacf84be9ULL, 0x91583139641bc67bULL, - 0x8bdc2de08036e024ULL, 0x603c8156f49f68edULL, 0xf7d236f7dbef5111ULL, - 0x9727c4598ad21e80ULL, 
0xa08a0896670a5fd7ULL, 0xcb4a8f4309eba9cbULL, - 0x81af564b0f7036a1ULL, 0xc0b99aa778199abdULL, 0x959f1ec83fc8e952ULL, - 0x8c505077794a81b9ULL, 0x3acaaf8f056338f0ULL, 0x07b43f50627a6778ULL, - 0x4a44ab49f5eccc77ULL, 0x3bc3d6e4b679ee98ULL, 0x9cc0d4d1cf14108cULL, - 0x4406c00b206bc8a0ULL, 0x82a18854c8d72d89ULL, 0x67e366b35c3c432cULL, - 0xb923dd61102b37f2ULL, 0x56ab2779d884271dULL, 0xbe83e1b0ff1525afULL, - 0xfb7c65d4217e49a9ULL, 0x6bdbe0e76d48e7d4ULL, 0x08df828745d9179eULL, - 0x22ea6a9add53bd34ULL, 0xe36e141c5622200aULL, 0x7f805d1b8cb750eeULL, - 0xafe5c7a59f58e837ULL, 0xe27f996a4fb1c23cULL, 0xd3867dfb0775f0d0ULL, - 0xd0e673de6e88891aULL, 0x123aeb9eafb86c25ULL, 0x30f1d5d5c145b895ULL, - 0xbb434a2dee7269e7ULL, 0x78cb67ecf931fa38ULL, 0xf33b0372323bbf9cULL, - 0x52d66336fb279c74ULL, 0x505f33ac0afb4eaaULL, 0xe8a5cd99a2cce187ULL, - 0x534974801e2d30bbULL, 0x8d2d5711d5876d90ULL, 0x1f1a412891bc038eULL, - 0xd6e2e71d82e56648ULL, 0x74036c3a497732b7ULL, 0x89b67ed96361f5abULL, - 0xffed95d8f1ea02a2ULL, 0xe72b3bd61464d43dULL, 0xa6300f170bdc4820ULL, - 0xebc18760ed78a77aULL -}; - -static const u64 sbox2[256] = { - 0xe6a6be5a05a12138ULL, 0xb5a122a5b4f87c98ULL, 0x563c6089140b6990ULL, - 0x4c46cb2e391f5dd5ULL, 0xd932addbc9b79434ULL, 0x08ea70e42015aff5ULL, - 0xd765a6673e478cf1ULL, 0xc4fb757eab278d99ULL, 0xdf11c6862d6e0692ULL, - 0xddeb84f10d7f3b16ULL, 0x6f2ef604a665ea04ULL, 0x4a8e0f0ff0e0dfb3ULL, - 0xa5edeef83dbcba51ULL, 0xfc4f0a2a0ea4371eULL, 0xe83e1da85cb38429ULL, - 0xdc8ff882ba1b1ce2ULL, 0xcd45505e8353e80dULL, 0x18d19a00d4db0717ULL, - 0x34a0cfeda5f38101ULL, 0x0be77e518887caf2ULL, 0x1e341438b3c45136ULL, - 0xe05797f49089ccf9ULL, 0xffd23f9df2591d14ULL, 0x543dda228595c5cdULL, - 0x661f81fd99052a33ULL, 0x8736e641db0f7b76ULL, 0x15227725418e5307ULL, - 0xe25f7f46162eb2faULL, 0x48a8b2126c13d9feULL, 0xafdc541792e76eeaULL, - 0x03d912bfc6d1898fULL, 0x31b1aafa1b83f51bULL, 0xf1ac2796e42ab7d9ULL, - 0x40a3a7d7fcd2ebacULL, 0x1056136d0afbbcc5ULL, 0x7889e1dd9a6d0c85ULL, - 0xd33525782a7974aaULL, 0xa7e25d09078ac09bULL, 0xbd4138b3eac6edd0ULL, - 0x920abfbe71eb9e70ULL, 0xa2a5d0f54fc2625cULL, 0xc054e36b0b1290a3ULL, - 0xf6dd59ff62fe932bULL, 0x3537354511a8ac7dULL, 0xca845e9172fadcd4ULL, - 0x84f82b60329d20dcULL, 0x79c62ce1cd672f18ULL, 0x8b09a2add124642cULL, - 0xd0c1e96a19d9e726ULL, 0x5a786a9b4ba9500cULL, 0x0e020336634c43f3ULL, - 0xc17b474aeb66d822ULL, 0x6a731ae3ec9baac2ULL, 0x8226667ae0840258ULL, - 0x67d4567691caeca5ULL, 0x1d94155c4875adb5ULL, 0x6d00fd985b813fdfULL, - 0x51286efcb774cd06ULL, 0x5e8834471fa744afULL, 0xf72ca0aee761ae2eULL, - 0xbe40e4cdaee8e09aULL, 0xe9970bbb5118f665ULL, 0x726e4beb33df1964ULL, - 0x703b000729199762ULL, 0x4631d816f5ef30a7ULL, 0xb880b5b51504a6beULL, - 0x641793c37ed84b6cULL, 0x7b21ed77f6e97d96ULL, 0x776306312ef96b73ULL, - 0xae528948e86ff3f4ULL, 0x53dbd7f286a3f8f8ULL, 0x16cadce74cfc1063ULL, - 0x005c19bdfa52c6ddULL, 0x68868f5d64d46ad3ULL, 0x3a9d512ccf1e186aULL, - 0x367e62c2385660aeULL, 0xe359e7ea77dcb1d7ULL, 0x526c0773749abe6eULL, - 0x735ae5f9d09f734bULL, 0x493fc7cc8a558ba8ULL, 0xb0b9c1533041ab45ULL, - 0x321958ba470a59bdULL, 0x852db00b5f46c393ULL, 0x91209b2bd336b0e5ULL, - 0x6e604f7d659ef19fULL, 0xb99a8ae2782ccb24ULL, 0xccf52ab6c814c4c7ULL, - 0x4727d9afbe11727bULL, 0x7e950d0c0121b34dULL, 0x756f435670ad471fULL, - 0xf5add442615a6849ULL, 0x4e87e09980b9957aULL, 0x2acfa1df50aee355ULL, - 0xd898263afd2fd556ULL, 0xc8f4924dd80c8fd6ULL, 0xcf99ca3d754a173aULL, - 0xfe477bacaf91bf3cULL, 0xed5371f6d690c12dULL, 0x831a5c285e687094ULL, - 0xc5d3c90a3708a0a4ULL, 0x0f7f903717d06580ULL, 0x19f9bb13b8fdf27fULL, - 0xb1bd6f1b4d502843ULL, 
0x1c761ba38fff4012ULL, 0x0d1530c4e2e21f3bULL, - 0x8943ce69a7372c8aULL, 0xe5184e11feb5ce66ULL, 0x618bdb80bd736621ULL, - 0x7d29bad68b574d0bULL, 0x81bb613e25e6fe5bULL, 0x071c9c10bc07913fULL, - 0xc7beeb7909ac2d97ULL, 0xc3e58d353bc5d757ULL, 0xeb017892f38f61e8ULL, - 0xd4effb9c9b1cc21aULL, 0x99727d26f494f7abULL, 0xa3e063a2956b3e03ULL, - 0x9d4a8b9a4aa09c30ULL, 0x3f6ab7d500090fb4ULL, 0x9cc0f2a057268ac0ULL, - 0x3dee9d2dedbf42d1ULL, 0x330f49c87960a972ULL, 0xc6b2720287421b41ULL, - 0x0ac59ec07c00369cULL, 0xef4eac49cb353425ULL, 0xf450244eef0129d8ULL, - 0x8acc46e5caf4deb6ULL, 0x2ffeab63989263f7ULL, 0x8f7cb9fe5d7a4578ULL, - 0x5bd8f7644e634635ULL, 0x427a7315bf2dc900ULL, 0x17d0c4aa2125261cULL, - 0x3992486c93518e50ULL, 0xb4cbfee0a2d7d4c3ULL, 0x7c75d6202c5ddd8dULL, - 0xdbc295d8e35b6c61ULL, 0x60b369d302032b19ULL, 0xce42685fdce44132ULL, - 0x06f3ddb9ddf65610ULL, 0x8ea4d21db5e148f0ULL, 0x20b0fce62fcd496fULL, - 0x2c1b912358b0ee31ULL, 0xb28317b818f5a308ULL, 0xa89c1e189ca6d2cfULL, - 0x0c6b18576aaadbc8ULL, 0xb65deaa91299fae3ULL, 0xfb2b794b7f1027e7ULL, - 0x04e4317f443b5bebULL, 0x4b852d325939d0a6ULL, 0xd5ae6beefb207ffcULL, - 0x309682b281c7d374ULL, 0xbae309a194c3b475ULL, 0x8cc3f97b13b49f05ULL, - 0x98a9422ff8293967ULL, 0x244b16b01076ff7cULL, 0xf8bf571c663d67eeULL, - 0x1f0d6758eee30da1ULL, 0xc9b611d97adeb9b7ULL, 0xb7afd5887b6c57a2ULL, - 0x6290ae846b984fe1ULL, 0x94df4cdeacc1a5fdULL, 0x058a5bd1c5483affULL, - 0x63166cc142ba3c37ULL, 0x8db8526eb2f76f40ULL, 0xe10880036f0d6d4eULL, - 0x9e0523c9971d311dULL, 0x45ec2824cc7cd691ULL, 0x575b8359e62382c9ULL, - 0xfa9e400dc4889995ULL, 0xd1823ecb45721568ULL, 0xdafd983b8206082fULL, - 0xaa7d29082386a8cbULL, 0x269fcd4403b87588ULL, 0x1b91f5f728bdd1e0ULL, - 0xe4669f39040201f6ULL, 0x7a1d7c218cf04adeULL, 0x65623c29d79ce5ceULL, - 0x2368449096c00bb1ULL, 0xab9bf1879da503baULL, 0xbc23ecb1a458058eULL, - 0x9a58df01bb401eccULL, 0xa070e868a85f143dULL, 0x4ff188307df2239eULL, - 0x14d565b41a641183ULL, 0xee13337452701602ULL, 0x950e3dcf3f285e09ULL, - 0x59930254b9c80953ULL, 0x3bf299408930da6dULL, 0xa955943f53691387ULL, - 0xa15edecaa9cb8784ULL, 0x29142127352be9a0ULL, 0x76f0371fff4e7afbULL, - 0x0239f450274f2228ULL, 0xbb073af01d5e868bULL, 0xbfc80571c10e96c1ULL, - 0xd267088568222e23ULL, 0x9671a3d48e80b5b0ULL, 0x55b5d38ae193bb81ULL, - 0x693ae2d0a18b04b8ULL, 0x5c48b4ecadd5335fULL, 0xfd743b194916a1caULL, - 0x2577018134be98c4ULL, 0xe77987e83c54a4adULL, 0x28e11014da33e1b9ULL, - 0x270cc59e226aa213ULL, 0x71495f756d1a5f60ULL, 0x9be853fb60afef77ULL, - 0xadc786a7f7443dbfULL, 0x0904456173b29a82ULL, 0x58bc7a66c232bd5eULL, - 0xf306558c673ac8b2ULL, 0x41f639c6b6c9772aULL, 0x216defe99fda35daULL, - 0x11640cc71c7be615ULL, 0x93c43694565c5527ULL, 0xea038e6246777839ULL, - 0xf9abf3ce5a3e2469ULL, 0x741e768d0fd312d2ULL, 0x0144b883ced652c6ULL, - 0xc20b5a5ba33f8552ULL, 0x1ae69633c3435a9dULL, 0x97a28ca4088cfdecULL, - 0x8824a43c1e96f420ULL, 0x37612fa66eeea746ULL, 0x6b4cb165f9cf0e5aULL, - 0x43aa1c06a0abfb4aULL, 0x7f4dc26ff162796bULL, 0x6cbacc8e54ed9b0fULL, - 0xa6b7ffefd2bb253eULL, 0x2e25bc95b0a29d4fULL, 0x86d6a58bdef1388cULL, - 0xded74ac576b6f054ULL, 0x8030bdbc2b45805dULL, 0x3c81af70e94d9289ULL, - 0x3eff6dda9e3100dbULL, 0xb38dc39fdfcc8847ULL, 0x123885528d17b87eULL, - 0xf2da0ed240b1b642ULL, 0x44cefadcd54bf9a9ULL, 0x1312200e433c7ee6ULL, - 0x9ffcc84f3a78c748ULL, 0xf0cd1f72248576bbULL, 0xec6974053638cfe4ULL, - 0x2ba7b67c0cec4e4cULL, 0xac2f4df3e5ce32edULL, 0xcb33d14326ea4c11ULL, - 0xa4e9044cc77e58bcULL, 0x5f513293d934fcefULL, 0x5dc9645506e55444ULL, - 0x50de418f317de40aULL, 0x388cb31a69dde259ULL, 0x2db4a83455820a86ULL, - 0x9010a91e84711ae9ULL, 
0x4df7f0b7b1498371ULL, 0xd62a2eabc0977179ULL, - 0x22fac097aa8d5c0eULL -}; - -static const u64 sbox3[256] = { - 0xf49fcc2ff1daf39bULL, 0x487fd5c66ff29281ULL, 0xe8a30667fcdca83fULL, - 0x2c9b4be3d2fcce63ULL, 0xda3ff74b93fbbbc2ULL, 0x2fa165d2fe70ba66ULL, - 0xa103e279970e93d4ULL, 0xbecdec77b0e45e71ULL, 0xcfb41e723985e497ULL, - 0xb70aaa025ef75017ULL, 0xd42309f03840b8e0ULL, 0x8efc1ad035898579ULL, - 0x96c6920be2b2abc5ULL, 0x66af4163375a9172ULL, 0x2174abdcca7127fbULL, - 0xb33ccea64a72ff41ULL, 0xf04a4933083066a5ULL, 0x8d970acdd7289af5ULL, - 0x8f96e8e031c8c25eULL, 0xf3fec02276875d47ULL, 0xec7bf310056190ddULL, - 0xf5adb0aebb0f1491ULL, 0x9b50f8850fd58892ULL, 0x4975488358b74de8ULL, - 0xa3354ff691531c61ULL, 0x0702bbe481d2c6eeULL, 0x89fb24057deded98ULL, - 0xac3075138596e902ULL, 0x1d2d3580172772edULL, 0xeb738fc28e6bc30dULL, - 0x5854ef8f63044326ULL, 0x9e5c52325add3bbeULL, 0x90aa53cf325c4623ULL, - 0xc1d24d51349dd067ULL, 0x2051cfeea69ea624ULL, 0x13220f0a862e7e4fULL, - 0xce39399404e04864ULL, 0xd9c42ca47086fcb7ULL, 0x685ad2238a03e7ccULL, - 0x066484b2ab2ff1dbULL, 0xfe9d5d70efbf79ecULL, 0x5b13b9dd9c481854ULL, - 0x15f0d475ed1509adULL, 0x0bebcd060ec79851ULL, 0xd58c6791183ab7f8ULL, - 0xd1187c5052f3eee4ULL, 0xc95d1192e54e82ffULL, 0x86eea14cb9ac6ca2ULL, - 0x3485beb153677d5dULL, 0xdd191d781f8c492aULL, 0xf60866baa784ebf9ULL, - 0x518f643ba2d08c74ULL, 0x8852e956e1087c22ULL, 0xa768cb8dc410ae8dULL, - 0x38047726bfec8e1aULL, 0xa67738b4cd3b45aaULL, 0xad16691cec0dde19ULL, - 0xc6d4319380462e07ULL, 0xc5a5876d0ba61938ULL, 0x16b9fa1fa58fd840ULL, - 0x188ab1173ca74f18ULL, 0xabda2f98c99c021fULL, 0x3e0580ab134ae816ULL, - 0x5f3b05b773645abbULL, 0x2501a2be5575f2f6ULL, 0x1b2f74004e7e8ba9ULL, - 0x1cd7580371e8d953ULL, 0x7f6ed89562764e30ULL, 0xb15926ff596f003dULL, - 0x9f65293da8c5d6b9ULL, 0x6ecef04dd690f84cULL, 0x4782275fff33af88ULL, - 0xe41433083f820801ULL, 0xfd0dfe409a1af9b5ULL, 0x4325a3342cdb396bULL, - 0x8ae77e62b301b252ULL, 0xc36f9e9f6655615aULL, 0x85455a2d92d32c09ULL, - 0xf2c7dea949477485ULL, 0x63cfb4c133a39ebaULL, 0x83b040cc6ebc5462ULL, - 0x3b9454c8fdb326b0ULL, 0x56f56a9e87ffd78cULL, 0x2dc2940d99f42bc6ULL, - 0x98f7df096b096e2dULL, 0x19a6e01e3ad852bfULL, 0x42a99ccbdbd4b40bULL, - 0xa59998af45e9c559ULL, 0x366295e807d93186ULL, 0x6b48181bfaa1f773ULL, - 0x1fec57e2157a0a1dULL, 0x4667446af6201ad5ULL, 0xe615ebcacfb0f075ULL, - 0xb8f31f4f68290778ULL, 0x22713ed6ce22d11eULL, 0x3057c1a72ec3c93bULL, - 0xcb46acc37c3f1f2fULL, 0xdbb893fd02aaf50eULL, 0x331fd92e600b9fcfULL, - 0xa498f96148ea3ad6ULL, 0xa8d8426e8b6a83eaULL, 0xa089b274b7735cdcULL, - 0x87f6b3731e524a11ULL, 0x118808e5cbc96749ULL, 0x9906e4c7b19bd394ULL, - 0xafed7f7e9b24a20cULL, 0x6509eadeeb3644a7ULL, 0x6c1ef1d3e8ef0edeULL, - 0xb9c97d43e9798fb4ULL, 0xa2f2d784740c28a3ULL, 0x7b8496476197566fULL, - 0x7a5be3e6b65f069dULL, 0xf96330ed78be6f10ULL, 0xeee60de77a076a15ULL, - 0x2b4bee4aa08b9bd0ULL, 0x6a56a63ec7b8894eULL, 0x02121359ba34fef4ULL, - 0x4cbf99f8283703fcULL, 0x398071350caf30c8ULL, 0xd0a77a89f017687aULL, - 0xf1c1a9eb9e423569ULL, 0x8c7976282dee8199ULL, 0x5d1737a5dd1f7abdULL, - 0x4f53433c09a9fa80ULL, 0xfa8b0c53df7ca1d9ULL, 0x3fd9dcbc886ccb77ULL, - 0xc040917ca91b4720ULL, 0x7dd00142f9d1dcdfULL, 0x8476fc1d4f387b58ULL, - 0x23f8e7c5f3316503ULL, 0x032a2244e7e37339ULL, 0x5c87a5d750f5a74bULL, - 0x082b4cc43698992eULL, 0xdf917becb858f63cULL, 0x3270b8fc5bf86ddaULL, - 0x10ae72bb29b5dd76ULL, 0x576ac94e7700362bULL, 0x1ad112dac61efb8fULL, - 0x691bc30ec5faa427ULL, 0xff246311cc327143ULL, 0x3142368e30e53206ULL, - 0x71380e31e02ca396ULL, 0x958d5c960aad76f1ULL, 0xf8d6f430c16da536ULL, - 0xc8ffd13f1be7e1d2ULL, 
0x7578ae66004ddbe1ULL, 0x05833f01067be646ULL, - 0xbb34b5ad3bfe586dULL, 0x095f34c9a12b97f0ULL, 0x247ab64525d60ca8ULL, - 0xdcdbc6f3017477d1ULL, 0x4a2e14d4decad24dULL, 0xbdb5e6d9be0a1eebULL, - 0x2a7e70f7794301abULL, 0xdef42d8a270540fdULL, 0x01078ec0a34c22c1ULL, - 0xe5de511af4c16387ULL, 0x7ebb3a52bd9a330aULL, 0x77697857aa7d6435ULL, - 0x004e831603ae4c32ULL, 0xe7a21020ad78e312ULL, 0x9d41a70c6ab420f2ULL, - 0x28e06c18ea1141e6ULL, 0xd2b28cbd984f6b28ULL, 0x26b75f6c446e9d83ULL, - 0xba47568c4d418d7fULL, 0xd80badbfe6183d8eULL, 0x0e206d7f5f166044ULL, - 0xe258a43911cbca3eULL, 0x723a1746b21dc0bcULL, 0xc7caa854f5d7cdd3ULL, - 0x7cac32883d261d9cULL, 0x7690c26423ba942cULL, 0x17e55524478042b8ULL, - 0xe0be477656a2389fULL, 0x4d289b5e67ab2da0ULL, 0x44862b9c8fbbfd31ULL, - 0xb47cc8049d141365ULL, 0x822c1b362b91c793ULL, 0x4eb14655fb13dfd8ULL, - 0x1ecbba0714e2a97bULL, 0x6143459d5cde5f14ULL, 0x53a8fbf1d5f0ac89ULL, - 0x97ea04d81c5e5b00ULL, 0x622181a8d4fdb3f3ULL, 0xe9bcd341572a1208ULL, - 0x1411258643cce58aULL, 0x9144c5fea4c6e0a4ULL, 0x0d33d06565cf620fULL, - 0x54a48d489f219ca1ULL, 0xc43e5eac6d63c821ULL, 0xa9728b3a72770dafULL, - 0xd7934e7b20df87efULL, 0xe35503b61a3e86e5ULL, 0xcae321fbc819d504ULL, - 0x129a50b3ac60bfa6ULL, 0xcd5e68ea7e9fb6c3ULL, 0xb01c90199483b1c7ULL, - 0x3de93cd5c295376cULL, 0xaed52edf2ab9ad13ULL, 0x2e60f512c0a07884ULL, - 0xbc3d86a3e36210c9ULL, 0x35269d9b163951ceULL, 0x0c7d6e2ad0cdb5faULL, - 0x59e86297d87f5733ULL, 0x298ef221898db0e7ULL, 0x55000029d1a5aa7eULL, - 0x8bc08ae1b5061b45ULL, 0xc2c31c2b6c92703aULL, 0x94cc596baf25ef42ULL, - 0x0a1d73db22540456ULL, 0x04b6a0f9d9c4179aULL, 0xeffdafa2ae3d3c60ULL, - 0xf7c8075bb49496c4ULL, 0x9cc5c7141d1cd4e3ULL, 0x78bd1638218e5534ULL, - 0xb2f11568f850246aULL, 0xedfabcfa9502bc29ULL, 0x796ce5f2da23051bULL, - 0xaae128b0dc93537cULL, 0x3a493da0ee4b29aeULL, 0xb5df6b2c416895d7ULL, - 0xfcabbd25122d7f37ULL, 0x70810b58105dc4b1ULL, 0xe10fdd37f7882a90ULL, - 0x524dcab5518a3f5cULL, 0x3c9e85878451255bULL, 0x4029828119bd34e2ULL, - 0x74a05b6f5d3ceccbULL, 0xb610021542e13ecaULL, 0x0ff979d12f59e2acULL, - 0x6037da27e4f9cc50ULL, 0x5e92975a0df1847dULL, 0xd66de190d3e623feULL, - 0x5032d6b87b568048ULL, 0x9a36b7ce8235216eULL, 0x80272a7a24f64b4aULL, - 0x93efed8b8c6916f7ULL, 0x37ddbff44cce1555ULL, 0x4b95db5d4b99bd25ULL, - 0x92d3fda169812fc0ULL, 0xfb1a4a9a90660bb6ULL, 0x730c196946a4b9b2ULL, - 0x81e289aa7f49da68ULL, 0x64669a0f83b1a05fULL, 0x27b3ff7d9644f48bULL, - 0xcc6b615c8db675b3ULL, 0x674f20b9bcebbe95ULL, 0x6f31238275655982ULL, - 0x5ae488713e45cf05ULL, 0xbf619f9954c21157ULL, 0xeabac46040a8eae9ULL, - 0x454c6fe9f2c0c1cdULL, 0x419cf6496412691cULL, 0xd3dc3bef265b0f70ULL, - 0x6d0e60f5c3578a9eULL -}; - -static const u64 sbox4[256] = { - 0x5b0e608526323c55ULL, 0x1a46c1a9fa1b59f5ULL, 0xa9e245a17c4c8ffaULL, - 0x65ca5159db2955d7ULL, 0x05db0a76ce35afc2ULL, 0x81eac77ea9113d45ULL, - 0x528ef88ab6ac0a0dULL, 0xa09ea253597be3ffULL, 0x430ddfb3ac48cd56ULL, - 0xc4b3a67af45ce46fULL, 0x4ececfd8fbe2d05eULL, 0x3ef56f10b39935f0ULL, - 0x0b22d6829cd619c6ULL, 0x17fd460a74df2069ULL, 0x6cf8cc8e8510ed40ULL, - 0xd6c824bf3a6ecaa7ULL, 0x61243d581a817049ULL, 0x048bacb6bbc163a2ULL, - 0xd9a38ac27d44cc32ULL, 0x7fddff5baaf410abULL, 0xad6d495aa804824bULL, - 0xe1a6a74f2d8c9f94ULL, 0xd4f7851235dee8e3ULL, 0xfd4b7f886540d893ULL, - 0x247c20042aa4bfdaULL, 0x096ea1c517d1327cULL, 0xd56966b4361a6685ULL, - 0x277da5c31221057dULL, 0x94d59893a43acff7ULL, 0x64f0c51ccdc02281ULL, - 0x3d33bcc4ff6189dbULL, 0xe005cb184ce66af1ULL, 0xff5ccd1d1db99beaULL, - 0xb0b854a7fe42980fULL, 0x7bd46a6a718d4b9fULL, 0xd10fa8cc22a5fd8cULL, - 0xd31484952be4bd31ULL, 
0xc7fa975fcb243847ULL, 0x4886ed1e5846c407ULL, - 0x28cddb791eb70b04ULL, 0xc2b00be2f573417fULL, 0x5c9590452180f877ULL, - 0x7a6bddfff370eb00ULL, 0xce509e38d6d9d6a4ULL, 0xebeb0f00647fa702ULL, - 0x1dcc06cf76606f06ULL, 0xe4d9f28ba286ff0aULL, 0xd85a305dc918c262ULL, - 0x475b1d8732225f54ULL, 0x2d4fb51668ccb5feULL, 0xa679b9d9d72bba20ULL, - 0x53841c0d912d43a5ULL, 0x3b7eaa48bf12a4e8ULL, 0x781e0e47f22f1ddfULL, - 0xeff20ce60ab50973ULL, 0x20d261d19dffb742ULL, 0x16a12b03062a2e39ULL, - 0x1960eb2239650495ULL, 0x251c16fed50eb8b8ULL, 0x9ac0c330f826016eULL, - 0xed152665953e7671ULL, 0x02d63194a6369570ULL, 0x5074f08394b1c987ULL, - 0x70ba598c90b25ce1ULL, 0x794a15810b9742f6ULL, 0x0d5925e9fcaf8c6cULL, - 0x3067716cd868744eULL, 0x910ab077e8d7731bULL, 0x6a61bbdb5ac42f61ULL, - 0x93513efbf0851567ULL, 0xf494724b9e83e9d5ULL, 0xe887e1985c09648dULL, - 0x34b1d3c675370cfdULL, 0xdc35e433bc0d255dULL, 0xd0aab84234131be0ULL, - 0x08042a50b48b7eafULL, 0x9997c4ee44a3ab35ULL, 0x829a7b49201799d0ULL, - 0x263b8307b7c54441ULL, 0x752f95f4fd6a6ca6ULL, 0x927217402c08c6e5ULL, - 0x2a8ab754a795d9eeULL, 0xa442f7552f72943dULL, 0x2c31334e19781208ULL, - 0x4fa98d7ceaee6291ULL, 0x55c3862f665db309ULL, 0xbd0610175d53b1f3ULL, - 0x46fe6cb840413f27ULL, 0x3fe03792df0cfa59ULL, 0xcfe700372eb85e8fULL, - 0xa7be29e7adbce118ULL, 0xe544ee5cde8431ddULL, 0x8a781b1b41f1873eULL, - 0xa5c94c78a0d2f0e7ULL, 0x39412e2877b60728ULL, 0xa1265ef3afc9a62cULL, - 0xbcc2770c6a2506c5ULL, 0x3ab66dd5dce1ce12ULL, 0xe65499d04a675b37ULL, - 0x7d8f523481bfd216ULL, 0x0f6f64fcec15f389ULL, 0x74efbe618b5b13c8ULL, - 0xacdc82b714273e1dULL, 0xdd40bfe003199d17ULL, 0x37e99257e7e061f8ULL, - 0xfa52626904775aaaULL, 0x8bbbf63a463d56f9ULL, 0xf0013f1543a26e64ULL, - 0xa8307e9f879ec898ULL, 0xcc4c27a4150177ccULL, 0x1b432f2cca1d3348ULL, - 0xde1d1f8f9f6fa013ULL, 0x606602a047a7ddd6ULL, 0xd237ab64cc1cb2c7ULL, - 0x9b938e7225fcd1d3ULL, 0xec4e03708e0ff476ULL, 0xfeb2fbda3d03c12dULL, - 0xae0bced2ee43889aULL, 0x22cb8923ebfb4f43ULL, 0x69360d013cf7396dULL, - 0x855e3602d2d4e022ULL, 0x073805bad01f784cULL, 0x33e17a133852f546ULL, - 0xdf4874058ac7b638ULL, 0xba92b29c678aa14aULL, 0x0ce89fc76cfaadcdULL, - 0x5f9d4e0908339e34ULL, 0xf1afe9291f5923b9ULL, 0x6e3480f60f4a265fULL, - 0xeebf3a2ab29b841cULL, 0xe21938a88f91b4adULL, 0x57dfeff845c6d3c3ULL, - 0x2f006b0bf62caaf2ULL, 0x62f479ef6f75ee78ULL, 0x11a55ad41c8916a9ULL, - 0xf229d29084fed453ULL, 0x42f1c27b16b000e6ULL, 0x2b1f76749823c074ULL, - 0x4b76eca3c2745360ULL, 0x8c98f463b91691bdULL, 0x14bcc93cf1ade66aULL, - 0x8885213e6d458397ULL, 0x8e177df0274d4711ULL, 0xb49b73b5503f2951ULL, - 0x10168168c3f96b6bULL, 0x0e3d963b63cab0aeULL, 0x8dfc4b5655a1db14ULL, - 0xf789f1356e14de5cULL, 0x683e68af4e51dac1ULL, 0xc9a84f9d8d4b0fd9ULL, - 0x3691e03f52a0f9d1ULL, 0x5ed86e46e1878e80ULL, 0x3c711a0e99d07150ULL, - 0x5a0865b20c4e9310ULL, 0x56fbfc1fe4f0682eULL, 0xea8d5de3105edf9bULL, - 0x71abfdb12379187aULL, 0x2eb99de1bee77b9cULL, 0x21ecc0ea33cf4523ULL, - 0x59a4d7521805c7a1ULL, 0x3896f5eb56ae7c72ULL, 0xaa638f3db18f75dcULL, - 0x9f39358dabe9808eULL, 0xb7defa91c00b72acULL, 0x6b5541fd62492d92ULL, - 0x6dc6dee8f92e4d5bULL, 0x353f57abc4beea7eULL, 0x735769d6da5690ceULL, - 0x0a234aa642391484ULL, 0xf6f9508028f80d9dULL, 0xb8e319a27ab3f215ULL, - 0x31ad9c1151341a4dULL, 0x773c22a57bef5805ULL, 0x45c7561a07968633ULL, - 0xf913da9e249dbe36ULL, 0xda652d9b78a64c68ULL, 0x4c27a97f3bc334efULL, - 0x76621220e66b17f4ULL, 0x967743899acd7d0bULL, 0xf3ee5bcae0ed6782ULL, - 0x409f753600c879fcULL, 0x06d09a39b5926db6ULL, 0x6f83aeb0317ac588ULL, - 0x01e6ca4a86381f21ULL, 0x66ff3462d19f3025ULL, 0x72207c24ddfd3bfbULL, - 0x4af6b6d3e2ece2ebULL, 
0x9c994dbec7ea08deULL, 0x49ace597b09a8bc4ULL, - 0xb38c4766cf0797baULL, 0x131b9373c57c2a75ULL, 0xb1822cce61931e58ULL, - 0x9d7555b909ba1c0cULL, 0x127fafdd937d11d2ULL, 0x29da3badc66d92e4ULL, - 0xa2c1d57154c2ecbcULL, 0x58c5134d82f6fe24ULL, 0x1c3ae3515b62274fULL, - 0xe907c82e01cb8126ULL, 0xf8ed091913e37fcbULL, 0x3249d8f9c80046c9ULL, - 0x80cf9bede388fb63ULL, 0x1881539a116cf19eULL, 0x5103f3f76bd52457ULL, - 0x15b7e6f5ae47f7a8ULL, 0xdbd7c6ded47e9ccfULL, 0x44e55c410228bb1aULL, - 0xb647d4255edb4e99ULL, 0x5d11882bb8aafc30ULL, 0xf5098bbb29d3212aULL, - 0x8fb5ea14e90296b3ULL, 0x677b942157dd025aULL, 0xfb58e7c0a390acb5ULL, - 0x89d3674c83bd4a01ULL, 0x9e2da4df4bf3b93bULL, 0xfcc41e328cab4829ULL, - 0x03f38c96ba582c52ULL, 0xcad1bdbd7fd85db2ULL, 0xbbb442c16082ae83ULL, - 0xb95fe86ba5da9ab0ULL, 0xb22e04673771a93fULL, 0x845358c9493152d8ULL, - 0xbe2a488697b4541eULL, 0x95a2dc2dd38e6966ULL, 0xc02c11ac923c852bULL, - 0x2388b1990df2a87bULL, 0x7c8008fa1b4f37beULL, 0x1f70d0c84d54e503ULL, - 0x5490adec7ece57d4ULL, 0x002b3c27d9063a3aULL, 0x7eaea3848030a2bfULL, - 0xc602326ded2003c0ULL, 0x83a7287d69a94086ULL, 0xc57a5fcb30f57a8aULL, - 0xb56844e479ebe779ULL, 0xa373b40f05dcbce9ULL, 0xd71a786e88570ee2ULL, - 0x879cbacdbde8f6a0ULL, 0x976ad1bcc164a32fULL, 0xab21e25e9666d78bULL, - 0x901063aae5e5c33cULL, 0x9818b34448698d90ULL, 0xe36487ae3e1e8abbULL, - 0xafbdf931893bdcb4ULL, 0x6345a0dc5fbbd519ULL, 0x8628fe269b9465caULL, - 0x1e5d01603f9c51ecULL, 0x4de44006a15049b7ULL, 0xbf6c70e5f776cbb1ULL, - 0x411218f2ef552bedULL, 0xcb0c0708705a36a3ULL, 0xe74d14754f986044ULL, - 0xcd56d9430ea8280eULL, 0xc12591d7535f5065ULL, 0xc83223f1720aef96ULL, - 0xc3a0396f7363a51fULL -}; - - -static void tgr192_round(u64 * ra, u64 * rb, u64 * rc, u64 x, int mul) -{ - u64 a = *ra; - u64 b = *rb; - u64 c = *rc; - - c ^= x; - a -= sbox1[c & 0xff] ^ sbox2[(c >> 16) & 0xff] - ^ sbox3[(c >> 32) & 0xff] ^ sbox4[(c >> 48) & 0xff]; - b += sbox4[(c >> 8) & 0xff] ^ sbox3[(c >> 24) & 0xff] - ^ sbox2[(c >> 40) & 0xff] ^ sbox1[(c >> 56) & 0xff]; - b *= mul; - - *ra = a; - *rb = b; - *rc = c; -} - - -static void tgr192_pass(u64 * ra, u64 * rb, u64 * rc, u64 * x, int mul) -{ - u64 a = *ra; - u64 b = *rb; - u64 c = *rc; - - tgr192_round(&a, &b, &c, x[0], mul); - tgr192_round(&b, &c, &a, x[1], mul); - tgr192_round(&c, &a, &b, x[2], mul); - tgr192_round(&a, &b, &c, x[3], mul); - tgr192_round(&b, &c, &a, x[4], mul); - tgr192_round(&c, &a, &b, x[5], mul); - tgr192_round(&a, &b, &c, x[6], mul); - tgr192_round(&b, &c, &a, x[7], mul); - - *ra = a; - *rb = b; - *rc = c; -} - - -static void tgr192_key_schedule(u64 * x) -{ - x[0] -= x[7] ^ 0xa5a5a5a5a5a5a5a5ULL; - x[1] ^= x[0]; - x[2] += x[1]; - x[3] -= x[2] ^ ((~x[1]) << 19); - x[4] ^= x[3]; - x[5] += x[4]; - x[6] -= x[5] ^ ((~x[4]) >> 23); - x[7] ^= x[6]; - x[0] += x[7]; - x[1] -= x[0] ^ ((~x[7]) << 19); - x[2] ^= x[1]; - x[3] += x[2]; - x[4] -= x[3] ^ ((~x[2]) >> 23); - x[5] ^= x[4]; - x[6] += x[5]; - x[7] -= x[6] ^ 0x0123456789abcdefULL; -} - - -/**************** - * Transform the message DATA which consists of 512 bytes (8 words) - */ - -static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data) -{ - u64 a, b, c, aa, bb, cc; - u64 x[8]; - int i; - - for (i = 0; i < 8; i++) - x[i] = get_unaligned_le64(data + i * sizeof(__le64)); - - /* save */ - a = aa = tctx->a; - b = bb = tctx->b; - c = cc = tctx->c; - - tgr192_pass(&a, &b, &c, x, 5); - tgr192_key_schedule(x); - tgr192_pass(&c, &a, &b, x, 7); - tgr192_key_schedule(x); - tgr192_pass(&b, &c, &a, x, 9); - - - /* feedforward */ - a ^= aa; - b -= bb; - c += cc; - /* store */ - 
tctx->a = a;
- tctx->b = b;
- tctx->c = c;
-}
-
-static int tgr192_init(struct shash_desc *desc)
-{
- struct tgr192_ctx *tctx = shash_desc_ctx(desc);
-
- tctx->a = 0x0123456789abcdefULL;
- tctx->b = 0xfedcba9876543210ULL;
- tctx->c = 0xf096a5b4c3b2e187ULL;
- tctx->nblocks = 0;
- tctx->count = 0;
-
- return 0;
-}
-
-
-/* Update the message digest with the contents
- * of INBUF with length INLEN. */
-static int tgr192_update(struct shash_desc *desc, const u8 *inbuf,
- unsigned int len)
-{
- struct tgr192_ctx *tctx = shash_desc_ctx(desc);
-
- if (tctx->count == 64) { /* flush the buffer */
- tgr192_transform(tctx, tctx->hash);
- tctx->count = 0;
- tctx->nblocks++;
- }
- if (!inbuf) {
- return 0;
- }
- if (tctx->count) {
- for (; len && tctx->count < 64; len--) {
- tctx->hash[tctx->count++] = *inbuf++;
- }
- tgr192_update(desc, NULL, 0);
- if (!len) {
- return 0;
- }
-
- }
-
- while (len >= 64) {
- tgr192_transform(tctx, inbuf);
- tctx->count = 0;
- tctx->nblocks++;
- len -= 64;
- inbuf += 64;
- }
- for (; len && tctx->count < 64; len--) {
- tctx->hash[tctx->count++] = *inbuf++;
- }
-
- return 0;
-}
-
-
-
-/* The routine terminates the computation */
-static int tgr192_final(struct shash_desc *desc, u8 * out)
-{
- struct tgr192_ctx *tctx = shash_desc_ctx(desc);
- __be64 *dst = (__be64 *)out;
- __be64 *be64p;
- __le32 *le32p;
- u32 t, msb, lsb;
-
- tgr192_update(desc, NULL, 0); /* flush */
-
- msb = 0;
- t = tctx->nblocks;
- if ((lsb = t << 6) < t) { /* multiply by 64 to make a byte count */
- msb++;
- }
- msb += t >> 26;
- t = lsb;
- if ((lsb = t + tctx->count) < t) { /* add the count */
- msb++;
- }
- t = lsb;
- if ((lsb = t << 3) < t) { /* multiply by 8 to make a bit count */
- msb++;
- }
- msb += t >> 29;
-
- if (tctx->count < 56) { /* enough room */
- tctx->hash[tctx->count++] = 0x01; /* pad */
- while (tctx->count < 56) {
- tctx->hash[tctx->count++] = 0; /* pad */
- }
- } else { /* need one extra block */
- tctx->hash[tctx->count++] = 0x01; /* pad character */
- while (tctx->count < 64) {
- tctx->hash[tctx->count++] = 0;
- }
- tgr192_update(desc, NULL, 0); /* flush */
- memset(tctx->hash, 0, 56); /* fill next block with zeroes */
- }
- /* append the 64 bit count */
- le32p = (__le32 *)&tctx->hash[56];
- le32p[0] = cpu_to_le32(lsb);
- le32p[1] = cpu_to_le32(msb);
-
- tgr192_transform(tctx, tctx->hash);
-
- be64p = (__be64 *)tctx->hash;
- dst[0] = be64p[0] = cpu_to_be64(tctx->a);
- dst[1] = be64p[1] = cpu_to_be64(tctx->b);
- dst[2] = be64p[2] = cpu_to_be64(tctx->c);
-
- return 0;
-}
-
-static int tgr160_final(struct shash_desc *desc, u8 * out)
-{
- u8 D[64];
-
- tgr192_final(desc, D);
- memcpy(out, D, TGR160_DIGEST_SIZE);
- memzero_explicit(D, TGR192_DIGEST_SIZE);
-
- return 0;
-}
-
-static int tgr128_final(struct shash_desc *desc, u8 * out)
-{
- u8 D[64];
-
- tgr192_final(desc, D);
- memcpy(out, D, TGR128_DIGEST_SIZE);
- memzero_explicit(D, TGR192_DIGEST_SIZE);
-
- return 0;
-}
-
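The msb/lsb arithmetic in tgr192_final() above is a 32-bit emulation of a single 64-bit value, inherited from the GnuPG code: the total message length in bits, which Tiger appends at bytes 56..63 of the final block, low word first. As a minimal sketch (not part of the patch; the nblocks/count names are taken from struct tgr192_ctx), the same quantity in native 64-bit arithmetic is:

#include <stdint.h>

/*
 * What the carry-propagating msb/lsb pair in tgr192_final() computes:
 * nblocks complete 64-byte blocks plus count buffered bytes, as a bit
 * count. tgr192_final() then stores it little-endian at hash[56]:
 * lsb == (uint32_t)bits, msb == (uint32_t)(bits >> 32).
 */
static uint64_t tgr192_bit_count(uint32_t nblocks, uint32_t count)
{
	return (((uint64_t)nblocks * 64) + count) * 8;
}
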
-static struct shash_alg tgr_algs[3] = { {
- .digestsize = TGR192_DIGEST_SIZE,
- .init = tgr192_init,
- .update = tgr192_update,
- .final = tgr192_final,
- .descsize = sizeof(struct tgr192_ctx),
- .base = {
- .cra_name = "tgr192",
- .cra_driver_name = "tgr192-generic",
- .cra_blocksize = TGR192_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = TGR160_DIGEST_SIZE,
- .init = tgr192_init,
- .update = tgr192_update,
- .final = tgr160_final,
- .descsize = sizeof(struct tgr192_ctx),
- .base = {
- .cra_name = "tgr160",
- .cra_driver_name = "tgr160-generic",
- .cra_blocksize = TGR192_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = TGR128_DIGEST_SIZE,
- .init = tgr192_init,
- .update = tgr192_update,
- .final = tgr128_final,
- .descsize = sizeof(struct tgr192_ctx),
- .base = {
- .cra_name = "tgr128",
- .cra_driver_name = "tgr128-generic",
- .cra_blocksize = TGR192_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init tgr192_mod_init(void)
-{
- return crypto_register_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
-}
-
-static void __exit tgr192_mod_fini(void)
-{
- crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
-}
-
-MODULE_ALIAS_CRYPTO("tgr192");
-MODULE_ALIAS_CRYPTO("tgr160");
-MODULE_ALIAS_CRYPTO("tgr128");
-
-subsys_initcall(tgr192_mod_init);
-module_exit(tgr192_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Tiger Message Digest Algorithm");
-- cgit v1.2.3-59-g8ed1b
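The crypto_register_shashes() call deleted above is what made "tgr192", "tgr160" and "tgr128" resolvable by name through the kernel's shash API. For context, here is a minimal sketch of how a hypothetical out-of-tree consumer would have reached the algorithm; once this commit lands, the by-name lookup simply fails. The calls are the stock <crypto/hash.h> API, but the module itself is illustrative only:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>

static int __init tgr_probe_init(void)
{
	/* Resolves against whatever still registers "tgr192"; with
	 * tgr192.c gone this returns ERR_PTR(-ENOENT). */
	struct crypto_shash *tfm = crypto_alloc_shash("tgr192", 0, 0);

	if (IS_ERR(tfm)) {
		pr_info("tgr192: no provider (err %ld)\n", PTR_ERR(tfm));
		return 0;
	}

	pr_info("tgr192: digest size %u\n", crypto_shash_digestsize(tfm));
	crypto_free_shash(tfm);
	return 0;
}

static void __exit tgr_probe_exit(void)
{
}

module_init(tgr_probe_init);
module_exit(tgr_probe_exit);
MODULE_LICENSE("GPL");
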
From 663f63ee6d9cdc68adf9afca5427e5c2b5b4ae2d Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 21 Jan 2021 14:07:33 +0100
Subject: crypto: salsa20 - remove Salsa20 stream cipher algorithm

Salsa20 is not used anywhere in the kernel, is not suitable for disk
encryption, and is widely considered to have been superseded by
ChaCha20. So let's remove it.

Signed-off-by: Ard Biesheuvel
Acked-by: Mike Snitzer
Signed-off-by: Herbert Xu
---
 .../admin-guide/device-mapper/dm-integrity.rst |    4 +-
 crypto/Kconfig                                 |   12 -
 crypto/Makefile                                |    1 -
 crypto/salsa20_generic.c                       |  212 ----
 crypto/tcrypt.c                                |   11 +-
 crypto/testmgr.c                               |    6 -
 crypto/testmgr.h                               | 1162 --------------------
 7 files changed, 3 insertions(+), 1405 deletions(-)
 delete mode 100644 crypto/salsa20_generic.c
(limited to 'crypto')

diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index 4e6f504474ac..d56112e2e354 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -143,8 +143,8 @@ recalculate
 journal_crypt:algorithm(:key) (the key is optional)
 Encrypt the journal using given algorithm to make sure that the
 attacker can't read the journal. You can use a block cipher here
- (such as "cbc(aes)") or a stream cipher (for example "chacha20",
- "salsa20" or "ctr(aes)").
+ (such as "cbc(aes)") or a stream cipher (for example "chacha20"
+ or "ctr(aes)").

 The journal contains history of last writes to the block device,
 an attacker reading the journal could see the last sector numbers

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8d25d689a705..9779c7f7531f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1400,18 +1400,6 @@ config CRYPTO_KHAZAD
 See also:

-config CRYPTO_SALSA20
- tristate "Salsa20 stream cipher algorithm"
- select CRYPTO_SKCIPHER
- help
- Salsa20 stream cipher algorithm.
-
- Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
- Stream Cipher Project. See
-
- The Salsa20 stream cipher algorithm is designed by Daniel J.
- Bernstein . See
-
 config CRYPTO_CHACHA20
 tristate "ChaCha stream cipher algorithms"
 select CRYPTO_LIB_CHACHA_GENERIC

diff --git a/crypto/Makefile b/crypto/Makefile
index 6b9622f21f7f..cf23affb1678 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -138,7 +138,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
 obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
 obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_SEED) += seed.o
-obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
 obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o

diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
deleted file mode 100644
index 3418869dabef..000000000000
--- a/crypto/salsa20_generic.c
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Salsa20: Salsa20 stream cipher algorithm
- *
- * Copyright (c) 2007 Tan Swee Heng
- *
- * Derived from:
- * - salsa20.c: Public domain C code by Daniel J. Bernstein
- *
- * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream
- * Cipher Project. It is designed by Daniel J. Bernstein .
- * More information about eSTREAM and Salsa20 can be found here:
- * https://www.ecrypt.eu.org/stream/
- * https://cr.yp.to/snuffle.html
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include
-#include
-#include
-
-#define SALSA20_IV_SIZE 8
-#define SALSA20_MIN_KEY_SIZE 16
-#define SALSA20_MAX_KEY_SIZE 32
-#define SALSA20_BLOCK_SIZE 64
-
-struct salsa20_ctx {
- u32 initial_state[16];
-};
-
-static void salsa20_block(u32 *state, __le32 *stream)
-{
- u32 x[16];
- int i;
-
- memcpy(x, state, sizeof(x));
-
- for (i = 0; i < 20; i += 2) {
- x[ 4] ^= rol32((x[ 0] + x[12]), 7);
- x[ 8] ^= rol32((x[ 4] + x[ 0]), 9);
- x[12] ^= rol32((x[ 8] + x[ 4]), 13);
- x[ 0] ^= rol32((x[12] + x[ 8]), 18);
- x[ 9] ^= rol32((x[ 5] + x[ 1]), 7);
- x[13] ^= rol32((x[ 9] + x[ 5]), 9);
- x[ 1] ^= rol32((x[13] + x[ 9]), 13);
- x[ 5] ^= rol32((x[ 1] + x[13]), 18);
- x[14] ^= rol32((x[10] + x[ 6]), 7);
- x[ 2] ^= rol32((x[14] + x[10]), 9);
- x[ 6] ^= rol32((x[ 2] + x[14]), 13);
- x[10] ^= rol32((x[ 6] + x[ 2]), 18);
- x[ 3] ^= rol32((x[15] + x[11]), 7);
- x[ 7] ^= rol32((x[ 3] + x[15]), 9);
- x[11] ^= rol32((x[ 7] + x[ 3]), 13);
- x[15] ^= rol32((x[11] + x[ 7]), 18);
- x[ 1] ^= rol32((x[ 0] + x[ 3]), 7);
- x[ 2] ^= rol32((x[ 1] + x[ 0]), 9);
- x[ 3] ^= rol32((x[ 2] + x[ 1]), 13);
- x[ 0] ^= rol32((x[ 3] + x[ 2]), 18);
- x[ 6] ^= rol32((x[ 5] + x[ 4]), 7);
- x[ 7] ^= rol32((x[ 6] + x[ 5]), 9);
- x[ 4] ^= rol32((x[ 7] + x[ 6]), 13);
- x[ 5] ^= rol32((x[ 4] + x[ 7]), 18);
- x[11] ^= rol32((x[10] + x[ 9]), 7);
- x[ 8] ^= rol32((x[11] + x[10]), 9);
- x[ 9] ^= rol32((x[ 8] + x[11]), 13);
- x[10] ^= rol32((x[ 9] + x[ 8]), 18);
- x[12] ^= rol32((x[15] + x[14]), 7);
- x[13] ^= rol32((x[12] + x[15]), 9);
- x[14] ^= rol32((x[13] + x[12]), 13);
- x[15] ^= rol32((x[14] + x[13]), 18);
- }
-
- for (i = 0; i < 16; i++)
- stream[i] = cpu_to_le32(x[i] + state[i]);
-
- if (++state[8] == 0)
- state[9]++;
-}
-
-static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
-{
- __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
-
- while (bytes >= SALSA20_BLOCK_SIZE) {
- salsa20_block(state, stream);
- crypto_xor_cpy(dst, src, (const u8 *)stream,
- SALSA20_BLOCK_SIZE);
- bytes -= SALSA20_BLOCK_SIZE;
- dst += 
SALSA20_BLOCK_SIZE; - src += SALSA20_BLOCK_SIZE; - } - if (bytes) { - salsa20_block(state, stream); - crypto_xor_cpy(dst, src, (const u8 *)stream, bytes); - } -} - -static void salsa20_init(u32 *state, const struct salsa20_ctx *ctx, - const u8 *iv) -{ - memcpy(state, ctx->initial_state, sizeof(ctx->initial_state)); - state[6] = get_unaligned_le32(iv + 0); - state[7] = get_unaligned_le32(iv + 4); -} - -static int salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize) -{ - static const char sigma[16] = "expand 32-byte k"; - static const char tau[16] = "expand 16-byte k"; - struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); - const char *constants; - - if (keysize != SALSA20_MIN_KEY_SIZE && - keysize != SALSA20_MAX_KEY_SIZE) - return -EINVAL; - - ctx->initial_state[1] = get_unaligned_le32(key + 0); - ctx->initial_state[2] = get_unaligned_le32(key + 4); - ctx->initial_state[3] = get_unaligned_le32(key + 8); - ctx->initial_state[4] = get_unaligned_le32(key + 12); - if (keysize == 32) { /* recommended */ - key += 16; - constants = sigma; - } else { /* keysize == 16 */ - constants = tau; - } - ctx->initial_state[11] = get_unaligned_le32(key + 0); - ctx->initial_state[12] = get_unaligned_le32(key + 4); - ctx->initial_state[13] = get_unaligned_le32(key + 8); - ctx->initial_state[14] = get_unaligned_le32(key + 12); - ctx->initial_state[0] = get_unaligned_le32(constants + 0); - ctx->initial_state[5] = get_unaligned_le32(constants + 4); - ctx->initial_state[10] = get_unaligned_le32(constants + 8); - ctx->initial_state[15] = get_unaligned_le32(constants + 12); - - /* space for the nonce; it will be overridden for each request */ - ctx->initial_state[6] = 0; - ctx->initial_state[7] = 0; - - /* initial block number */ - ctx->initial_state[8] = 0; - ctx->initial_state[9] = 0; - - return 0; -} - -static int salsa20_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - salsa20_init(state, ctx, req->iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static struct skcipher_alg alg = { - .base.cra_name = "salsa20", - .base.cra_driver_name = "salsa20-generic", - .base.cra_priority = 100, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct salsa20_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = SALSA20_MIN_KEY_SIZE, - .max_keysize = SALSA20_MAX_KEY_SIZE, - .ivsize = SALSA20_IV_SIZE, - .chunksize = SALSA20_BLOCK_SIZE, - .setkey = salsa20_setkey, - .encrypt = salsa20_crypt, - .decrypt = salsa20_crypt, -}; - -static int __init salsa20_generic_mod_init(void) -{ - return crypto_register_skcipher(&alg); -} - -static void __exit salsa20_generic_mod_fini(void) -{ - crypto_unregister_skcipher(&alg); -} - -subsys_initcall(salsa20_generic_mod_init); -module_exit(salsa20_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm"); -MODULE_ALIAS_CRYPTO("salsa20"); -MODULE_ALIAS_CRYPTO("salsa20-generic"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 696c44ef465e..2877b88cfa45 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -71,7 +71,7 @@ static const char 
*check[] = { "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", "seed", "salsa20", "rmd160", + "camellia", "seed", "rmd160", "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", "streebog256", "streebog512", NULL @@ -1835,10 +1835,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("sha224"); break; - case 34: - ret += tcrypt_test("salsa20"); - break; - case 35: ret += tcrypt_test("gcm(aes)"); break; @@ -2153,11 +2149,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_32_48_64); break; - case 206: - test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0, - speed_template_16_32); - break; - case 207: test_cipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0, speed_template_16_32); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index b87802ffb554..1a4103b1b202 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5282,12 +5282,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .akcipher = __VECS(rsa_tv_template) } - }, { - .alg = "salsa20", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(salsa20_stream_tv_template) - } }, { .alg = "sha1", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 851c107a5584..99aca08263d2 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -24409,1168 +24409,6 @@ static const struct cipher_testvec seed_tv_template[] = { } }; -static const struct cipher_testvec salsa20_stream_tv_template[] = { - /* - * Testvectors from verified.test-vectors submitted to ECRYPT. - * They are truncated to size 39, 64, 111, 129 to test a variety - * of input length. 
- */ - { /* Set 3, vector 0 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", - .klen = 16, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\x2D\xD5\xC3\xF7\xBA\x2B\x20\xF7" - "\x68\x02\x41\x0C\x68\x86\x88\x89" - "\x5A\xD8\xC1\xBD\x4E\xA6\xC9\xB1" - "\x40\xFB\x9B\x90\xE2\x10\x49\xBF" - "\x58\x3F\x52\x79\x70\xEB\xC1", - .len = 39, - }, { /* Set 5, vector 0 */ - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 16, - .iv = "\x80\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\xB6\x6C\x1E\x44\x46\xDD\x95\x57" - "\xE5\x78\xE2\x23\xB0\xB7\x68\x01" - "\x7B\x23\xB2\x67\xBB\x02\x34\xAE" - "\x46\x26\xBF\x44\x3F\x21\x97\x76" - "\x43\x6F\xB1\x9F\xD0\xE8\x86\x6F" - "\xCD\x0D\xE9\xA9\x53\x8F\x4A\x09" - "\xCA\x9A\xC0\x73\x2E\x30\xBC\xF9" - "\x8E\x4F\x13\xE4\xB9\xE2\x01\xD9", - .len = 64, - }, { /* Set 3, vector 27 */ - .key = "\x1B\x1C\x1D\x1E\x1F\x20\x21\x22" - "\x23\x24\x25\x26\x27\x28\x29\x2A" - "\x2B\x2C\x2D\x2E\x2F\x30\x31\x32" - "\x33\x34\x35\x36\x37\x38\x39\x3A", - .klen = 32, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\xAE\x39\x50\x8E\xAC\x9A\xEC\xE7" - "\xBF\x97\xBB\x20\xB9\xDE\xE4\x1F" - "\x87\xD9\x47\xF8\x28\x91\x35\x98" - "\xDB\x72\xCC\x23\x29\x48\x56\x5E" - "\x83\x7E\x0B\xF3\x7D\x5D\x38\x7B" - "\x2D\x71\x02\xB4\x3B\xB5\xD8\x23" - "\xB0\x4A\xDF\x3C\xEC\xB6\xD9\x3B" - "\x9B\xA7\x52\xBE\xC5\xD4\x50\x59" - "\x15\x14\xB4\x0E\x40\xE6\x53\xD1" - "\x83\x9C\x5B\xA0\x92\x29\x6B\x5E" - "\x96\x5B\x1E\x2F\xD3\xAC\xC1\x92" - "\xB1\x41\x3F\x19\x2F\xC4\x3B\xC6" - "\x95\x46\x45\x54\xE9\x75\x03\x08" - "\x44\xAF\xE5\x8A\x81\x12\x09", - .len = 111, - }, { /* Set 5, vector 27 */ - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 32, - .iv = "\x00\x00\x00\x10\x00\x00\x00\x00", - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00", - .ctext = 
"\xD2\xDB\x1A\x5C\xF1\xC1\xAC\xDB" - "\xE8\x1A\x7A\x43\x40\xEF\x53\x43" - "\x5E\x7F\x4B\x1A\x50\x52\x3F\x8D" - "\x28\x3D\xCF\x85\x1D\x69\x6E\x60" - "\xF2\xDE\x74\x56\x18\x1B\x84\x10" - "\xD4\x62\xBA\x60\x50\xF0\x61\xF2" - "\x1C\x78\x7F\xC1\x24\x34\xAF\x58" - "\xBF\x2C\x59\xCA\x90\x77\xF3\xB0" - "\x5B\x4A\xDF\x89\xCE\x2C\x2F\xFC" - "\x67\xF0\xE3\x45\xE8\xB3\xB3\x75" - "\xA0\x95\x71\xA1\x29\x39\x94\xCA" - "\x45\x2F\xBD\xCB\x10\xB6\xBE\x9F" - "\x8E\xF9\xB2\x01\x0A\x5A\x0A\xB7" - "\x6B\x9D\x70\x8E\x4B\xD6\x2F\xCD" - "\x2E\x40\x48\x75\xE9\xE2\x21\x45" - "\x0B\xC9\xB6\xB5\x66\xBC\x9A\x59" - "\x5A", - .len = 129, - }, { /* large test vector generated using Crypto++ */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .klen = 32, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x03\x06\x09\x0c\x0f\x12\x15" - "\x18\x1b\x1e\x21\x24\x27\x2a\x2d" - "\x30\x33\x36\x39\x3c\x3f\x42\x45" - "\x48\x4b\x4e\x51\x54\x57\x5a\x5d" - "\x60\x63\x66\x69\x6c\x6f\x72\x75" - "\x78\x7b\x7e\x81\x84\x87\x8a\x8d" - "\x90\x93\x96\x99\x9c\x9f\xa2\xa5" - "\xa8\xab\xae\xb1\xb4\xb7\xba\xbd" - "\xc0\xc3\xc6\xc9\xcc\xcf\xd2\xd5" - "\xd8\xdb\xde\xe1\xe4\xe7\xea\xed" - "\xf0\xf3\xf6\xf9\xfc\xff\x02\x05" - "\x08\x0b\x0e\x11\x14\x17\x1a\x1d" - "\x20\x23\x26\x29\x2c\x2f\x32\x35" - "\x38\x3b\x3e\x41\x44\x47\x4a\x4d" - "\x50\x53\x56\x59\x5c\x5f\x62\x65" - "\x68\x6b\x6e\x71\x74\x77\x7a\x7d" - "\x80\x83\x86\x89\x8c\x8f\x92\x95" - "\x98\x9b\x9e\xa1\xa4\xa7\xaa\xad" - "\xb0\xb3\xb6\xb9\xbc\xbf\xc2\xc5" - "\xc8\xcb\xce\xd1\xd4\xd7\xda\xdd" - "\xe0\xe3\xe6\xe9\xec\xef\xf2\xf5" - "\xf8\xfb\xfe\x01\x04\x07\x0a\x0d" - "\x10\x13\x16\x19\x1c\x1f\x22\x25" - "\x28\x2b\x2e\x31\x34\x37\x3a\x3d" - "\x40\x43\x46\x49\x4c\x4f\x52\x55" - "\x58\x5b\x5e\x61\x64\x67\x6a\x6d" - "\x70\x73\x76\x79\x7c\x7f\x82\x85" - "\x88\x8b\x8e\x91\x94\x97\x9a\x9d" - "\xa0\xa3\xa6\xa9\xac\xaf\xb2\xb5" - "\xb8\xbb\xbe\xc1\xc4\xc7\xca\xcd" - "\xd0\xd3\xd6\xd9\xdc\xdf\xe2\xe5" - "\xe8\xeb\xee\xf1\xf4\xf7\xfa\xfd" - "\x00\x05\x0a\x0f\x14\x19\x1e\x23" - "\x28\x2d\x32\x37\x3c\x41\x46\x4b" - "\x50\x55\x5a\x5f\x64\x69\x6e\x73" - "\x78\x7d\x82\x87\x8c\x91\x96\x9b" - "\xa0\xa5\xaa\xaf\xb4\xb9\xbe\xc3" - "\xc8\xcd\xd2\xd7\xdc\xe1\xe6\xeb" - 
"\xf0\xf5\xfa\xff\x04\x09\x0e\x13" - "\x18\x1d\x22\x27\x2c\x31\x36\x3b" - "\x40\x45\x4a\x4f\x54\x59\x5e\x63" - "\x68\x6d\x72\x77\x7c\x81\x86\x8b" - "\x90\x95\x9a\x9f\xa4\xa9\xae\xb3" - "\xb8\xbd\xc2\xc7\xcc\xd1\xd6\xdb" - "\xe0\xe5\xea\xef\xf4\xf9\xfe\x03" - "\x08\x0d\x12\x17\x1c\x21\x26\x2b" - "\x30\x35\x3a\x3f\x44\x49\x4e\x53" - "\x58\x5d\x62\x67\x6c\x71\x76\x7b" - "\x80\x85\x8a\x8f\x94\x99\x9e\xa3" - "\xa8\xad\xb2\xb7\xbc\xc1\xc6\xcb" - "\xd0\xd5\xda\xdf\xe4\xe9\xee\xf3" - "\xf8\xfd\x02\x07\x0c\x11\x16\x1b" - "\x20\x25\x2a\x2f\x34\x39\x3e\x43" - "\x48\x4d\x52\x57\x5c\x61\x66\x6b" - "\x70\x75\x7a\x7f\x84\x89\x8e\x93" - "\x98\x9d\xa2\xa7\xac\xb1\xb6\xbb" - "\xc0\xc5\xca\xcf\xd4\xd9\xde\xe3" - "\xe8\xed\xf2\xf7\xfc\x01\x06\x0b" - "\x10\x15\x1a\x1f\x24\x29\x2e\x33" - "\x38\x3d\x42\x47\x4c\x51\x56\x5b" - "\x60\x65\x6a\x6f\x74\x79\x7e\x83" - "\x88\x8d\x92\x97\x9c\xa1\xa6\xab" - "\xb0\xb5\xba\xbf\xc4\xc9\xce\xd3" - "\xd8\xdd\xe2\xe7\xec\xf1\xf6\xfb" - "\x00\x07\x0e\x15\x1c\x23\x2a\x31" - "\x38\x3f\x46\x4d\x54\x5b\x62\x69" - "\x70\x77\x7e\x85\x8c\x93\x9a\xa1" - "\xa8\xaf\xb6\xbd\xc4\xcb\xd2\xd9" - "\xe0\xe7\xee\xf5\xfc\x03\x0a\x11" - "\x18\x1f\x26\x2d\x34\x3b\x42\x49" - "\x50\x57\x5e\x65\x6c\x73\x7a\x81" - "\x88\x8f\x96\x9d\xa4\xab\xb2\xb9" - "\xc0\xc7\xce\xd5\xdc\xe3\xea\xf1" - "\xf8\xff\x06\x0d\x14\x1b\x22\x29" - "\x30\x37\x3e\x45\x4c\x53\x5a\x61" - "\x68\x6f\x76\x7d\x84\x8b\x92\x99" - "\xa0\xa7\xae\xb5\xbc\xc3\xca\xd1" - "\xd8\xdf\xe6\xed\xf4\xfb\x02\x09" - "\x10\x17\x1e\x25\x2c\x33\x3a\x41" - "\x48\x4f\x56\x5d\x64\x6b\x72\x79" - "\x80\x87\x8e\x95\x9c\xa3\xaa\xb1" - "\xb8\xbf\xc6\xcd\xd4\xdb\xe2\xe9" - "\xf0\xf7\xfe\x05\x0c\x13\x1a\x21" - "\x28\x2f\x36\x3d\x44\x4b\x52\x59" - "\x60\x67\x6e\x75\x7c\x83\x8a\x91" - "\x98\x9f\xa6\xad\xb4\xbb\xc2\xc9" - "\xd0\xd7\xde\xe5\xec\xf3\xfa\x01" - "\x08\x0f\x16\x1d\x24\x2b\x32\x39" - "\x40\x47\x4e\x55\x5c\x63\x6a\x71" - "\x78\x7f\x86\x8d\x94\x9b\xa2\xa9" - "\xb0\xb7\xbe\xc5\xcc\xd3\xda\xe1" - "\xe8\xef\xf6\xfd\x04\x0b\x12\x19" - "\x20\x27\x2e\x35\x3c\x43\x4a\x51" - "\x58\x5f\x66\x6d\x74\x7b\x82\x89" - "\x90\x97\x9e\xa5\xac\xb3\xba\xc1" - "\xc8\xcf\xd6\xdd\xe4\xeb\xf2\xf9" - "\x00\x09\x12\x1b\x24\x2d\x36\x3f" - "\x48\x51\x5a\x63\x6c\x75\x7e\x87" - "\x90\x99\xa2\xab\xb4\xbd\xc6\xcf" - "\xd8\xe1\xea\xf3\xfc\x05\x0e\x17" - "\x20\x29\x32\x3b\x44\x4d\x56\x5f" - "\x68\x71\x7a\x83\x8c\x95\x9e\xa7" - "\xb0\xb9\xc2\xcb\xd4\xdd\xe6\xef" - "\xf8\x01\x0a\x13\x1c\x25\x2e\x37" - "\x40\x49\x52\x5b\x64\x6d\x76\x7f" - "\x88\x91\x9a\xa3\xac\xb5\xbe\xc7" - "\xd0\xd9\xe2\xeb\xf4\xfd\x06\x0f" - "\x18\x21\x2a\x33\x3c\x45\x4e\x57" - "\x60\x69\x72\x7b\x84\x8d\x96\x9f" - "\xa8\xb1\xba\xc3\xcc\xd5\xde\xe7" - "\xf0\xf9\x02\x0b\x14\x1d\x26\x2f" - "\x38\x41\x4a\x53\x5c\x65\x6e\x77" - "\x80\x89\x92\x9b\xa4\xad\xb6\xbf" - "\xc8\xd1\xda\xe3\xec\xf5\xfe\x07" - "\x10\x19\x22\x2b\x34\x3d\x46\x4f" - "\x58\x61\x6a\x73\x7c\x85\x8e\x97" - "\xa0\xa9\xb2\xbb\xc4\xcd\xd6\xdf" - "\xe8\xf1\xfa\x03\x0c\x15\x1e\x27" - "\x30\x39\x42\x4b\x54\x5d\x66\x6f" - "\x78\x81\x8a\x93\x9c\xa5\xae\xb7" - "\xc0\xc9\xd2\xdb\xe4\xed\xf6\xff" - "\x08\x11\x1a\x23\x2c\x35\x3e\x47" - "\x50\x59\x62\x6b\x74\x7d\x86\x8f" - "\x98\xa1\xaa\xb3\xbc\xc5\xce\xd7" - "\xe0\xe9\xf2\xfb\x04\x0d\x16\x1f" - "\x28\x31\x3a\x43\x4c\x55\x5e\x67" - "\x70\x79\x82\x8b\x94\x9d\xa6\xaf" - "\xb8\xc1\xca\xd3\xdc\xe5\xee\xf7" - "\x00\x0b\x16\x21\x2c\x37\x42\x4d" - "\x58\x63\x6e\x79\x84\x8f\x9a\xa5" - "\xb0\xbb\xc6\xd1\xdc\xe7\xf2\xfd" - "\x08\x13\x1e\x29\x34\x3f\x4a\x55" - "\x60\x6b\x76\x81\x8c\x97\xa2\xad" - "\xb8\xc3\xce\xd9\xe4\xef\xfa\x05" - 
"\x10\x1b\x26\x31\x3c\x47\x52\x5d" - "\x68\x73\x7e\x89\x94\x9f\xaa\xb5" - "\xc0\xcb\xd6\xe1\xec\xf7\x02\x0d" - "\x18\x23\x2e\x39\x44\x4f\x5a\x65" - "\x70\x7b\x86\x91\x9c\xa7\xb2\xbd" - "\xc8\xd3\xde\xe9\xf4\xff\x0a\x15" - "\x20\x2b\x36\x41\x4c\x57\x62\x6d" - "\x78\x83\x8e\x99\xa4\xaf\xba\xc5" - "\xd0\xdb\xe6\xf1\xfc\x07\x12\x1d" - "\x28\x33\x3e\x49\x54\x5f\x6a\x75" - "\x80\x8b\x96\xa1\xac\xb7\xc2\xcd" - "\xd8\xe3\xee\xf9\x04\x0f\x1a\x25" - "\x30\x3b\x46\x51\x5c\x67\x72\x7d" - "\x88\x93\x9e\xa9\xb4\xbf\xca\xd5" - "\xe0\xeb\xf6\x01\x0c\x17\x22\x2d" - "\x38\x43\x4e\x59\x64\x6f\x7a\x85" - "\x90\x9b\xa6\xb1\xbc\xc7\xd2\xdd" - "\xe8\xf3\xfe\x09\x14\x1f\x2a\x35" - "\x40\x4b\x56\x61\x6c\x77\x82\x8d" - "\x98\xa3\xae\xb9\xc4\xcf\xda\xe5" - "\xf0\xfb\x06\x11\x1c\x27\x32\x3d" - "\x48\x53\x5e\x69\x74\x7f\x8a\x95" - "\xa0\xab\xb6\xc1\xcc\xd7\xe2\xed" - "\xf8\x03\x0e\x19\x24\x2f\x3a\x45" - "\x50\x5b\x66\x71\x7c\x87\x92\x9d" - "\xa8\xb3\xbe\xc9\xd4\xdf\xea\xf5" - "\x00\x0d\x1a\x27\x34\x41\x4e\x5b" - "\x68\x75\x82\x8f\x9c\xa9\xb6\xc3" - "\xd0\xdd\xea\xf7\x04\x11\x1e\x2b" - "\x38\x45\x52\x5f\x6c\x79\x86\x93" - "\xa0\xad\xba\xc7\xd4\xe1\xee\xfb" - "\x08\x15\x22\x2f\x3c\x49\x56\x63" - "\x70\x7d\x8a\x97\xa4\xb1\xbe\xcb" - "\xd8\xe5\xf2\xff\x0c\x19\x26\x33" - "\x40\x4d\x5a\x67\x74\x81\x8e\x9b" - "\xa8\xb5\xc2\xcf\xdc\xe9\xf6\x03" - "\x10\x1d\x2a\x37\x44\x51\x5e\x6b" - "\x78\x85\x92\x9f\xac\xb9\xc6\xd3" - "\xe0\xed\xfa\x07\x14\x21\x2e\x3b" - "\x48\x55\x62\x6f\x7c\x89\x96\xa3" - "\xb0\xbd\xca\xd7\xe4\xf1\xfe\x0b" - "\x18\x25\x32\x3f\x4c\x59\x66\x73" - "\x80\x8d\x9a\xa7\xb4\xc1\xce\xdb" - "\xe8\xf5\x02\x0f\x1c\x29\x36\x43" - "\x50\x5d\x6a\x77\x84\x91\x9e\xab" - "\xb8\xc5\xd2\xdf\xec\xf9\x06\x13" - "\x20\x2d\x3a\x47\x54\x61\x6e\x7b" - "\x88\x95\xa2\xaf\xbc\xc9\xd6\xe3" - "\xf0\xfd\x0a\x17\x24\x31\x3e\x4b" - "\x58\x65\x72\x7f\x8c\x99\xa6\xb3" - "\xc0\xcd\xda\xe7\xf4\x01\x0e\x1b" - "\x28\x35\x42\x4f\x5c\x69\x76\x83" - "\x90\x9d\xaa\xb7\xc4\xd1\xde\xeb" - "\xf8\x05\x12\x1f\x2c\x39\x46\x53" - "\x60\x6d\x7a\x87\x94\xa1\xae\xbb" - "\xc8\xd5\xe2\xef\xfc\x09\x16\x23" - "\x30\x3d\x4a\x57\x64\x71\x7e\x8b" - "\x98\xa5\xb2\xbf\xcc\xd9\xe6\xf3" - "\x00\x0f\x1e\x2d\x3c\x4b\x5a\x69" - "\x78\x87\x96\xa5\xb4\xc3\xd2\xe1" - "\xf0\xff\x0e\x1d\x2c\x3b\x4a\x59" - "\x68\x77\x86\x95\xa4\xb3\xc2\xd1" - "\xe0\xef\xfe\x0d\x1c\x2b\x3a\x49" - "\x58\x67\x76\x85\x94\xa3\xb2\xc1" - "\xd0\xdf\xee\xfd\x0c\x1b\x2a\x39" - "\x48\x57\x66\x75\x84\x93\xa2\xb1" - "\xc0\xcf\xde\xed\xfc\x0b\x1a\x29" - "\x38\x47\x56\x65\x74\x83\x92\xa1" - "\xb0\xbf\xce\xdd\xec\xfb\x0a\x19" - "\x28\x37\x46\x55\x64\x73\x82\x91" - "\xa0\xaf\xbe\xcd\xdc\xeb\xfa\x09" - "\x18\x27\x36\x45\x54\x63\x72\x81" - "\x90\x9f\xae\xbd\xcc\xdb\xea\xf9" - "\x08\x17\x26\x35\x44\x53\x62\x71" - "\x80\x8f\x9e\xad\xbc\xcb\xda\xe9" - "\xf8\x07\x16\x25\x34\x43\x52\x61" - "\x70\x7f\x8e\x9d\xac\xbb\xca\xd9" - "\xe8\xf7\x06\x15\x24\x33\x42\x51" - "\x60\x6f\x7e\x8d\x9c\xab\xba\xc9" - "\xd8\xe7\xf6\x05\x14\x23\x32\x41" - "\x50\x5f\x6e\x7d\x8c\x9b\xaa\xb9" - "\xc8\xd7\xe6\xf5\x04\x13\x22\x31" - "\x40\x4f\x5e\x6d\x7c\x8b\x9a\xa9" - "\xb8\xc7\xd6\xe5\xf4\x03\x12\x21" - "\x30\x3f\x4e\x5d\x6c\x7b\x8a\x99" - "\xa8\xb7\xc6\xd5\xe4\xf3\x02\x11" - "\x20\x2f\x3e\x4d\x5c\x6b\x7a\x89" - "\x98\xa7\xb6\xc5\xd4\xe3\xf2\x01" - "\x10\x1f\x2e\x3d\x4c\x5b\x6a\x79" - "\x88\x97\xa6\xb5\xc4\xd3\xe2\xf1" - "\x00\x11\x22\x33\x44\x55\x66\x77" - "\x88\x99\xaa\xbb\xcc\xdd\xee\xff" - "\x10\x21\x32\x43\x54\x65\x76\x87" - "\x98\xa9\xba\xcb\xdc\xed\xfe\x0f" - "\x20\x31\x42\x53\x64\x75\x86\x97" - "\xa8\xb9\xca\xdb\xec\xfd\x0e\x1f" - 
"\x30\x41\x52\x63\x74\x85\x96\xa7" - "\xb8\xc9\xda\xeb\xfc\x0d\x1e\x2f" - "\x40\x51\x62\x73\x84\x95\xa6\xb7" - "\xc8\xd9\xea\xfb\x0c\x1d\x2e\x3f" - "\x50\x61\x72\x83\x94\xa5\xb6\xc7" - "\xd8\xe9\xfa\x0b\x1c\x2d\x3e\x4f" - "\x60\x71\x82\x93\xa4\xb5\xc6\xd7" - "\xe8\xf9\x0a\x1b\x2c\x3d\x4e\x5f" - "\x70\x81\x92\xa3\xb4\xc5\xd6\xe7" - "\xf8\x09\x1a\x2b\x3c\x4d\x5e\x6f" - "\x80\x91\xa2\xb3\xc4\xd5\xe6\xf7" - "\x08\x19\x2a\x3b\x4c\x5d\x6e\x7f" - "\x90\xa1\xb2\xc3\xd4\xe5\xf6\x07" - "\x18\x29\x3a\x4b\x5c\x6d\x7e\x8f" - "\xa0\xb1\xc2\xd3\xe4\xf5\x06\x17" - "\x28\x39\x4a\x5b\x6c\x7d\x8e\x9f" - "\xb0\xc1\xd2\xe3\xf4\x05\x16\x27" - "\x38\x49\x5a\x6b\x7c\x8d\x9e\xaf" - "\xc0\xd1\xe2\xf3\x04\x15\x26\x37" - "\x48\x59\x6a\x7b\x8c\x9d\xae\xbf" - "\xd0\xe1\xf2\x03\x14\x25\x36\x47" - "\x58\x69\x7a\x8b\x9c\xad\xbe\xcf" - "\xe0\xf1\x02\x13\x24\x35\x46\x57" - "\x68\x79\x8a\x9b\xac\xbd\xce\xdf" - "\xf0\x01\x12\x23\x34\x45\x56\x67" - "\x78\x89\x9a\xab\xbc\xcd\xde\xef" - "\x00\x13\x26\x39\x4c\x5f\x72\x85" - "\x98\xab\xbe\xd1\xe4\xf7\x0a\x1d" - "\x30\x43\x56\x69\x7c\x8f\xa2\xb5" - "\xc8\xdb\xee\x01\x14\x27\x3a\x4d" - "\x60\x73\x86\x99\xac\xbf\xd2\xe5" - "\xf8\x0b\x1e\x31\x44\x57\x6a\x7d" - "\x90\xa3\xb6\xc9\xdc\xef\x02\x15" - "\x28\x3b\x4e\x61\x74\x87\x9a\xad" - "\xc0\xd3\xe6\xf9\x0c\x1f\x32\x45" - "\x58\x6b\x7e\x91\xa4\xb7\xca\xdd" - "\xf0\x03\x16\x29\x3c\x4f\x62\x75" - "\x88\x9b\xae\xc1\xd4\xe7\xfa\x0d" - "\x20\x33\x46\x59\x6c\x7f\x92\xa5" - "\xb8\xcb\xde\xf1\x04\x17\x2a\x3d" - "\x50\x63\x76\x89\x9c\xaf\xc2\xd5" - "\xe8\xfb\x0e\x21\x34\x47\x5a\x6d" - "\x80\x93\xa6\xb9\xcc\xdf\xf2\x05" - "\x18\x2b\x3e\x51\x64\x77\x8a\x9d" - "\xb0\xc3\xd6\xe9\xfc\x0f\x22\x35" - "\x48\x5b\x6e\x81\x94\xa7\xba\xcd" - "\xe0\xf3\x06\x19\x2c\x3f\x52\x65" - "\x78\x8b\x9e\xb1\xc4\xd7\xea\xfd" - "\x10\x23\x36\x49\x5c\x6f\x82\x95" - "\xa8\xbb\xce\xe1\xf4\x07\x1a\x2d" - "\x40\x53\x66\x79\x8c\x9f\xb2\xc5" - "\xd8\xeb\xfe\x11\x24\x37\x4a\x5d" - "\x70\x83\x96\xa9\xbc\xcf\xe2\xf5" - "\x08\x1b\x2e\x41\x54\x67\x7a\x8d" - "\xa0\xb3\xc6\xd9\xec\xff\x12\x25" - "\x38\x4b\x5e\x71\x84\x97\xaa\xbd" - "\xd0\xe3\xf6\x09\x1c\x2f\x42\x55" - "\x68\x7b\x8e\xa1\xb4\xc7\xda\xed" - "\x00\x15\x2a\x3f\x54\x69\x7e\x93" - "\xa8\xbd\xd2\xe7\xfc\x11\x26\x3b" - "\x50\x65\x7a\x8f\xa4\xb9\xce\xe3" - "\xf8\x0d\x22\x37\x4c\x61\x76\x8b" - "\xa0\xb5\xca\xdf\xf4\x09\x1e\x33" - "\x48\x5d\x72\x87\x9c\xb1\xc6\xdb" - "\xf0\x05\x1a\x2f\x44\x59\x6e\x83" - "\x98\xad\xc2\xd7\xec\x01\x16\x2b" - "\x40\x55\x6a\x7f\x94\xa9\xbe\xd3" - "\xe8\xfd\x12\x27\x3c\x51\x66\x7b" - "\x90\xa5\xba\xcf\xe4\xf9\x0e\x23" - "\x38\x4d\x62\x77\x8c\xa1\xb6\xcb" - "\xe0\xf5\x0a\x1f\x34\x49\x5e\x73" - "\x88\x9d\xb2\xc7\xdc\xf1\x06\x1b" - "\x30\x45\x5a\x6f\x84\x99\xae\xc3" - "\xd8\xed\x02\x17\x2c\x41\x56\x6b" - "\x80\x95\xaa\xbf\xd4\xe9\xfe\x13" - "\x28\x3d\x52\x67\x7c\x91\xa6\xbb" - "\xd0\xe5\xfa\x0f\x24\x39\x4e\x63" - "\x78\x8d\xa2\xb7\xcc\xe1\xf6\x0b" - "\x20\x35\x4a\x5f\x74\x89\x9e\xb3" - "\xc8\xdd\xf2\x07\x1c\x31\x46\x5b" - "\x70\x85\x9a\xaf\xc4\xd9\xee\x03" - "\x18\x2d\x42\x57\x6c\x81\x96\xab" - "\xc0\xd5\xea\xff\x14\x29\x3e\x53" - "\x68\x7d\x92\xa7\xbc\xd1\xe6\xfb" - "\x10\x25\x3a\x4f\x64\x79\x8e\xa3" - "\xb8\xcd\xe2\xf7\x0c\x21\x36\x4b" - "\x60\x75\x8a\x9f\xb4\xc9\xde\xf3" - "\x08\x1d\x32\x47\x5c\x71\x86\x9b" - "\xb0\xc5\xda\xef\x04\x19\x2e\x43" - "\x58\x6d\x82\x97\xac\xc1\xd6\xeb" - "\x00\x17\x2e\x45\x5c\x73\x8a\xa1" - "\xb8\xcf\xe6\xfd\x14\x2b\x42\x59" - "\x70\x87\x9e\xb5\xcc\xe3\xfa\x11" - "\x28\x3f\x56\x6d\x84\x9b\xb2\xc9" - "\xe0\xf7\x0e\x25\x3c\x53\x6a\x81" - "\x98\xaf\xc6\xdd\xf4\x0b\x22\x39" - 
"\x50\x67\x7e\x95\xac\xc3\xda\xf1" - "\x08\x1f\x36\x4d\x64\x7b\x92\xa9" - "\xc0\xd7\xee\x05\x1c\x33\x4a\x61" - "\x78\x8f\xa6\xbd\xd4\xeb\x02\x19" - "\x30\x47\x5e\x75\x8c\xa3\xba\xd1" - "\xe8\xff\x16\x2d\x44\x5b\x72\x89" - "\xa0\xb7\xce\xe5\xfc\x13\x2a\x41" - "\x58\x6f\x86\x9d\xb4\xcb\xe2\xf9" - "\x10\x27\x3e\x55\x6c\x83\x9a\xb1" - "\xc8\xdf\xf6\x0d\x24\x3b\x52\x69" - "\x80\x97\xae\xc5\xdc\xf3\x0a\x21" - "\x38\x4f\x66\x7d\x94\xab\xc2\xd9" - "\xf0\x07\x1e\x35\x4c\x63\x7a\x91" - "\xa8\xbf\xd6\xed\x04\x1b\x32\x49" - "\x60\x77\x8e\xa5\xbc\xd3\xea\x01" - "\x18\x2f\x46\x5d\x74\x8b\xa2\xb9" - "\xd0\xe7\xfe\x15\x2c\x43\x5a\x71" - "\x88\x9f\xb6\xcd\xe4\xfb\x12\x29" - "\x40\x57\x6e\x85\x9c\xb3\xca\xe1" - "\xf8\x0f\x26\x3d\x54\x6b\x82\x99" - "\xb0\xc7\xde\xf5\x0c\x23\x3a\x51" - "\x68\x7f\x96\xad\xc4\xdb\xf2\x09" - "\x20\x37\x4e\x65\x7c\x93\xaa\xc1" - "\xd8\xef\x06\x1d\x34\x4b\x62\x79" - "\x90\xa7\xbe\xd5\xec\x03\x1a\x31" - "\x48\x5f\x76\x8d\xa4\xbb\xd2\xe9" - "\x00\x19\x32\x4b\x64\x7d\x96\xaf" - "\xc8\xe1\xfa\x13\x2c\x45\x5e\x77" - "\x90\xa9\xc2\xdb\xf4\x0d\x26\x3f" - "\x58\x71\x8a\xa3\xbc\xd5\xee\x07" - "\x20\x39\x52\x6b\x84\x9d\xb6\xcf" - "\xe8\x01\x1a\x33\x4c\x65\x7e\x97" - "\xb0\xc9\xe2\xfb\x14\x2d\x46\x5f" - "\x78\x91\xaa\xc3\xdc\xf5\x0e\x27" - "\x40\x59\x72\x8b\xa4\xbd\xd6\xef" - "\x08\x21\x3a\x53\x6c\x85\x9e\xb7" - "\xd0\xe9\x02\x1b\x34\x4d\x66\x7f" - "\x98\xb1\xca\xe3\xfc\x15\x2e\x47" - "\x60\x79\x92\xab\xc4\xdd\xf6\x0f" - "\x28\x41\x5a\x73\x8c\xa5\xbe\xd7" - "\xf0\x09\x22\x3b\x54\x6d\x86\x9f" - "\xb8\xd1\xea\x03\x1c\x35\x4e\x67" - "\x80\x99\xb2\xcb\xe4\xfd\x16\x2f" - "\x48\x61\x7a\x93\xac\xc5\xde\xf7" - "\x10\x29\x42\x5b\x74\x8d\xa6\xbf" - "\xd8\xf1\x0a\x23\x3c\x55\x6e\x87" - "\xa0\xb9\xd2\xeb\x04\x1d\x36\x4f" - "\x68\x81\x9a\xb3\xcc\xe5\xfe\x17" - "\x30\x49\x62\x7b\x94\xad\xc6\xdf" - "\xf8\x11\x2a\x43\x5c\x75\x8e\xa7" - "\xc0\xd9\xf2\x0b\x24\x3d\x56\x6f" - "\x88\xa1\xba\xd3\xec\x05\x1e\x37" - "\x50\x69\x82\x9b\xb4\xcd\xe6\xff" - "\x18\x31\x4a\x63\x7c\x95\xae\xc7" - "\xe0\xf9\x12\x2b\x44\x5d\x76\x8f" - "\xa8\xc1\xda\xf3\x0c\x25\x3e\x57" - "\x70\x89\xa2\xbb\xd4\xed\x06\x1f" - "\x38\x51\x6a\x83\x9c\xb5\xce\xe7" - "\x00\x1b\x36\x51\x6c\x87\xa2\xbd" - "\xd8\xf3\x0e\x29\x44\x5f\x7a\x95" - "\xb0\xcb\xe6\x01\x1c\x37\x52\x6d" - "\x88\xa3\xbe\xd9\xf4\x0f\x2a\x45" - "\x60\x7b\x96\xb1\xcc\xe7\x02\x1d" - "\x38\x53\x6e\x89\xa4\xbf\xda\xf5" - "\x10\x2b\x46\x61\x7c\x97\xb2\xcd" - "\xe8\x03\x1e\x39\x54\x6f\x8a\xa5" - "\xc0\xdb\xf6\x11\x2c\x47\x62\x7d" - "\x98\xb3\xce\xe9\x04\x1f\x3a\x55" - "\x70\x8b\xa6\xc1\xdc\xf7\x12\x2d" - "\x48\x63\x7e\x99\xb4\xcf\xea\x05" - "\x20\x3b\x56\x71\x8c\xa7\xc2\xdd" - "\xf8\x13\x2e\x49\x64\x7f\x9a\xb5" - "\xd0\xeb\x06\x21\x3c\x57\x72\x8d" - "\xa8\xc3\xde\xf9\x14\x2f\x4a\x65" - "\x80\x9b\xb6\xd1\xec\x07\x22\x3d" - "\x58\x73\x8e\xa9\xc4\xdf\xfa\x15" - "\x30\x4b\x66\x81\x9c\xb7\xd2\xed" - "\x08\x23\x3e\x59\x74\x8f\xaa\xc5" - "\xe0\xfb\x16\x31\x4c\x67\x82\x9d" - "\xb8\xd3\xee\x09\x24\x3f\x5a\x75" - "\x90\xab\xc6\xe1\xfc\x17\x32\x4d" - "\x68\x83\x9e\xb9\xd4\xef\x0a\x25" - "\x40\x5b\x76\x91\xac\xc7\xe2\xfd" - "\x18\x33\x4e\x69\x84\x9f\xba\xd5" - "\xf0\x0b\x26\x41\x5c\x77\x92\xad" - "\xc8\xe3\xfe\x19\x34\x4f\x6a\x85" - "\xa0\xbb\xd6\xf1\x0c\x27\x42\x5d" - "\x78\x93\xae\xc9\xe4\xff\x1a\x35" - "\x50\x6b\x86\xa1\xbc\xd7\xf2\x0d" - "\x28\x43\x5e\x79\x94\xaf\xca\xe5" - "\x00\x1d\x3a\x57\x74\x91\xae\xcb" - "\xe8\x05\x22\x3f\x5c\x79\x96\xb3" - "\xd0\xed\x0a\x27\x44\x61\x7e\x9b" - "\xb8\xd5\xf2\x0f\x2c\x49\x66\x83" - "\xa0\xbd\xda\xf7\x14\x31\x4e\x6b" - "\x88\xa5\xc2\xdf\xfc\x19\x36\x53" - 
"\x70\x8d\xaa\xc7\xe4\x01\x1e\x3b" - "\x58\x75\x92\xaf\xcc\xe9\x06\x23" - "\x40\x5d\x7a\x97\xb4\xd1\xee\x0b" - "\x28\x45\x62\x7f\x9c\xb9\xd6\xf3" - "\x10\x2d\x4a\x67\x84\xa1\xbe\xdb" - "\xf8\x15\x32\x4f\x6c\x89\xa6\xc3" - "\xe0\xfd\x1a\x37\x54\x71\x8e\xab" - "\xc8\xe5\x02\x1f\x3c\x59\x76\x93" - "\xb0\xcd\xea\x07\x24\x41\x5e\x7b" - "\x98\xb5\xd2\xef\x0c\x29\x46\x63" - "\x80\x9d\xba\xd7\xf4\x11\x2e\x4b" - "\x68\x85\xa2\xbf\xdc\xf9\x16\x33" - "\x50\x6d\x8a\xa7\xc4\xe1\xfe\x1b" - "\x38\x55\x72\x8f\xac\xc9\xe6\x03" - "\x20\x3d\x5a\x77\x94\xb1\xce\xeb" - "\x08\x25\x42\x5f\x7c\x99\xb6\xd3" - "\xf0\x0d\x2a\x47\x64\x81\x9e\xbb" - "\xd8\xf5\x12\x2f\x4c\x69\x86\xa3" - "\xc0\xdd\xfa\x17\x34\x51\x6e\x8b" - "\xa8\xc5\xe2\xff\x1c\x39\x56\x73" - "\x90\xad\xca\xe7\x04\x21\x3e\x5b" - "\x78\x95\xb2\xcf\xec\x09\x26\x43" - "\x60\x7d\x9a\xb7\xd4\xf1\x0e\x2b" - "\x48\x65\x82\x9f\xbc\xd9\xf6\x13" - "\x30\x4d\x6a\x87\xa4\xc1\xde\xfb" - "\x18\x35\x52\x6f\x8c\xa9\xc6\xe3" - "\x00\x1f\x3e\x5d\x7c\x9b\xba\xd9" - "\xf8\x17\x36\x55\x74\x93\xb2\xd1" - "\xf0\x0f\x2e\x4d\x6c\x8b\xaa\xc9" - "\xe8\x07\x26\x45\x64\x83\xa2\xc1" - "\xe0\xff\x1e\x3d\x5c\x7b\x9a\xb9" - "\xd8\xf7\x16\x35\x54\x73\x92\xb1" - "\xd0\xef\x0e\x2d\x4c\x6b\x8a\xa9" - "\xc8\xe7\x06\x25\x44\x63\x82\xa1" - "\xc0\xdf\xfe\x1d\x3c\x5b\x7a\x99" - "\xb8\xd7\xf6\x15\x34\x53\x72\x91" - "\xb0\xcf\xee\x0d\x2c\x4b\x6a\x89" - "\xa8\xc7\xe6\x05\x24\x43\x62\x81" - "\xa0\xbf\xde\xfd\x1c\x3b\x5a\x79" - "\x98\xb7\xd6\xf5\x14\x33\x52\x71" - "\x90\xaf\xce\xed\x0c\x2b\x4a\x69" - "\x88\xa7\xc6\xe5\x04\x23\x42\x61" - "\x80\x9f\xbe\xdd\xfc\x1b\x3a\x59" - "\x78\x97\xb6\xd5\xf4\x13\x32\x51" - "\x70\x8f\xae\xcd\xec\x0b\x2a\x49" - "\x68\x87\xa6\xc5\xe4\x03\x22\x41" - "\x60\x7f\x9e\xbd\xdc\xfb\x1a\x39" - "\x58\x77\x96\xb5\xd4\xf3\x12\x31" - "\x50\x6f\x8e\xad\xcc\xeb\x0a\x29" - "\x48\x67\x86\xa5\xc4\xe3\x02\x21" - "\x40\x5f\x7e\x9d\xbc\xdb\xfa\x19" - "\x38\x57\x76\x95\xb4\xd3\xf2\x11" - "\x30\x4f\x6e\x8d\xac\xcb\xea\x09" - "\x28\x47\x66\x85\xa4\xc3\xe2\x01" - "\x20\x3f\x5e\x7d\x9c\xbb\xda\xf9" - "\x18\x37\x56\x75\x94\xb3\xd2\xf1" - "\x10\x2f\x4e\x6d\x8c\xab\xca\xe9" - "\x08\x27\x46\x65\x84\xa3\xc2\xe1" - "\x00\x21\x42\x63", - .ctext = - "\xb5\x81\xf5\x64\x18\x73\xe3\xf0" - "\x4c\x13\xf2\x77\x18\x60\x65\x5e" - "\x29\x01\xce\x98\x55\x53\xf9\x0c" - "\x2a\x08\xd5\x09\xb3\x57\x55\x56" - "\xc5\xe9\x56\x90\xcb\x6a\xa3\xc0" - "\xff\xc4\x79\xb4\xd2\x97\x5d\xc4" - "\x43\xd1\xfe\x94\x7b\x88\x06\x5a" - "\xb2\x9e\x2c\xfc\x44\x03\xb7\x90" - "\xa0\xc1\xba\x6a\x33\xb8\xc7\xb2" - "\x9d\xe1\x12\x4f\xc0\x64\xd4\x01" - "\xfe\x8c\x7a\x66\xf7\xe6\x5a\x91" - "\xbb\xde\x56\x86\xab\x65\x21\x30" - "\x00\x84\x65\x24\xa5\x7d\x85\xb4" - "\xe3\x17\xed\x3a\xb7\x6f\xb4\x0b" - "\x0b\xaf\x15\xae\x5a\x8f\xf2\x0c" - "\x2f\x27\xf4\x09\xd8\xd2\x96\xb7" - "\x71\xf2\xc5\x99\x4d\x7e\x7f\x75" - "\x77\x89\x30\x8b\x59\xdb\xa2\xb2" - "\xa0\xf3\x19\x39\x2b\xc5\x7e\x3f" - "\x4f\xd9\xd3\x56\x28\x97\x44\xdc" - "\xc0\x8b\x77\x24\xd9\x52\xe7\xc5" - "\xaf\xf6\x7d\x59\xb2\x44\x05\x1d" - "\xb1\xb0\x11\xa5\x0f\xec\x33\xe1" - "\x6d\x1b\x4e\x1f\xff\x57\x91\xb4" - "\x5b\x9a\x96\xc5\x53\xbc\xae\x20" - "\x3c\xbb\x14\xe2\xe8\x22\x33\xc1" - "\x5e\x76\x9e\x46\x99\xf6\x2a\x15" - "\xc6\x97\x02\xa0\x66\x43\xd1\xa6" - "\x31\xa6\x9f\xfb\xf4\xd3\x69\xe5" - "\xcd\x76\x95\xb8\x7a\x82\x7f\x21" - "\x45\xff\x3f\xce\x55\xf6\x95\x10" - "\x08\x77\x10\x43\xc6\xf3\x09\xe5" - "\x68\xe7\x3c\xad\x00\x52\x45\x0d" - "\xfe\x2d\xc6\xc2\x94\x8c\x12\x1d" - "\xe6\x25\xae\x98\x12\x8e\x19\x9c" - "\x81\x68\xb1\x11\xf6\x69\xda\xe3" - "\x62\x08\x18\x7a\x25\x49\x28\xac" - 
"\xba\x71\x12\x0b\xe4\xa2\xe5\xc7" - "\x5d\x8e\xec\x49\x40\x21\xbf\x5a" - "\x98\xf3\x02\x68\x55\x03\x7f\x8a" - "\xe5\x94\x0c\x32\x5c\x07\x82\x63" - "\xaf\x6f\x91\x40\x84\x8e\x52\x25" - "\xd0\xb0\x29\x53\x05\xe2\x50\x7a" - "\x34\xeb\xc9\x46\x20\xa8\x3d\xde" - "\x7f\x16\x5f\x36\xc5\x2e\xdc\xd1" - "\x15\x47\xc7\x50\x40\x6d\x91\xc5" - "\xe7\x93\x95\x1a\xd3\x57\xbc\x52" - "\x33\xee\x14\x19\x22\x52\x89\xa7" - "\x4a\x25\x56\x77\x4b\xca\xcf\x0a" - "\xe1\xf5\x35\x85\x30\x7e\x59\x4a" - "\xbd\x14\x5b\xdf\xe3\x46\xcb\xac" - "\x1f\x6c\x96\x0e\xf4\x81\xd1\x99" - "\xca\x88\x63\x3d\x02\x58\x6b\xa9" - "\xe5\x9f\xb3\x00\xb2\x54\xc6\x74" - "\x1c\xbf\x46\xab\x97\xcc\xf8\x54" - "\x04\x07\x08\x52\xe6\xc0\xda\x93" - "\x74\x7d\x93\x99\x5d\x78\x68\xa6" - "\x2e\x6b\xd3\x6a\x69\xcc\x12\x6b" - "\xd4\xc7\xa5\xc6\xe7\xf6\x03\x04" - "\x5d\xcd\x61\x5e\x17\x40\xdc\xd1" - "\x5c\xf5\x08\xdf\x5c\x90\x85\xa4" - "\xaf\xf6\x78\xbb\x0d\xf1\xf4\xa4" - "\x54\x26\x72\x9e\x61\xfa\x86\xcf" - "\xe8\x9e\xa1\xe0\xc7\x48\x23\xae" - "\x5a\x90\xae\x75\x0a\x74\x18\x89" - "\x05\xb1\x92\xb2\x7f\xd0\x1b\xa6" - "\x62\x07\x25\x01\xc7\xc2\x4f\xf9" - "\xe8\xfe\x63\x95\x80\x07\xb4\x26" - "\xcc\xd1\x26\xb6\xc4\x3f\x9e\xcb" - "\x8e\x3b\x2e\x44\x16\xd3\x10\x9a" - "\x95\x08\xeb\xc8\xcb\xeb\xbf\x6f" - "\x0b\xcd\x1f\xc8\xca\x86\xaa\xec" - "\x33\xe6\x69\xf4\x45\x25\x86\x3a" - "\x22\x94\x4f\x00\x23\x6a\x44\xc2" - "\x49\x97\x33\xab\x36\x14\x0a\x70" - "\x24\xc3\xbe\x04\x3b\x79\xa0\xf9" - "\xb8\xe7\x76\x29\x22\x83\xd7\xf2" - "\x94\xf4\x41\x49\xba\x5f\x7b\x07" - "\xb5\xfb\xdb\x03\x1a\x9f\xb6\x4c" - "\xc2\x2e\x37\x40\x49\xc3\x38\x16" - "\xe2\x4f\x77\x82\xb0\x68\x4c\x71" - "\x1d\x57\x61\x9c\xd9\x4e\x54\x99" - "\x47\x13\x28\x73\x3c\xbb\x00\x90" - "\xf3\x4d\xc9\x0e\xfd\xe7\xb1\x71" - "\xd3\x15\x79\xbf\xcc\x26\x2f\xbd" - "\xad\x6c\x50\x69\x6c\x3e\x6d\x80" - "\x9a\xea\x78\xaf\x19\xb2\x0d\x4d" - "\xad\x04\x07\xae\x22\x90\x4a\x93" - "\x32\x0e\x36\x9b\x1b\x46\xba\x3b" - "\xb4\xac\xc6\xd1\xa2\x31\x53\x3b" - "\x2a\x3d\x45\xfe\x03\x61\x10\x85" - "\x17\x69\xa6\x78\xcc\x6c\x87\x49" - "\x53\xf9\x80\x10\xde\x80\xa2\x41" - "\x6a\xc3\x32\x02\xad\x6d\x3c\x56" - "\x00\x71\x51\x06\xa7\xbd\xfb\xef" - "\x3c\xb5\x9f\xfc\x48\x7d\x53\x7c" - "\x66\xb0\x49\x23\xc4\x47\x10\x0e" - "\xe5\x6c\x74\x13\xe6\xc5\x3f\xaa" - "\xde\xff\x07\x44\xdd\x56\x1b\xad" - "\x09\x77\xfb\x5b\x12\xb8\x0d\x38" - "\x17\x37\x35\x7b\x9b\xbc\xfe\xd4" - "\x7e\x8b\xda\x7e\x5b\x04\xa7\x22" - "\xa7\x31\xa1\x20\x86\xc7\x1b\x99" - "\xdb\xd1\x89\xf4\x94\xa3\x53\x69" - "\x8d\xe7\xe8\x74\x11\x8d\x74\xd6" - "\x07\x37\x91\x9f\xfd\x67\x50\x3a" - "\xc9\xe1\xf4\x36\xd5\xa0\x47\xd1" - "\xf9\xe5\x39\xa3\x31\xac\x07\x36" - "\x23\xf8\x66\x18\x14\x28\x34\x0f" - "\xb8\xd0\xe7\x29\xb3\x04\x4b\x55" - "\x01\x41\xb2\x75\x8d\xcb\x96\x85" - "\x3a\xfb\xab\x2b\x9e\xfa\x58\x20" - "\x44\x1f\xc0\x14\x22\x75\x61\xe8" - "\xaa\x19\xcf\xf1\x82\x56\xf4\xd7" - "\x78\x7b\x3d\x5f\xb3\x9e\x0b\x8a" - "\x57\x50\xdb\x17\x41\x65\x4d\xa3" - "\x02\xc9\x9c\x9c\x53\xfb\x39\x39" - "\x9b\x1d\x72\x24\xda\xb7\x39\xbe" - "\x13\x3b\xfa\x29\xda\x9e\x54\x64" - "\x6e\xba\xd8\xa1\xcb\xb3\x36\xfa" - "\xcb\x47\x85\xe9\x61\x38\xbc\xbe" - "\xc5\x00\x38\x2a\x54\xf7\xc4\xb9" - "\xb3\xd3\x7b\xa0\xa0\xf8\x72\x7f" - "\x8c\x8e\x82\x0e\xc6\x1c\x75\x9d" - "\xca\x8e\x61\x87\xde\xad\x80\xd2" - "\xf5\xf9\x80\xef\x15\x75\xaf\xf5" - "\x80\xfb\xff\x6d\x1e\x25\xb7\x40" - "\x61\x6a\x39\x5a\x6a\xb5\x31\xab" - "\x97\x8a\x19\x89\x44\x40\xc0\xa6" - "\xb4\x4e\x30\x32\x7b\x13\xe7\x67" - "\xa9\x8b\x57\x04\xc2\x01\xa6\xf4" - "\x28\x99\xad\x2c\x76\xa3\x78\xc2" - "\x4a\xe6\xca\x5c\x50\x6a\xc1\xb0" - 
"\x62\x4b\x10\x8e\x7c\x17\x43\xb3" - "\x17\x66\x1c\x3e\x8d\x69\xf0\x5a" - "\x71\xf5\x97\xdc\xd1\x45\xdd\x28" - "\xf3\x5d\xdf\x53\x7b\x11\xe5\xbc" - "\x4c\xdb\x1b\x51\x6b\xe9\xfb\x3d" - "\xc1\xc3\x2c\xb9\x71\xf5\xb6\xb2" - "\x13\x36\x79\x80\x53\xe8\xd3\xa6" - "\x0a\xaf\xfd\x56\x97\xf7\x40\x8e" - "\x45\xce\xf8\xb0\x9e\x5c\x33\x82" - "\xb0\x44\x56\xfc\x05\x09\xe9\x2a" - "\xac\x26\x80\x14\x1d\xc8\x3a\x35" - "\x4c\x82\x97\xfd\x76\xb7\xa9\x0a" - "\x35\x58\x79\x8e\x0f\x66\xea\xaf" - "\x51\x6c\x09\xa9\x6e\x9b\xcb\x9a" - "\x31\x47\xa0\x2f\x7c\x71\xb4\x4a" - "\x11\xaa\x8c\x66\xc5\x64\xe6\x3a" - "\x54\xda\x24\x6a\xc4\x41\x65\x46" - "\x82\xa0\x0a\x0f\x5f\xfb\x25\xd0" - "\x2c\x91\xa7\xee\xc4\x81\x07\x86" - "\x75\x5e\x33\x69\x97\xe4\x2c\xa8" - "\x9d\x9f\x0b\x6a\xbe\xad\x98\xda" - "\x6d\x94\x41\xda\x2c\x1e\x89\xc4" - "\xc2\xaf\x1e\x00\x05\x0b\x83\x60" - "\xbd\x43\xea\x15\x23\x7f\xb9\xac" - "\xee\x4f\x2c\xaf\x2a\xf3\xdf\xd0" - "\xf3\x19\x31\xbb\x4a\x74\x84\x17" - "\x52\x32\x2c\x7d\x61\xe4\xcb\xeb" - "\x80\x38\x15\x52\xcb\x6f\xea\xe5" - "\x73\x9c\xd9\x24\x69\xc6\x95\x32" - "\x21\xc8\x11\xe4\xdc\x36\xd7\x93" - "\x38\x66\xfb\xb2\x7f\x3a\xb9\xaf" - "\x31\xdd\x93\x75\x78\x8a\x2c\x94" - "\x87\x1a\x58\xec\x9e\x7d\x4d\xba" - "\xe1\xe5\x4d\xfc\xbc\xa4\x2a\x14" - "\xef\xcc\xa7\xec\xab\x43\x09\x18" - "\xd3\xab\x68\xd1\x07\x99\x44\x47" - "\xd6\x83\x85\x3b\x30\xea\xa9\x6b" - "\x63\xea\xc4\x07\xfb\x43\x2f\xa4" - "\xaa\xb0\xab\x03\x89\xce\x3f\x8c" - "\x02\x7c\x86\x54\xbc\x88\xaf\x75" - "\xd2\xdc\x63\x17\xd3\x26\xf6\x96" - "\xa9\x3c\xf1\x61\x8c\x11\x18\xcc" - "\xd6\xea\x5b\xe2\xcd\xf0\xf1\xb2" - "\xe5\x35\x90\x1f\x85\x4c\x76\x5b" - "\x66\xce\x44\xa4\x32\x9f\xe6\x7b" - "\x71\x6e\x9f\x58\x15\x67\x72\x87" - "\x64\x8e\x3a\x44\x45\xd4\x76\xfa" - "\xc2\xf6\xef\x85\x05\x18\x7a\x9b" - "\xba\x41\x54\xac\xf0\xfc\x59\x12" - "\x3f\xdf\xa0\xe5\x8a\x65\xfd\x3a" - "\x62\x8d\x83\x2c\x03\xbe\x05\x76" - "\x2e\x53\x49\x97\x94\x33\xae\x40" - "\x81\x15\xdb\x6e\xad\xaa\xf5\x4b" - "\xe3\x98\x70\xdf\xe0\x7c\xcd\xdb" - "\x02\xd4\x7d\x2f\xc1\xe6\xb4\xf3" - "\xd7\x0d\x7a\xd9\x23\x9e\x87\x2d" - "\xce\x87\xad\xcc\x72\x05\x00\x29" - "\xdc\x73\x7f\x64\xc1\x15\x0e\xc2" - "\xdf\xa7\x5f\xeb\x41\xa1\xcd\xef" - "\x5c\x50\x79\x2a\x56\x56\x71\x8c" - "\xac\xc0\x79\x50\x69\xca\x59\x32" - "\x65\xf2\x54\xe4\x52\x38\x76\xd1" - "\x5e\xde\x26\x9e\xfb\x75\x2e\x11" - "\xb5\x10\xf4\x17\x73\xf5\x89\xc7" - "\x4f\x43\x5c\x8e\x7c\xb9\x05\x52" - "\x24\x40\x99\xfe\x9b\x85\x0b\x6c" - "\x22\x3e\x8b\xae\x86\xa1\xd2\x79" - "\x05\x68\x6b\xab\xe3\x41\x49\xed" - "\x15\xa1\x8d\x40\x2d\x61\xdf\x1a" - "\x59\xc9\x26\x8b\xef\x30\x4c\x88" - "\x4b\x10\xf8\x8d\xa6\x92\x9f\x4b" - "\xf3\xc4\x53\x0b\x89\x5d\x28\x92" - "\xcf\x78\xb2\xc0\x5d\xed\x7e\xfc" - "\xc0\x12\x23\x5f\x5a\x78\x86\x43" - "\x6e\x27\xf7\x5a\xa7\x6a\xed\x19" - "\x04\xf0\xb3\x12\xd1\xbd\x0e\x89" - "\x6e\xbc\x96\xa8\xd8\x49\x39\x9f" - "\x7e\x67\xf0\x2e\x3e\x01\xa9\xba" - "\xec\x8b\x62\x8e\xcb\x4a\x70\x43" - "\xc7\xc2\xc4\xca\x82\x03\x73\xe9" - "\x11\xdf\xcf\x54\xea\xc9\xb0\x95" - "\x51\xc0\x13\x3d\x92\x05\xfa\xf4" - "\xa9\x34\xc8\xce\x6c\x3d\x54\xcc" - "\xc4\xaf\xf1\xdc\x11\x44\x26\xa2" - "\xaf\xf1\x85\x75\x7d\x03\x61\x68" - "\x4e\x78\xc6\x92\x7d\x86\x7d\x77" - "\xdc\x71\x72\xdb\xc6\xae\xa1\xcb" - "\x70\x9a\x0b\x19\xbe\x4a\x6c\x2a" - "\xe2\xba\x6c\x64\x9a\x13\x28\xdf" - "\x85\x75\xe6\x43\xf6\x87\x08\x68" - "\x6e\xba\x6e\x79\x9f\x04\xbc\x23" - "\x50\xf6\x33\x5c\x1f\x24\x25\xbe" - "\x33\x47\x80\x45\x56\xa3\xa7\xd7" - "\x7a\xb1\x34\x0b\x90\x3c\x9c\xad" - "\x44\x5f\x9e\x0e\x9d\xd4\xbd\x93" - "\x5e\xfa\x3c\xe0\xb0\xd9\xed\xf3" - 
"\xd6\x2e\xff\x24\xd8\x71\x6c\xed" - "\xaf\x55\xeb\x22\xac\x93\x68\x32" - "\x05\x5b\x47\xdd\xc6\x4a\xcb\xc7" - "\x10\xe1\x3c\x92\x1a\xf3\x23\x78" - "\x2b\xa1\xd2\x80\xf4\x12\xb1\x20" - "\x8f\xff\x26\x35\xdd\xfb\xc7\x4e" - "\x78\xf1\x2d\x50\x12\x77\xa8\x60" - "\x7c\x0f\xf5\x16\x2f\x63\x70\x2a" - "\xc0\x96\x80\x4e\x0a\xb4\x93\x35" - "\x5d\x1d\x3f\x56\xf7\x2f\xbb\x90" - "\x11\x16\x8f\xa2\xec\x47\xbe\xac" - "\x56\x01\x26\x56\xb1\x8c\xb2\x10" - "\xf9\x1a\xca\xf5\xd1\xb7\x39\x20" - "\x63\xf1\x69\x20\x4f\x13\x12\x1f" - "\x5b\x65\xfc\x98\xf7\xc4\x7a\xbe" - "\xf7\x26\x4d\x2b\x84\x7b\x42\xad" - "\xd8\x7a\x0a\xb4\xd8\x74\xbf\xc1" - "\xf0\x6e\xb4\x29\xa3\xbb\xca\x46" - "\x67\x70\x6a\x2d\xce\x0e\xa2\x8a" - "\xa9\x87\xbf\x05\xc4\xc1\x04\xa3" - "\xab\xd4\x45\x43\x8c\xb6\x02\xb0" - "\x41\xc8\xfc\x44\x3d\x59\xaa\x2e" - "\x44\x21\x2a\x8d\x88\x9d\x57\xf4" - "\xa0\x02\x77\xb8\xa6\xa0\xe6\x75" - "\x5c\x82\x65\x3e\x03\x5c\x29\x8f" - "\x38\x55\xab\x33\x26\xef\x9f\x43" - "\x52\xfd\x68\xaf\x36\xb4\xbb\x9a" - "\x58\x09\x09\x1b\xc3\x65\x46\x46" - "\x1d\xa7\x94\x18\x23\x50\x2c\xca" - "\x2c\x55\x19\x97\x01\x9d\x93\x3b" - "\x63\x86\xf2\x03\x67\x45\xd2\x72" - "\x28\x52\x6c\xf4\xe3\x1c\xb5\x11" - "\x13\xf1\xeb\x21\xc7\xd9\x56\x82" - "\x2b\x82\x39\xbd\x69\x54\xed\x62" - "\xc3\xe2\xde\x73\xd4\x6a\x12\xae" - "\x13\x21\x7f\x4b\x5b\xfc\xbf\xe8" - "\x2b\xbe\x56\xba\x68\x8b\x9a\xb1" - "\x6e\xfa\xbf\x7e\x5a\x4b\xf1\xac" - "\x98\x65\x85\xd1\x93\x53\xd3\x7b" - "\x09\xdd\x4b\x10\x6d\x84\xb0\x13" - "\x65\xbd\xcf\x52\x09\xc4\x85\xe2" - "\x84\x74\x15\x65\xb7\xf7\x51\xaf" - "\x55\xad\xa4\xd1\x22\x54\x70\x94" - "\xa0\x1c\x90\x41\xfd\x99\xd7\x5a" - "\x31\xef\xaa\x25\xd0\x7f\x4f\xea" - "\x1d\x55\x42\xe5\x49\xb0\xd0\x46" - "\x62\x36\x43\xb2\x82\x15\x75\x50" - "\xa4\x72\xeb\x54\x27\x1f\x8a\xe4" - "\x7d\xe9\x66\xc5\xf1\x53\xa4\xd1" - "\x0c\xeb\xb8\xf8\xbc\xd4\xe2\xe7" - "\xe1\xf8\x4b\xcb\xa9\xa1\xaf\x15" - "\x83\xcb\x72\xd0\x33\x79\x00\x2d" - "\x9f\xd7\xf1\x2e\x1e\x10\xe4\x45" - "\xc0\x75\x3a\x39\xea\x68\xf7\x5d" - "\x1b\x73\x8f\xe9\x8e\x0f\x72\x47" - "\xae\x35\x0a\x31\x7a\x14\x4d\x4a" - "\x6f\x47\xf7\x7e\x91\x6e\x74\x8b" - "\x26\x47\xf9\xc3\xf9\xde\x70\xf5" - "\x61\xab\xa9\x27\x9f\x82\xe4\x9c" - "\x89\x91\x3f\x2e\x6a\xfd\xb5\x49" - "\xe9\xfd\x59\x14\x36\x49\x40\x6d" - "\x32\xd8\x85\x42\xf3\xa5\xdf\x0c" - "\xa8\x27\xd7\x54\xe2\x63\x2f\xf2" - "\x7e\x8b\x8b\xe7\xf1\x9a\x95\x35" - "\x43\xdc\x3a\xe4\xb6\xf4\xd0\xdf" - "\x9c\xcb\x94\xf3\x21\xa0\x77\x50" - "\xe2\xc6\xc4\xc6\x5f\x09\x64\x5b" - "\x92\x90\xd8\xe1\xd1\xed\x4b\x42" - "\xd7\x37\xaf\x65\x3d\x11\x39\xb6" - "\x24\x8a\x60\xae\xd6\x1e\xbf\x0e" - "\x0d\xd7\xdc\x96\x0e\x65\x75\x4e" - "\x29\x06\x9d\xa4\x51\x3a\x10\x63" - "\x8f\x17\x07\xd5\x8e\x3c\xf4\x28" - "\x00\x5a\x5b\x05\x19\xd8\xc0\x6c" - "\xe5\x15\xe4\x9c\x9d\x71\x9d\x5e" - "\x94\x29\x1a\xa7\x80\xfa\x0e\x33" - "\x03\xdd\xb7\x3e\x9a\xa9\x26\x18" - "\x37\xa9\x64\x08\x4d\x94\x5a\x88" - "\xca\x35\xce\x81\x02\xe3\x1f\x1b" - "\x89\x1a\x77\x85\xe3\x41\x6d\x32" - "\x42\x19\x23\x7d\xc8\x73\xee\x25" - "\x85\x0d\xf8\x31\x25\x79\x1b\x6f" - "\x79\x25\xd2\xd8\xd4\x23\xfd\xf7" - "\x82\x36\x6a\x0c\x46\x22\x15\xe9" - "\xff\x72\x41\x91\x91\x7d\x3a\xb7" - "\xdd\x65\x99\x70\xf6\x8d\x84\xf8" - "\x67\x15\x20\x11\xd6\xb2\x55\x7b" - "\xdb\x87\xee\xef\x55\x89\x2a\x59" - "\x2b\x07\x8f\x43\x8a\x59\x3c\x01" - "\x8b\x65\x54\xa1\x66\xd5\x38\xbd" - "\xc6\x30\xa9\xcc\x49\xb6\xa8\x1b" - "\xb8\xc0\x0e\xe3\x45\x28\xe2\xff" - "\x41\x9f\x7e\x7c\xd1\xae\x9e\x25" - "\x3f\x4c\x7c\x7c\xf4\xa8\x26\x4d" - "\x5c\xfd\x4b\x27\x18\xf9\x61\x76" - "\x48\xba\x0c\x6b\xa9\x4d\xfc\xf5" - 
"\x3b\x35\x7e\x2f\x4a\xa9\xc2\x9a" - "\xae\xab\x86\x09\x89\xc9\xc2\x40" - "\x39\x2c\x81\xb3\xb8\x17\x67\xc2" - "\x0d\x32\x4a\x3a\x67\x81\xd7\x1a" - "\x34\x52\xc5\xdb\x0a\xf5\x63\x39" - "\xea\x1f\xe1\x7c\xa1\x9e\xc1\x35" - "\xe3\xb1\x18\x45\x67\xf9\x22\x38" - "\x95\xd9\x34\x34\x86\xc6\x41\x94" - "\x15\xf9\x5b\x41\xa6\x87\x8b\xf8" - "\xd5\xe1\x1b\xe2\x5b\xf3\x86\x10" - "\xff\xe6\xae\x69\x76\xbc\x0d\xb4" - "\x09\x90\x0c\xa2\x65\x0c\xad\x74" - "\xf5\xd7\xff\xda\xc1\xce\x85\xbe" - "\x00\xa7\xff\x4d\x2f\x65\xd3\x8c" - "\x86\x2d\x05\xe8\xed\x3e\x6b\x8b" - "\x0f\x3d\x83\x8c\xf1\x1d\x5b\x96" - "\x2e\xb1\x9c\xc2\x98\xe1\x70\xb9" - "\xba\x5c\x8a\x43\xd6\x34\xa7\x2d" - "\xc9\x92\xae\xf2\xa5\x7b\x05\x49" - "\xa7\x33\x34\x86\xca\xe4\x96\x23" - "\x76\x5b\xf2\xc6\xf1\x51\x28\x42" - "\x7b\xcc\x76\x8f\xfa\xa2\xad\x31" - "\xd4\xd6\x7a\x6d\x25\x25\x54\xe4" - "\x3f\x50\x59\xe1\x5c\x05\xb7\x27" - "\x48\xbf\x07\xec\x1b\x13\xbe\x2b" - "\xa1\x57\x2b\xd5\xab\xd7\xd0\x4c" - "\x1e\xcb\x71\x9b\xc5\x90\x85\xd3" - "\xde\x59\xec\x71\xeb\x89\xbb\xd0" - "\x09\x50\xe1\x16\x3f\xfd\x1c\x34" - "\xc3\x1c\xa1\x10\x77\x53\x98\xef" - "\xf2\xfd\xa5\x01\x59\xc2\x9b\x26" - "\xc7\x42\xd9\x49\xda\x58\x2b\x6e" - "\x9f\x53\x19\x76\x7e\xd9\xc9\x0e" - "\x68\xc8\x7f\x51\x22\x42\xef\x49" - "\xa4\x55\xb6\x36\xac\x09\xc7\x31" - "\x88\x15\x4b\x2e\x8f\x3a\x08\xf7" - "\xd8\xf7\xa8\xc5\xa9\x33\xa6\x45" - "\xe4\xc4\x94\x76\xf3\x0d\x8f\x7e" - "\xc8\xf6\xbc\x23\x0a\xb6\x4c\xd3" - "\x6a\xcd\x36\xc2\x90\x5c\x5c\x3c" - "\x65\x7b\xc2\xd6\xcc\xe6\x0d\x87" - "\x73\x2e\x71\x79\x16\x06\x63\x28" - "\x09\x15\xd8\x89\x38\x38\x3d\xb5" - "\x42\x1c\x08\x24\xf7\x2a\xd2\x9d" - "\xc8\xca\xef\xf9\x27\xd8\x07\x86" - "\xf7\x43\x0b\x55\x15\x3f\x9f\x83" - "\xef\xdc\x49\x9d\x2a\xc1\x54\x62" - "\xbd\x9b\x66\x55\x9f\xb7\x12\xf3" - "\x1b\x4d\x9d\x2a\x5c\xed\x87\x75" - "\x87\x26\xec\x61\x2c\xb4\x0f\x89" - "\xb0\xfb\x2e\x68\x5d\x15\xc7\x8d" - "\x2e\xc0\xd9\xec\xaf\x4f\xd2\x25" - "\x29\xe8\xd2\x26\x2b\x67\xe9\xfc" - "\x2b\xa8\x67\x96\x12\x1f\x5b\x96" - "\xc6\x14\x53\xaf\x44\xea\xd6\xe2" - "\x94\x98\xe4\x12\x93\x4c\x92\xe0" - "\x18\xa5\x8d\x2d\xe4\x71\x3c\x47" - "\x4c\xf7\xe6\x47\x9e\xc0\x68\xdf" - "\xd4\xf5\x5a\x74\xb1\x2b\x29\x03" - "\x19\x07\xaf\x90\x62\x5c\x68\x98" - "\x48\x16\x11\x02\x9d\xee\xb4\x9b" - "\xe5\x42\x7f\x08\xfd\x16\x32\x0b" - "\xd0\xb3\xfa\x2b\xb7\x99\xf9\x29" - "\xcd\x20\x45\x9f\xb3\x1a\x5d\xa2" - "\xaf\x4d\xe0\xbd\x42\x0d\xbc\x74" - "\x99\x9c\x8e\x53\x1a\xb4\x3e\xbd" - "\xa2\x9a\x2d\xf7\xf8\x39\x0f\x67" - "\x63\xfc\x6b\xc0\xaf\xb3\x4b\x4f" - "\x55\xc4\xcf\xa7\xc8\x04\x11\x3e" - "\x14\x32\xbb\x1b\x38\x77\xd6\x7f" - "\x54\x4c\xdf\x75\xf3\x07\x2d\x33" - "\x9b\xa8\x20\xe1\x7b\x12\xb5\xf3" - "\xef\x2f\xce\x72\xe5\x24\x60\xc1" - "\x30\xe2\xab\xa1\x8e\x11\x09\xa8" - "\x21\x33\x44\xfe\x7f\x35\x32\x93" - "\x39\xa7\xad\x8b\x79\x06\xb2\xcb" - "\x4e\xa9\x5f\xc7\xba\x74\x29\xec" - "\x93\xa0\x4e\x54\x93\xc0\xbc\x55" - "\x64\xf0\x48\xe5\x57\x99\xee\x75" - "\xd6\x79\x0f\x66\xb7\xc6\x57\x76" - "\xf7\xb7\xf3\x9c\xc5\x60\xe8\x7f" - "\x83\x76\xd6\x0e\xaa\xe6\x90\x39" - "\x1d\xa6\x32\x6a\x34\xe3\x55\xf8" - "\x58\xa0\x58\x7d\x33\xe0\x22\x39" - "\x44\x64\x87\x86\x5a\x2f\xa7\x7e" - "\x0f\x38\xea\xb0\x30\xcc\x61\xa5" - "\x6a\x32\xae\x1e\xf7\xe9\xd0\xa9" - "\x0c\x32\x4b\xb5\x49\x28\xab\x85" - "\x2f\x8e\x01\x36\x38\x52\xd0\xba" - "\xd6\x02\x78\xf8\x0e\x3e\x9c\x8b" - "\x6b\x45\x99\x3f\x5c\xfe\x58\xf1" - "\x5c\x94\x04\xe1\xf5\x18\x6d\x51" - "\xb2\x5d\x18\x20\xb6\xc2\x9a\x42" - "\x1d\xb3\xab\x3c\xb6\x3a\x13\x03" - "\xb2\x46\x82\x4f\xfc\x64\xbc\x4f" - "\xca\xfa\x9c\xc0\xd5\xa7\xbd\x11" - 
"\xb7\xe4\x5a\xf6\x6f\x4d\x4d\x54" - "\xea\xa4\x98\x66\xd4\x22\x3b\xd3" - "\x8f\x34\x47\xd9\x7c\xf4\x72\x3b" - "\x4d\x02\x77\xf6\xd6\xdd\x08\x0a" - "\x81\xe1\x86\x89\x3e\x56\x10\x3c" - "\xba\xd7\x81\x8c\x08\xbc\x8b\xe2" - "\x53\xec\xa7\x89\xee\xc8\x56\xb5" - "\x36\x2c\xb2\x03\xba\x99\xdd\x7c" - "\x48\xa0\xb0\xbc\x91\x33\xe9\xa8" - "\xcb\xcd\xcf\x59\x5f\x1f\x15\xe2" - "\x56\xf5\x4e\x01\x35\x27\x45\x77" - "\x47\xc8\xbc\xcb\x7e\x39\xc1\x97" - "\x28\xd3\x84\xfc\x2c\x3e\xc8\xad" - "\x9c\xf8\x8a\x61\x9c\x28\xaa\xc5" - "\x99\x20\x43\x85\x9d\xa5\xe2\x8b" - "\xb8\xae\xeb\xd0\x32\x0d\x52\x78" - "\x09\x56\x3f\xc7\xd8\x7e\x26\xfc" - "\x37\xfb\x6f\x04\xfc\xfa\x92\x10" - "\xac\xf8\x3e\x21\xdc\x8c\x21\x16" - "\x7d\x67\x6e\xf6\xcd\xda\xb6\x98" - "\x23\xab\x23\x3c\xb2\x10\xa0\x53" - "\x5a\x56\x9f\xc5\xd0\xff\xbb\xe4" - "\x98\x3c\x69\x1e\xdb\x38\x8f\x7e" - "\x0f\xd2\x98\x88\x81\x8b\x45\x67" - "\xea\x33\xf1\xeb\xe9\x97\x55\x2e" - "\xd9\xaa\xeb\x5a\xec\xda\xe1\x68" - "\xa8\x9d\x3c\x84\x7c\x05\x3d\x62" - "\x87\x8f\x03\x21\x28\x95\x0c\x89" - "\x25\x22\x4a\xb0\x93\xa9\x50\xa2" - "\x2f\x57\x6e\x18\x42\x19\x54\x0c" - "\x55\x67\xc6\x11\x49\xf4\x5c\xd2" - "\xe9\x3d\xdd\x8b\x48\x71\x21\x00" - "\xc3\x9a\x6c\x85\x74\x28\x83\x4a" - "\x1b\x31\x05\xe1\x06\x92\xe7\xda" - "\x85\x73\x78\x45\x20\x7f\xae\x13" - "\x7c\x33\x06\x22\xf4\x83\xf9\x35" - "\x3f\x6c\x71\xa8\x4e\x48\xbe\x9b" - "\xce\x8a\xba\xda\xbe\x28\x08\xf7" - "\xe2\x14\x8c\x71\xea\x72\xf9\x33" - "\xf2\x88\x3f\xd7\xbb\x69\x6c\x29" - "\x19\xdc\x84\xce\x1f\x12\x4f\xc8" - "\xaf\xa5\x04\xba\x5a\xab\xb0\xd9" - "\x14\x1f\x6c\x68\x98\x39\x89\x7a" - "\xd9\xd8\x2f\xdf\xa8\x47\x4a\x25" - "\xe2\xfb\x33\xf4\x59\x78\xe1\x68" - "\x85\xcf\xfe\x59\x20\xd4\x05\x1d" - "\x80\x99\xae\xbc\xca\xae\x0f\x2f" - "\x65\x43\x34\x8e\x7e\xac\xd3\x93" - "\x2f\xac\x6d\x14\x3d\x02\x07\x70" - "\x9d\xa4\xf3\x1b\x5c\x36\xfc\x01" - "\x73\x34\x85\x0c\x6c\xd6\xf1\xbd" - "\x3f\xdf\xee\xf5\xd9\xba\x56\xef" - "\xf4\x9b\x6b\xee\x9f\x5a\x78\x6d" - "\x32\x19\xf4\xf7\xf8\x4c\x69\x0b" - "\x4b\xbc\xbb\xb7\xf2\x85\xaf\x70" - "\x75\x24\x6c\x54\xa7\x0e\x4d\x1d" - "\x01\xbf\x08\xac\xcf\x7f\x2c\xe3" - "\x14\x89\x5e\x70\x5a\x99\x92\xcd" - "\x01\x84\xc8\xd2\xab\xe5\x4f\x58" - "\xe7\x0f\x2f\x0e\xff\x68\xea\xfd" - "\x15\xb3\x17\xe6\xb0\xe7\x85\xd8" - "\x23\x2e\x05\xc7\xc9\xc4\x46\x1f" - "\xe1\x9e\x49\x20\x23\x24\x4d\x7e" - "\x29\x65\xff\xf4\xb6\xfd\x1a\x85" - "\xc4\x16\xec\xfc\xea\x7b\xd6\x2c" - "\x43\xf8\xb7\xbf\x79\xc0\x85\xcd" - "\xef\xe1\x98\xd3\xa5\xf7\x90\x8c" - "\xe9\x7f\x80\x6b\xd2\xac\x4c\x30" - "\xa7\xc6\x61\x6c\xd2\xf9\x2c\xff" - "\x30\xbc\x22\x81\x7d\x93\x12\xe4" - "\x0a\xcd\xaf\xdd\xe8\xab\x0a\x1e" - "\x13\xa4\x27\xc3\x5f\xf7\x4b\xbb" - "\x37\x09\x4b\x91\x6f\x92\x4f\xaf" - "\x52\xee\xdf\xef\x09\x6f\xf7\x5c" - "\x6e\x12\x17\x72\x63\x57\xc7\xba" - "\x3b\x6b\x38\x32\x73\x1b\x9c\x80" - "\xc1\x7a\xc6\xcf\xcd\x35\xc0\x6b" - "\x31\x1a\x6b\xe9\xd8\x2c\x29\x3f" - "\x96\xfb\xb6\xcd\x13\x91\x3b\xc2" - "\xd2\xa3\x31\x8d\xa4\xcd\x57\xcd" - "\x13\x3d\x64\xfd\x06\xce\xe6\xdc" - "\x0c\x24\x43\x31\x40\x57\xf1\x72" - "\x17\xe3\x3a\x63\x6d\x35\xcf\x5d" - "\x97\x40\x59\xdd\xf7\x3c\x02\xf7" - "\x1c\x7e\x05\xbb\xa9\x0d\x01\xb1" - "\x8e\xc0\x30\xa9\x53\x24\xc9\x89" - "\x84\x6d\xaa\xd0\xcd\x91\xc2\x4d" - "\x91\xb0\x89\xe2\xbf\x83\x44\xaa" - "\x28\x72\x23\xa0\xc2\xad\xad\x1c" - "\xfc\x3f\x09\x7a\x0b\xdc\xc5\x1b" - "\x87\x13\xc6\x5b\x59\x8d\xf2\xc8" - "\xaf\xdf\x11\x95", - .len = 4100, - }, -}; - static const struct cipher_testvec chacha20_tv_template[] = { { /* RFC7539 A.2. 
Test Vector #1 */ .key = "\x00\x00\x00\x00\x00\x00\x00\x00" -- cgit v1.2.3-59-g8ed1b From e1b2d980f03b833442768c1987d5ad0b9a58cfe7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Feb 2021 19:02:29 +0100 Subject: crypto: michael_mic - fix broken misalignment handling The Michael MIC driver uses the cra_alignmask to ensure that pointers presented to its update and finup/final methods are 32-bit aligned. However, due to the way the shash API works, this is no guarantee that the 32-bit reads occurring in the update method are also aligned, as the size of the buffer presented to update may be of uneven length. For instance, an update() of 3 bytes followed by a misaligned update() of 4 or more bytes will result in a misaligned access using an accessor that is not suitable for this. On most architectures, this does not matter, and so setting the cra_alignmask is pointless. On architectures where this does matter, setting the cra_alignmask does not actually solve the problem. So let's get rid of the cra_alignmask, and use unaligned accessors instead, where appropriate. Cc: Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/michael_mic.c | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) (limited to 'crypto') diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index 63350c4ad461..f4c31049601c 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -7,7 +7,7 @@ * Copyright (c) 2004 Jouni Malinen */ #include -#include +#include #include #include #include @@ -19,7 +19,7 @@ struct michael_mic_ctx { }; struct michael_mic_desc_ctx { - u8 pending[4]; + __le32 pending; size_t pending_len; u32 l, r; @@ -60,13 +60,12 @@ static int michael_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); - const __le32 *src; if (mctx->pending_len) { int flen = 4 - mctx->pending_len; if (flen > len) flen = len; - memcpy(&mctx->pending[mctx->pending_len], data, flen); + memcpy((u8 *)&mctx->pending + mctx->pending_len, data, flen); mctx->pending_len += flen; data += flen; len -= flen; @@ -74,23 +73,21 @@ static int michael_update(struct shash_desc *desc, const u8 *data, if (mctx->pending_len < 4) return 0; - src = (const __le32 *)mctx->pending; - mctx->l ^= le32_to_cpup(src); + mctx->l ^= le32_to_cpu(mctx->pending); michael_block(mctx->l, mctx->r); mctx->pending_len = 0; } - src = (const __le32 *)data; - while (len >= 4) { - mctx->l ^= le32_to_cpup(src++); + mctx->l ^= get_unaligned_le32(data); michael_block(mctx->l, mctx->r); + data += 4; len -= 4; } if (len > 0) { mctx->pending_len = len; - memcpy(mctx->pending, src, len); + memcpy(&mctx->pending, data, len); } return 0; @@ -100,8 +97,7 @@ static int michael_update(struct shash_desc *desc, const u8 *data, static int michael_final(struct shash_desc *desc, u8 *out) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); - u8 *data = mctx->pending; - __le32 *dst = (__le32 *)out; + u8 *data = (u8 *)&mctx->pending; /* Last block and padding (0x5a, 4..7 x 0) */ switch (mctx->pending_len) { @@ -123,8 +119,8 @@ static int michael_final(struct shash_desc *desc, u8 *out) /* l ^= 0; */ michael_block(mctx->l, mctx->r); - dst[0] = cpu_to_le32(mctx->l); - dst[1] = cpu_to_le32(mctx->r); + put_unaligned_le32(mctx->l, out); + put_unaligned_le32(mctx->r, out + 4); return 0; } @@ -135,13 +131,11 @@ static int michael_setkey(struct crypto_shash *tfm, const u8 *key, { struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm); - const __le32 *data = 
(const __le32 *)key; - if (keylen != 8) return -EINVAL; - mctx->l = le32_to_cpu(data[0]); - mctx->r = le32_to_cpu(data[1]); + mctx->l = get_unaligned_le32(key); + mctx->r = get_unaligned_le32(key + 4); return 0; } @@ -156,7 +150,6 @@ static struct shash_alg alg = { .cra_name = "michael_mic", .cra_driver_name = "michael_mic-generic", .cra_blocksize = 8, - .cra_alignmask = 3, .cra_ctxsize = sizeof(struct michael_mic_ctx), .cra_module = THIS_MODULE, } -- cgit v1.2.3-59-g8ed1b From 784506a1df57737fc8460fd644b30ac8fecaedf0 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Feb 2021 19:02:30 +0100 Subject: crypto: serpent - get rid of obsolete tnepres variant It is not trivial to trace back why exactly the tnepres variant of serpent was added ~17 years ago - Google searches come up mostly empty, but it seems to be related with the 'kerneli' version, which was based on an incorrect interpretation of the serpent spec. In other words, nobody is likely to care anymore today, so let's get rid of it. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 3 +- crypto/serpent_generic.c | 82 +++--------------------------------------------- crypto/tcrypt.c | 6 +--- crypto/testmgr.c | 6 ---- crypto/testmgr.h | 79 ---------------------------------------------- 5 files changed, 7 insertions(+), 169 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 9779c7f7531f..15c9c28d9f53 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1460,8 +1460,7 @@ config CRYPTO_SERPENT Serpent cipher algorithm, by Anderson, Biham & Knudsen. Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. Also includes the 'Tnepres' algorithm, a reversed - variant of Serpent for compatibility with old kerneli.org code. + of 8 bits. See also: diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 492c1d0bfe06..a932e0b2964f 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -5,11 +5,6 @@ * Serpent Cipher Algorithm. 
* * Copyright (C) 2002 Dag Arne Osvik - * 2003 Herbert Valerio Riedel - * - * Added tnepres support: - * Ruben Jesus Garcia Hernandez , 18.10.2004 - * Based on code by hvr */ #include @@ -576,59 +571,7 @@ static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) __serpent_decrypt(ctx, dst, src); } -static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) -{ - u8 rev_key[SERPENT_MAX_KEY_SIZE]; - int i; - - for (i = 0; i < keylen; ++i) - rev_key[keylen - i - 1] = key[i]; - - return serpent_setkey(tfm, rev_key, keylen); -} - -static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - const u32 * const s = (const u32 * const)src; - u32 * const d = (u32 * const)dst; - - u32 rs[4], rd[4]; - - rs[0] = swab32(s[3]); - rs[1] = swab32(s[2]); - rs[2] = swab32(s[1]); - rs[3] = swab32(s[0]); - - serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs); - - d[0] = swab32(rd[3]); - d[1] = swab32(rd[2]); - d[2] = swab32(rd[1]); - d[3] = swab32(rd[0]); -} - -static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - const u32 * const s = (const u32 * const)src; - u32 * const d = (u32 * const)dst; - - u32 rs[4], rd[4]; - - rs[0] = swab32(s[3]); - rs[1] = swab32(s[2]); - rs[2] = swab32(s[1]); - rs[3] = swab32(s[0]); - - serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs); - - d[0] = swab32(rd[3]); - d[1] = swab32(rd[2]); - d[2] = swab32(rd[1]); - d[3] = swab32(rd[0]); -} - -static struct crypto_alg srp_algs[2] = { { +static struct crypto_alg srp_alg = { .cra_name = "serpent", .cra_driver_name = "serpent-generic", .cra_priority = 100, @@ -643,38 +586,23 @@ static struct crypto_alg srp_algs[2] = { { .cia_setkey = serpent_setkey, .cia_encrypt = serpent_encrypt, .cia_decrypt = serpent_decrypt } } -}, { - .cra_name = "tnepres", - .cra_driver_name = "tnepres-generic", - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = SERPENT_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct serpent_ctx), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_u = { .cipher = { - .cia_min_keysize = SERPENT_MIN_KEY_SIZE, - .cia_max_keysize = SERPENT_MAX_KEY_SIZE, - .cia_setkey = tnepres_setkey, - .cia_encrypt = tnepres_encrypt, - .cia_decrypt = tnepres_decrypt } } -} }; +}; static int __init serpent_mod_init(void) { - return crypto_register_algs(srp_algs, ARRAY_SIZE(srp_algs)); + return crypto_register_alg(&srp_alg); } static void __exit serpent_mod_fini(void) { - crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs)); + crypto_unregister_alg(&srp_alg); } subsys_initcall(serpent_mod_init); module_exit(serpent_mod_fini); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm"); +MODULE_DESCRIPTION("Serpent Cipher Algorithm"); MODULE_AUTHOR("Dag Arne Osvik "); -MODULE_ALIAS_CRYPTO("tnepres"); MODULE_ALIAS_CRYPTO("serpent"); MODULE_ALIAS_CRYPTO("serpent-generic"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 2877b88cfa45..6b7c158dc508 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -70,7 +70,7 @@ static const char *check[] = { "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3", "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", - "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", + "khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt", "camellia", "seed", "rmd160", "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", "streebog256", "streebog512", @@ 
-1806,10 +1806,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("wp256"); break; - case 25: - ret += tcrypt_test("ecb(tnepres)"); - break; - case 26: ret += tcrypt_test("ecb(anubis)"); ret += tcrypt_test("cbc(anubis)"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 1a4103b1b202..93359999c94b 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4876,12 +4876,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(tea_tv_template) } - }, { - .alg = "ecb(tnepres)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(tnepres_tv_template) - } }, { .alg = "ecb(twofish)", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 99aca08263d2..ced56ea0c9b4 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -11415,85 +11415,6 @@ static const struct cipher_testvec serpent_tv_template[] = { }, }; -static const struct cipher_testvec tnepres_tv_template[] = { - { /* KeySize=0 */ - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ctext = "\x41\xcc\x6b\x31\x59\x31\x45\x97" - "\x6d\x6f\xbb\x38\x4b\x37\x21\x28", - .len = 16, - }, - { /* KeySize=128, PT=0, I=1 */ - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .key = "\x80\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 16, - .ctext = "\x49\xaf\xbf\xad\x9d\x5a\x34\x05" - "\x2c\xd8\xff\xa5\x98\x6b\xd2\xdd", - .len = 16, - }, { /* KeySize=128 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .klen = 16, - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ctext = "\xea\xf4\xd7\xfc\xd8\x01\x34\x47" - "\x81\x45\x0b\xfa\x0c\xd6\xad\x6e", - .len = 16, - }, { /* KeySize=128, I=121 */ - .key = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80", - .klen = 16, - .ptext = zeroed_string, - .ctext = "\x3d\xda\xbf\xc0\x06\xda\xab\x06" - "\x46\x2a\xf4\xef\x81\x54\x4e\x26", - .len = 16, - }, { /* KeySize=192, PT=0, I=1 */ - .key = "\x80\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 24, - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\xe7\x8e\x54\x02\xc7\x19\x55\x68" - "\xac\x36\x78\xf7\xa3\xf6\x0c\x66", - .len = 16, - }, { /* KeySize=256, PT=0, I=1 */ - .key = "\x80\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 32, - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\xab\xed\x96\xe7\x66\xbf\x28\xcb" - "\xc0\xeb\xd2\x1a\x82\xef\x08\x19", - .len = 16, - }, { /* KeySize=256, I=257 */ - .key = "\x1f\x1e\x1d\x1c\x1b\x1a\x19\x18" - "\x17\x16\x15\x14\x13\x12\x11\x10" - "\x0f\x0e\x0d\x0c\x0b\x0a\x09\x08" - "\x07\x06\x05\x04\x03\x02\x01\x00", - .klen = 32, - .ptext = "\x0f\x0e\x0d\x0c\x0b\x0a\x09\x08" - "\x07\x06\x05\x04\x03\x02\x01\x00", - .ctext = "\x5c\xe7\x1c\x70\xd2\x88\x2e\x5b" - "\xb8\x32\xe4\x33\xf8\x9f\x26\xde", - .len = 16, - }, { /* KeySize=256 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .klen = 32, - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ctext = "\x64\xa9\x1a\x37\xed\x9f\xe7\x49" - "\xa8\x4e\x76\xd6\xf5\x0d\x78\xee", - .len = 16, - 
} -}; - static const struct cipher_testvec serpent_cbc_tv_template[] = { { /* Generated with Crypto++ */ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" -- cgit v1.2.3-59-g8ed1b From 81d091a293a24912a61c22e073824d29496301d5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Feb 2021 19:02:31 +0100 Subject: crypto: serpent - use unaligned accessors instead of alignmask Instead of using an alignmask of 0x3 to ensure 32-bit alignment of the Serpent input and output blocks, which propagates to mode drivers, and results in pointless copying on architectures that don't care about alignment, use the unaligned accessors, which will do the right thing on each respective architecture, avoiding the need for double buffering. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/serpent_generic.c | 44 +++++++++++++++++--------------------------- 1 file changed, 17 insertions(+), 27 deletions(-) (limited to 'crypto') diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index a932e0b2964f..236c87547a17 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -448,19 +448,12 @@ void __serpent_encrypt(const void *c, u8 *dst, const u8 *src) { const struct serpent_ctx *ctx = c; const u32 *k = ctx->expkey; - const __le32 *s = (const __le32 *)src; - __le32 *d = (__le32 *)dst; u32 r0, r1, r2, r3, r4; -/* - * Note: The conversions between u8* and u32* might cause trouble - * on architectures with stricter alignment rules than x86 - */ - - r0 = le32_to_cpu(s[0]); - r1 = le32_to_cpu(s[1]); - r2 = le32_to_cpu(s[2]); - r3 = le32_to_cpu(s[3]); + r0 = get_unaligned_le32(src); + r1 = get_unaligned_le32(src + 4); + r2 = get_unaligned_le32(src + 8); + r3 = get_unaligned_le32(src + 12); K(r0, r1, r2, r3, 0); S0(r0, r1, r2, r3, r4); LK(r2, r1, r3, r0, r4, 1); @@ -496,10 +489,10 @@ void __serpent_encrypt(const void *c, u8 *dst, const u8 *src) S6(r0, r1, r3, r2, r4); LK(r3, r4, r1, r2, r0, 31); S7(r3, r4, r1, r2, r0); K(r0, r1, r2, r3, 32); - d[0] = cpu_to_le32(r0); - d[1] = cpu_to_le32(r1); - d[2] = cpu_to_le32(r2); - d[3] = cpu_to_le32(r3); + put_unaligned_le32(r0, dst); + put_unaligned_le32(r1, dst + 4); + put_unaligned_le32(r2, dst + 8); + put_unaligned_le32(r3, dst + 12); } EXPORT_SYMBOL_GPL(__serpent_encrypt); @@ -514,14 +507,12 @@ void __serpent_decrypt(const void *c, u8 *dst, const u8 *src) { const struct serpent_ctx *ctx = c; const u32 *k = ctx->expkey; - const __le32 *s = (const __le32 *)src; - __le32 *d = (__le32 *)dst; u32 r0, r1, r2, r3, r4; - r0 = le32_to_cpu(s[0]); - r1 = le32_to_cpu(s[1]); - r2 = le32_to_cpu(s[2]); - r3 = le32_to_cpu(s[3]); + r0 = get_unaligned_le32(src); + r1 = get_unaligned_le32(src + 4); + r2 = get_unaligned_le32(src + 8); + r3 = get_unaligned_le32(src + 12); K(r0, r1, r2, r3, 32); SI7(r0, r1, r2, r3, r4); KL(r1, r3, r0, r4, r2, 31); @@ -557,10 +548,10 @@ void __serpent_decrypt(const void *c, u8 *dst, const u8 *src) SI1(r3, r1, r2, r0, r4); KL(r4, r1, r2, r0, r3, 1); SI0(r4, r1, r2, r0, r3); K(r2, r3, r1, r4, 0); - d[0] = cpu_to_le32(r2); - d[1] = cpu_to_le32(r3); - d[2] = cpu_to_le32(r1); - d[3] = cpu_to_le32(r4); + put_unaligned_le32(r2, dst); + put_unaligned_le32(r3, dst + 4); + put_unaligned_le32(r1, dst + 8); + put_unaligned_le32(r4, dst + 12); } EXPORT_SYMBOL_GPL(__serpent_decrypt); @@ -578,7 +569,6 @@ static struct crypto_alg srp_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = sizeof(struct 
serpent_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = SERPENT_MIN_KEY_SIZE, -- cgit v1.2.3-59-g8ed1b From 50a3a9fae3e0c3662786875b941c93dcdd26eee6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Feb 2021 19:02:32 +0100 Subject: crypto: blowfish - use unaligned accessors instead of alignmask Instead of using an alignmask of 0x3 to ensure 32-bit alignment of the Blowfish input and output blocks, which propagates to mode drivers, and results in pointless copying on architectures that don't care about alignment, use the unaligned accessors, which will do the right thing on each respective architecture, avoiding the need for double buffering. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/blowfish_generic.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) (limited to 'crypto') diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c index c3c2041fe0c5..003b52c6880e 100644 --- a/crypto/blowfish_generic.c +++ b/crypto/blowfish_generic.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -36,12 +36,10 @@ static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *in_blk = (const __be32 *)src; - __be32 *const out_blk = (__be32 *)dst; const u32 *P = ctx->p; const u32 *S = ctx->s; - u32 yl = be32_to_cpu(in_blk[0]); - u32 yr = be32_to_cpu(in_blk[1]); + u32 yl = get_unaligned_be32(src); + u32 yr = get_unaligned_be32(src + 4); ROUND(yr, yl, 0); ROUND(yl, yr, 1); @@ -63,19 +61,17 @@ static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) yl ^= P[16]; yr ^= P[17]; - out_blk[0] = cpu_to_be32(yr); - out_blk[1] = cpu_to_be32(yl); + put_unaligned_be32(yr, dst); + put_unaligned_be32(yl, dst + 4); } static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *in_blk = (const __be32 *)src; - __be32 *const out_blk = (__be32 *)dst; const u32 *P = ctx->p; const u32 *S = ctx->s; - u32 yl = be32_to_cpu(in_blk[0]); - u32 yr = be32_to_cpu(in_blk[1]); + u32 yl = get_unaligned_be32(src); + u32 yr = get_unaligned_be32(src + 4); ROUND(yr, yl, 17); ROUND(yl, yr, 16); @@ -97,8 +93,8 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) yl ^= P[1]; yr ^= P[0]; - out_blk[0] = cpu_to_be32(yr); - out_blk[1] = cpu_to_be32(yl); + put_unaligned_be32(yr, dst); + put_unaligned_be32(yl, dst + 4); } static struct crypto_alg alg = { @@ -108,7 +104,6 @@ static struct crypto_alg alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = BF_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bf_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = BF_MIN_KEY_SIZE, -- cgit v1.2.3-59-g8ed1b From 83385415100591248b25d0b89a2796a9cb3bea5c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Feb 2021 19:02:33 +0100 Subject: crypto: camellia - use unaligned accessors instead of alignmask Instead of using an alignmask of 0x3 to ensure 32-bit alignment of the Camellia input and output blocks, which propagates to mode drivers, and results in pointless copying on architectures that don't care about alignment, use the unaligned accessors, which will do the right thing on each respective architecture, avoiding the need for double buffering. 
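As an aside to this series of conversions, the pattern is easiest to see outside the kernel. The following is a minimal userspace sketch, assuming only standard C: the demo_* helpers reproduce the semantics of the kernel's get_unaligned_be32()/put_unaligned_be32() from <linux/unaligned.h> with plain byte arithmetic, which is valid at any pointer alignment, so code written against them needs neither a cra_alignmask nor bounce buffering. demo_block_op() is hypothetical, only to show the load-transform-store shape the converted ciphers share.

#include <stdint.h>

/* Stand-ins for the kernel accessors; byte-wise access is legal
 * at any alignment and independent of host endianness. */
static inline uint32_t demo_get_unaligned_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static inline void demo_put_unaligned_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}

/* Hypothetical round: load big-endian words, transform, store,
 * with no alignment requirement on src or dst. */
static void demo_block_op(uint8_t *dst, const uint8_t *src, uint32_t key)
{
	demo_put_unaligned_be32(demo_get_unaligned_be32(src) ^ key, dst);
}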
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/camellia_generic.c | 45 ++++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 29 deletions(-) (limited to 'crypto') diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 0b9f409f7370..fd1a88af9e77 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -9,14 +9,6 @@ * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ -/* - * - * NOTE --- NOTE --- NOTE --- NOTE - * This implementation assumes that all memory addresses passed - * as parameters are four-byte aligned. - * - */ - #include #include #include @@ -994,16 +986,14 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key, static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; unsigned int max; u32 tmp[4]; - tmp[0] = be32_to_cpu(src[0]); - tmp[1] = be32_to_cpu(src[1]); - tmp[2] = be32_to_cpu(src[2]); - tmp[3] = be32_to_cpu(src[3]); + tmp[0] = get_unaligned_be32(in); + tmp[1] = get_unaligned_be32(in + 4); + tmp[2] = get_unaligned_be32(in + 8); + tmp[3] = get_unaligned_be32(in + 12); if (cctx->key_length == 16) max = 24; @@ -1013,25 +1003,23 @@ static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) camellia_do_encrypt(cctx->key_table, tmp, max); /* do_encrypt returns 0,1 swapped with 2,3 */ - dst[0] = cpu_to_be32(tmp[2]); - dst[1] = cpu_to_be32(tmp[3]); - dst[2] = cpu_to_be32(tmp[0]); - dst[3] = cpu_to_be32(tmp[1]); + put_unaligned_be32(tmp[2], out); + put_unaligned_be32(tmp[3], out + 4); + put_unaligned_be32(tmp[0], out + 8); + put_unaligned_be32(tmp[1], out + 12); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; unsigned int max; u32 tmp[4]; - tmp[0] = be32_to_cpu(src[0]); - tmp[1] = be32_to_cpu(src[1]); - tmp[2] = be32_to_cpu(src[2]); - tmp[3] = be32_to_cpu(src[3]); + tmp[0] = get_unaligned_be32(in); + tmp[1] = get_unaligned_be32(in + 4); + tmp[2] = get_unaligned_be32(in + 8); + tmp[3] = get_unaligned_be32(in + 12); if (cctx->key_length == 16) max = 24; @@ -1041,10 +1029,10 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) camellia_do_decrypt(cctx->key_table, tmp, max); /* do_decrypt returns 0,1 swapped with 2,3 */ - dst[0] = cpu_to_be32(tmp[2]); - dst[1] = cpu_to_be32(tmp[3]); - dst[2] = cpu_to_be32(tmp[0]); - dst[3] = cpu_to_be32(tmp[1]); + put_unaligned_be32(tmp[2], out); + put_unaligned_be32(tmp[3], out + 4); + put_unaligned_be32(tmp[0], out + 8); + put_unaligned_be32(tmp[1], out + 12); } static struct crypto_alg camellia_alg = { @@ -1054,7 +1042,6 @@ static struct crypto_alg camellia_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { -- cgit v1.2.3-59-g8ed1b From 24a2ee44f2fb0b90b3322c1ecef3b7bfb86880be Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Feb 2021 19:02:34 +0100 Subject: crypto: cast5 - use unaligned accessors instead of alignmask Instead of using an alignmask of 0x3 to ensure 32-bit alignment of the CAST5 input and output blocks, which propagates to mode drivers, and results in pointless copying on architectures that don't care about alignment, use the 
unaligned accessors, which will do the right thing on each respective architecture, avoiding the need for double buffering. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/cast5_generic.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) (limited to 'crypto') diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index 4095085d4e51..0257c14cefc2 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c @@ -13,7 +13,7 @@ */ -#include +#include #include #include #include @@ -302,8 +302,6 @@ static const u32 sb8[256] = { void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) { - const __be32 *src = (const __be32 *)inbuf; - __be32 *dst = (__be32 *)outbuf; u32 l, r, t; u32 I; /* used by the Fx macros */ u32 *Km; @@ -315,8 +313,8 @@ void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) */ - l = be32_to_cpu(src[0]); - r = be32_to_cpu(src[1]); + l = get_unaligned_be32(inbuf); + r = get_unaligned_be32(inbuf + 4); /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: * Li = Ri-1; @@ -347,8 +345,8 @@ void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and * concatenate to form the ciphertext.) */ - dst[0] = cpu_to_be32(r); - dst[1] = cpu_to_be32(l); + put_unaligned_be32(r, outbuf); + put_unaligned_be32(l, outbuf + 4); } EXPORT_SYMBOL_GPL(__cast5_encrypt); @@ -359,8 +357,6 @@ static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) { - const __be32 *src = (const __be32 *)inbuf; - __be32 *dst = (__be32 *)outbuf; u32 l, r, t; u32 I; u32 *Km; @@ -369,8 +365,8 @@ void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) Km = c->Km; Kr = c->Kr; - l = be32_to_cpu(src[0]); - r = be32_to_cpu(src[1]); + l = get_unaligned_be32(inbuf); + r = get_unaligned_be32(inbuf + 4); if (!(c->rr)) { t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); @@ -391,8 +387,8 @@ void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); - dst[0] = cpu_to_be32(r); - dst[1] = cpu_to_be32(l); + put_unaligned_be32(r, outbuf); + put_unaligned_be32(l, outbuf + 4); } EXPORT_SYMBOL_GPL(__cast5_decrypt); @@ -513,7 +509,6 @@ static struct crypto_alg alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST5_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast5_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { -- cgit v1.2.3-59-g8ed1b From 80879dd9de7aa34c8de620e9f18e940b919497f7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Feb 2021 19:02:35 +0100 Subject: crypto: cast6 - use unaligned accessors instead of alignmask Instead of using an alignmask of 0x3 to ensure 32-bit alignment of the CAST6 input and output blocks, which propagates to mode drivers, and results in pointless copying on architectures that don't care about alignment, use the unaligned accessors, which will do the right thing on each respective architecture, avoiding the need for double buffering. 
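To see why the cast-based loads being removed throughout this series are a problem in the first place, here is a self-contained demonstration, assuming a hosted C compiler: dereferencing a 32-bit pointer at an odd offset is undefined behaviour in C and can fault on strict-alignment CPUs, while a memcpy()-based load, which is what the unaligned accessors reduce to on such machines, is always well-defined and still compiles to a single load where the hardware allows it.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[8] = { 0xaa, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };
	uint32_t v;

	/* Undefined behaviour: buf + 1 is not 4-byte aligned.
	 * v = *(const uint32_t *)(buf + 1);
	 */

	/* Well-defined at any alignment. */
	memcpy(&v, buf + 1, sizeof(v));
	printf("0x%08x\n", (unsigned)v); /* 0x12345678 on a little-endian host */
	return 0;
}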
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/cast6_generic.c | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) (limited to 'crypto') diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index c77ff6c8a2b2..75346380aa0b 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -10,7 +10,7 @@ */ -#include +#include #include #include #include @@ -172,16 +172,14 @@ static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km) void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { const struct cast6_ctx *c = ctx; - const __be32 *src = (const __be32 *)inbuf; - __be32 *dst = (__be32 *)outbuf; u32 block[4]; const u32 *Km; const u8 *Kr; - block[0] = be32_to_cpu(src[0]); - block[1] = be32_to_cpu(src[1]); - block[2] = be32_to_cpu(src[2]); - block[3] = be32_to_cpu(src[3]); + block[0] = get_unaligned_be32(inbuf); + block[1] = get_unaligned_be32(inbuf + 4); + block[2] = get_unaligned_be32(inbuf + 8); + block[3] = get_unaligned_be32(inbuf + 12); Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km); Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km); @@ -196,10 +194,10 @@ void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km); Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km); - dst[0] = cpu_to_be32(block[0]); - dst[1] = cpu_to_be32(block[1]); - dst[2] = cpu_to_be32(block[2]); - dst[3] = cpu_to_be32(block[3]); + put_unaligned_be32(block[0], outbuf); + put_unaligned_be32(block[1], outbuf + 4); + put_unaligned_be32(block[2], outbuf + 8); + put_unaligned_be32(block[3], outbuf + 12); } EXPORT_SYMBOL_GPL(__cast6_encrypt); @@ -211,16 +209,14 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { const struct cast6_ctx *c = ctx; - const __be32 *src = (const __be32 *)inbuf; - __be32 *dst = (__be32 *)outbuf; u32 block[4]; const u32 *Km; const u8 *Kr; - block[0] = be32_to_cpu(src[0]); - block[1] = be32_to_cpu(src[1]); - block[2] = be32_to_cpu(src[2]); - block[3] = be32_to_cpu(src[3]); + block[0] = get_unaligned_be32(inbuf); + block[1] = get_unaligned_be32(inbuf + 4); + block[2] = get_unaligned_be32(inbuf + 8); + block[3] = get_unaligned_be32(inbuf + 12); Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km); Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km); @@ -235,10 +231,10 @@ void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km); Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km); - dst[0] = cpu_to_be32(block[0]); - dst[1] = cpu_to_be32(block[1]); - dst[2] = cpu_to_be32(block[2]); - dst[3] = cpu_to_be32(block[3]); + put_unaligned_be32(block[0], outbuf); + put_unaligned_be32(block[1], outbuf + 4); + put_unaligned_be32(block[2], outbuf + 8); + put_unaligned_be32(block[3], outbuf + 12); } EXPORT_SYMBOL_GPL(__cast6_decrypt); @@ -254,7 +250,6 @@ static struct crypto_alg alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST6_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast6_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { -- cgit v1.2.3-59-g8ed1b From e9cbaef5111a403b1e40ddec2bfb9adea2da682f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Feb 2021 19:02:36 +0100 Subject: crypto: fcrypt - drop unneeded alignmask The fcrypt implementation uses memcpy() to access the input and output buffers so there is no need to set an alignmask. 
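fcrypt illustrates the other legitimate way to stay alignment-safe: copy the whole block into an aligned local buffer, work there, and copy the result back, so the caller's pointers never need to be aligned. A hedged sketch of that shape follows, with hypothetical names and a dummy transform standing in for the real rounds.

#include <stdint.h>
#include <string.h>

union demo_block {
	uint32_t words[2];
	uint8_t bytes[8];
};

static void demo_crypt_block(uint8_t *dst, const uint8_t *src)
{
	union demo_block b;

	/* memcpy() in and out makes caller alignment irrelevant,
	 * at the cost of one extra copy per block. */
	memcpy(b.bytes, src, sizeof(b.bytes));
	b.words[0] ^= 0xdeadbeef;	/* stand-in for the real cipher rounds */
	b.words[1] ^= 0xfeedface;
	memcpy(dst, b.bytes, sizeof(b.bytes));
}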
From af1050a4eca430c49a70e15a2b6972cf5a457f8d Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 1 Feb 2021 19:02:37 +0100
Subject: crypto: twofish - use unaligned accessors instead of alignmask

Instead of using an alignmask of 0x3 to ensure 32-bit alignment of the
Twofish input and output blocks, which propagates to mode drivers, and
results in pointless copying on architectures that don't care about
alignment, use the unaligned accessors, which will do the right thing
on each respective architecture, avoiding the need for double
buffering.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 crypto/twofish_generic.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

(limited to 'crypto')

diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 4f7c033224f9..86b2f067a416 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -24,7 +24,7 @@
  * Third Edition.
  */
 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -83,11 +83,11 @@
  * whitening subkey number m.
 */
 
 #define INPACK(n, x, m) \
-	x = le32_to_cpu(src[n]) ^ ctx->w[m]
+	x = get_unaligned_le32(in + (n) * 4) ^ ctx->w[m]
 
 #define OUTUNPACK(n, x, m) \
 	x ^= ctx->w[m]; \
-	dst[n] = cpu_to_le32(x)
+	put_unaligned_le32(x, out + (n) * 4)
 
 
@@ -95,8 +95,6 @@
 static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *src = (const __le32 *)in;
-	__le32 *dst = (__le32 *)out;
 
 	/* The four 32-bit chunks of the text. */
 	u32 a, b, c, d;
@@ -132,8 +130,6 @@ static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *src = (const __le32 *)in;
-	__le32 *dst = (__le32 *)out;
 
 	/* The four 32-bit chunks of the text. */
 	u32 a, b, c, d;
@@ -172,7 +168,6 @@ static struct crypto_alg alg = {
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize = TF_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct twofish_ctx),
-	.cra_alignmask = 3,
 	.cra_module = THIS_MODULE,
 	.cra_u = { .cipher = { .cia_min_keysize = TF_MIN_KEY_SIZE,
--
cgit v1.2.3-59-g8ed1b

From a53ab94eb6850c3657392e2d2ce9b38c387a2633 Mon Sep 17 00:00:00 2001
From: Daniele Alessandrelli
Date: Wed, 3 Feb 2021 11:28:37 +0000
Subject: crypto: ecdh_helper - Ensure 'len >= secret.len' in decode_key()

The length ('len' parameter) passed to crypto_ecdh_decode_key() is
never checked against the length encoded in the passed buffer ('buf'
parameter). This could lead to an out-of-bounds access when the
passed length is less than the encoded length.

Add a check to prevent that.

Fixes: 3c4b23901a0c7 ("crypto: ecdh - Add ECDH software support")
Signed-off-by: Daniele Alessandrelli
Signed-off-by: Herbert Xu
---
 crypto/ecdh_helper.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'crypto')

diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
index 66fcb2ea8154..fca63b559f65 100644
--- a/crypto/ecdh_helper.c
+++ b/crypto/ecdh_helper.c
@@ -67,6 +67,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
 	if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
 		return -EINVAL;
 
+	if (unlikely(len < secret.len))
+		return -EINVAL;
+
 	ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
 	ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
 	if (secret.len != crypto_ecdh_key_len(params))
--
cgit v1.2.3-59-g8ed1b
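The added check is the standard guard when decoding a length-prefixed binary format: validate the length encoded in the data against the number of bytes the caller actually supplied before unpacking anything. A self-contained sketch of the pattern follows; the header layout here is hypothetical (the kernel's counterpart is the type/len pair that crypto_ecdh_decode_key() reads from the start of the buffer).

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct sketch_hdr {
	uint16_t type;
	uint16_t len;	/* total encoded length, header included */
};

static int sketch_decode(const uint8_t *buf, size_t len)
{
	struct sketch_hdr hdr;

	if (len < sizeof(hdr))
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));

	/* The essence of the fix: the encoded length must never
	 * claim more bytes than the caller handed us.
	 */
	if (len < hdr.len)
		return -1;

	/* ... now safe to unpack up to hdr.len bytes from buf ... */
	return 0;
}

Without the check, a decoder that trusts hdr.len walks past the end of buf as soon as a caller passes a shorter buffer, which is exactly the out-of-bounds access described above.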
From cfb28fde083761bfb839bc53059068bab5634b6a Mon Sep 17 00:00:00 2001
From: Bhaskar Chowdhury
Date: Wed, 3 Feb 2021 21:09:33 +0530
Subject: crypto: xor - Fix typo of optimization

s/optimzation/optimization/

Signed-off-by: Bhaskar Chowdhury
Acked-by: Randy Dunlap
Signed-off-by: Herbert Xu
---
 crypto/xor.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'crypto')

diff --git a/crypto/xor.c b/crypto/xor.c
index eacbf4f93990..c046d074f522 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -95,7 +95,7 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
 	for (i = 0; i < 3; i++) {
 		start = ktime_get();
 		for (j = 0; j < REPS; j++) {
-			mb(); /* prevent loop optimzation */
+			mb(); /* prevent loop optimization */
 			tmpl->do_2(BENCH_SIZE, b1, b2);
 			mb();
 		}
--
cgit v1.2.3-59-g8ed1b
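The patch itself only corrects a comment, but the surrounding hunk shows an idiom worth noting: a barrier on each side of the measured call keeps the compiler from hoisting, merging, or deleting iterations of the timed loop, which would make the benchmark measure nothing. A rough userspace analogue with hypothetical names; the kernel code uses mb() (a full memory barrier that also acts as a compiler barrier) and ktime_get() instead:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Empty asm with a memory clobber: a pure compiler barrier. */
#define sketch_barrier() __asm__ __volatile__("" ::: "memory")

/* Hypothetical stand-in for the operation being timed. */
static void sketch_xor(size_t len, uint8_t *a, const uint8_t *b)
{
	for (size_t i = 0; i < len; i++)
		a[i] ^= b[i];
}

int main(void)
{
	static uint8_t b1[4096], b2[4096];
	struct timespec t0, t1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (int j = 0; j < 100000; j++) {
		sketch_barrier();	/* prevent loop optimization */
		sketch_xor(sizeof(b1), b1, b2);
		sketch_barrier();
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("%.3f s\n", (t1.tv_sec - t0.tv_sec) +
			   (t1.tv_nsec - t0.tv_nsec) / 1e9);
	return 0;
}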