Diffstat
146 files changed, 16122 insertions, 9940 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index c24a47406f8f..d779667671b2 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -15,12 +15,13 @@ source "crypto/async_tx/Kconfig" # menuconfig CRYPTO tristate "Cryptographic API" + select CRYPTO_LIB_UTILS help This option provides the core Cryptographic API. if CRYPTO -comment "Crypto core or helper" +menu "Crypto core or helper" config CRYPTO_FIPS bool "FIPS 200 compliance" @@ -32,6 +33,27 @@ config CRYPTO_FIPS certification. You should say no unless you know what this is. +config CRYPTO_FIPS_NAME + string "FIPS Module Name" + default "Linux Kernel Cryptographic API" + depends on CRYPTO_FIPS + help + This option sets the FIPS Module name reported by the Crypto API via + the /proc/sys/crypto/fips_name file. + +config CRYPTO_FIPS_CUSTOM_VERSION + bool "Use Custom FIPS Module Version" + depends on CRYPTO_FIPS + default n + +config CRYPTO_FIPS_VERSION + string "FIPS Module Version" + default "(none)" + depends on CRYPTO_FIPS_CUSTOM_VERSION + help + This option provides the ability to override the FIPS Module Version. + By default the KERNELRELEASE value is used. + config CRYPTO_ALGAPI tristate select CRYPTO_ALGAPI2 @@ -145,7 +167,7 @@ config CRYPTO_MANAGER_DISABLE_TESTS config CRYPTO_MANAGER_EXTRA_TESTS bool "Enable extra run-time crypto self tests" - depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS + depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER help Enable extra run-time self tests of registered crypto algorithms, including randomized fuzz tests. @@ -197,11 +219,12 @@ config CRYPTO_AUTHENC select CRYPTO_NULL help Authenc: Combined mode wrapper for IPsec. - This is required for IPSec. + + This is required for IPSec ESP (XFRM_ESP). config CRYPTO_TEST tristate "Testing module" - depends on m + depends on m || EXPERT select CRYPTO_MANAGER help Quick & dirty crypto test module. @@ -210,45 +233,68 @@ config CRYPTO_SIMD tristate select CRYPTO_CRYPTD -config CRYPTO_GLUE_HELPER_X86 - tristate - depends on X86 - select CRYPTO_SKCIPHER - config CRYPTO_ENGINE tristate -comment "Public-key cryptography" +endmenu + +menu "Public-key cryptography" config CRYPTO_RSA - tristate "RSA algorithm" + tristate "RSA (Rivest-Shamir-Adleman)" select CRYPTO_AKCIPHER select CRYPTO_MANAGER select MPILIB select ASN1 help - Generic implementation of the RSA public key algorithm. + RSA (Rivest-Shamir-Adleman) public key algorithm (RFC8017) config CRYPTO_DH - tristate "Diffie-Hellman algorithm" + tristate "DH (Diffie-Hellman)" select CRYPTO_KPP select MPILIB help - Generic implementation of the Diffie-Hellman algorithm. + DH (Diffie-Hellman) key exchange algorithm + +config CRYPTO_DH_RFC7919_GROUPS + bool "RFC 7919 FFDHE groups" + depends on CRYPTO_DH + select CRYPTO_RNG_DEFAULT + help + FFDHE (Finite-Field-based Diffie-Hellman Ephemeral) groups + defined in RFC7919. + + Support these finite-field groups in DH key exchanges: + - ffdhe2048, ffdhe3072, ffdhe4096, ffdhe6144, ffdhe8192 + + If unsure, say N. 
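The DH handle registered by CRYPTO_DH is driven through the kernel's kpp API. Below is a minimal in-kernel sketch of loading a private key into a "dh" transform obtained with crypto_alloc_kpp("dh", 0, 0); the helper name and all key/group buffers are illustrative rather than part of this patch, and it is an assumption here that the CRYPTO_DH_RFC7919_GROUPS templates are reachable under additional kpp algorithm names such as "ffdhe2048(dh)".

#include <crypto/kpp.h>
#include <crypto/dh.h>
#include <linux/err.h>
#include <linux/slab.h>

/*
 * Minimal sketch: pack DH parameters and load them as the kpp secret.
 * priv/p/g are hypothetical caller-supplied key material and group
 * parameters; error handling is abbreviated.
 */
static int dh_set_secret_example(struct crypto_kpp *tfm,
				 const void *priv, unsigned int priv_len,
				 const void *p, unsigned int p_len,
				 const void *g, unsigned int g_len)
{
	struct dh params = {
		.key = priv, .key_size = priv_len,
		.p = p, .p_size = p_len,
		.g = g, .g_size = g_len,
	};
	unsigned int len = crypto_dh_key_len(&params);
	void *packed;
	int err;

	packed = kmalloc(len, GFP_KERNEL);
	if (!packed)
		return -ENOMEM;

	/* Serialize params into the format crypto_kpp_set_secret() expects. */
	err = crypto_dh_encode_key(packed, len, &params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, packed, len);

	kfree_sensitive(packed);	/* key material: zeroize on free */
	return err;
}

After this, crypto_kpp_generate_public_key() and crypto_kpp_compute_shared_secret() complete the exchange on a kpp_request.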
config CRYPTO_ECC tristate + select CRYPTO_RNG_DEFAULT config CRYPTO_ECDH - tristate "ECDH algorithm" + tristate "ECDH (Elliptic Curve Diffie-Hellman)" select CRYPTO_ECC select CRYPTO_KPP - select CRYPTO_RNG_DEFAULT help - Generic implementation of the ECDH algorithm + ECDH (Elliptic Curve Diffie-Hellman) key exchange algorithm + using curves P-192, P-256, and P-384 (FIPS 186) + +config CRYPTO_ECDSA + tristate "ECDSA (Elliptic Curve Digital Signature Algorithm)" + select CRYPTO_ECC + select CRYPTO_AKCIPHER + select ASN1 + help + ECDSA (Elliptic Curve Digital Signature Algorithm) (FIPS 186, + ISO/IEC 14888-3) + using curves P-192, P-256, and P-384 + + Only signature verification is implemented. config CRYPTO_ECRDSA - tristate "EC-RDSA (GOST 34.10) algorithm" + tristate "EC-RDSA (Elliptic Curve Russian Digital Signature Algorithm)" select CRYPTO_ECC select CRYPTO_AKCIPHER select CRYPTO_STREEBOG @@ -256,168 +302,441 @@ config CRYPTO_ECRDSA select ASN1 help Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012, - RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic - standard algorithms (called GOST algorithms). Only signature verification - is implemented. + RFC 7091, ISO/IEC 14888-3) + + One of the Russian cryptographic standard algorithms (called GOST + algorithms). Only signature verification is implemented. + +config CRYPTO_SM2 + tristate "SM2 (ShangMi 2)" + select CRYPTO_SM3 + select CRYPTO_AKCIPHER + select CRYPTO_MANAGER + select MPILIB + select ASN1 + help + SM2 (ShangMi 2) public key algorithm + + Published by State Encryption Management Bureau, China, + as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012. + + References: + https://datatracker.ietf.org/doc/draft-shen-sm2-ecdsa/ + http://www.oscca.gov.cn/sca/xxgk/2010-12/17/content_1002386.shtml + http://www.gmbz.org.cn/main/bzlb.html config CRYPTO_CURVE25519 - tristate "Curve25519 algorithm" + tristate "Curve25519" select CRYPTO_KPP select CRYPTO_LIB_CURVE25519_GENERIC + help + Curve25519 elliptic curve (RFC7748) -config CRYPTO_CURVE25519_X86 - tristate "x86_64 accelerated Curve25519 scalar multiplication library" - depends on X86 && 64BIT - select CRYPTO_LIB_CURVE25519_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CURVE25519 +endmenu -comment "Authenticated Encryption with Associated Data" +menu "Block ciphers" -config CRYPTO_CCM - tristate "CCM support" - select CRYPTO_CTR - select CRYPTO_HASH - select CRYPTO_AEAD - select CRYPTO_MANAGER +config CRYPTO_AES + tristate "AES (Advanced Encryption Standard)" + select CRYPTO_ALGAPI + select CRYPTO_LIB_AES help - Support for Counter with CBC MAC. Required for IPsec. + AES cipher algorithms (Rijndael)(FIPS-197, ISO/IEC 18033-3) -config CRYPTO_GCM - tristate "GCM/GMAC support" - select CRYPTO_CTR - select CRYPTO_AEAD - select CRYPTO_GHASH - select CRYPTO_NULL - select CRYPTO_MANAGER + Rijndael appears to be consistently a very good performer in + both hardware and software across a wide range of computing + environments regardless of its use in feedback or non-feedback + modes. Its key setup time is excellent, and its key agility is + good. Rijndael's very low memory requirements make it very well + suited for restricted-space environments, in which it also + demonstrates excellent performance. Rijndael's operations are + among the easiest to defend against power and timing attacks. 
+ + The AES specifies three key sizes: 128, 192 and 256 bits + +config CRYPTO_AES_TI + tristate "AES (Advanced Encryption Standard) (fixed time)" + select CRYPTO_ALGAPI + select CRYPTO_LIB_AES help - Support for Galois/Counter Mode (GCM) and Galois Message - Authentication Code (GMAC). Required for IPSec. + AES cipher algorithms (Rijndael)(FIPS-197, ISO/IEC 18033-3) -config CRYPTO_CHACHA20POLY1305 - tristate "ChaCha20-Poly1305 AEAD support" - select CRYPTO_CHACHA20 - select CRYPTO_POLY1305 - select CRYPTO_AEAD - select CRYPTO_MANAGER + This is a generic implementation of AES that attempts to eliminate + data dependent latencies as much as possible without affecting + performance too much. It is intended for use by the generic CCM + and GCM drivers, and other CTR or CMAC/XCBC based modes that rely + solely on encryption (although decryption is supported as well, but + with a more dramatic performance hit) + + Instead of using 16 lookup tables of 1 KB each, (8 for encryption and + 8 for decryption), this implementation only uses just two S-boxes of + 256 bytes each, and attempts to eliminate data dependent latencies by + prefetching the entire table into the cache at the start of each + block. Interrupts are also disabled to avoid races where cachelines + are evicted when the CPU is interrupted to do something else. + +config CRYPTO_ANUBIS + tristate "Anubis" + depends on CRYPTO_USER_API_ENABLE_OBSOLETE + select CRYPTO_ALGAPI help - ChaCha20-Poly1305 AEAD support, RFC7539. + Anubis cipher algorithm - Support for the AEAD wrapper using the ChaCha20 stream cipher combined - with the Poly1305 authenticator. It is defined in RFC7539 for use in - IETF protocols. + Anubis is a variable key length cipher which can use keys from + 128 bits to 320 bits in length. It was evaluated as an entrant + in the NESSIE competition. -config CRYPTO_AEGIS128 - tristate "AEGIS-128 AEAD algorithm" - select CRYPTO_AEAD - select CRYPTO_AES # for AES S-box tables + See https://web.archive.org/web/20160606112246/http://www.larc.usp.br/~pbarreto/AnubisPage.html + for further information. + +config CRYPTO_ARIA + tristate "ARIA" + select CRYPTO_ALGAPI help - Support for the AEGIS-128 dedicated AEAD algorithm. + ARIA cipher algorithm (RFC5794) -config CRYPTO_AEGIS128_SIMD - bool "Support SIMD acceleration for AEGIS-128" - depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) - depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800 - default y + ARIA is a standard encryption algorithm of the Republic of Korea. + The ARIA specifies three key sizes and rounds. + 128-bit: 12 rounds. + 192-bit: 14 rounds. + 256-bit: 16 rounds. -config CRYPTO_AEGIS128_AESNI_SSE2 - tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" - depends on X86 && 64BIT - select CRYPTO_AEAD - select CRYPTO_SIMD + See: + https://seed.kisa.or.kr/kisa/algorithm/EgovAriaInfo.do + +config CRYPTO_BLOWFISH + tristate "Blowfish" + select CRYPTO_ALGAPI + select CRYPTO_BLOWFISH_COMMON help - AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm. + Blowfish cipher algorithm, by Bruce Schneier -config CRYPTO_SEQIV - tristate "Sequence Number IV Generator" - select CRYPTO_AEAD + This is a variable key length cipher which can use keys from 32 + bits to 448 bits in length. It's fast, simple and specifically + designed for use on "large microprocessors". + + See https://www.schneier.com/blowfish.html for further information.
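The single-block ciphers in this menu are normally consumed through a mode template via the skcipher API rather than invoked directly. A minimal sketch of one in-place ECB-AES operation follows, assuming CRYPTO_AES and the CRYPTO_ECB template (defined later in this file) are enabled; the helper name and buffers are illustrative, not part of the patch.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt one 16-byte block in place. buf must be addressable by a
 * scatterlist (e.g. kmalloc'd, not on the stack). */
static int aes_ecb_one_block(u8 *buf, const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, NULL); /* ECB: no IV */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}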
+ +config CRYPTO_BLOWFISH_COMMON + tristate + help + Common parts of the Blowfish cipher algorithm shared by the + generic C and the assembler implementations. + +config CRYPTO_CAMELLIA + tristate "Camellia" + select CRYPTO_ALGAPI + help + Camellia cipher algorithms (ISO/IEC 18033-3) + + Camellia is a symmetric key block cipher developed jointly + at NTT and Mitsubishi Electric Corporation. + + The Camellia specifies three key sizes: 128, 192 and 256 bits. + + See https://info.isl.ntt.co.jp/crypt/eng/camellia/ for further information. + +config CRYPTO_CAST_COMMON + tristate + help + Common parts of the CAST cipher algorithms shared by the + generic C and the assembler implementations. + +config CRYPTO_CAST5 + tristate "CAST5 (CAST-128)" + select CRYPTO_ALGAPI + select CRYPTO_CAST_COMMON + help + CAST5 (CAST-128) cipher algorithm (RFC2144, ISO/IEC 18033-3) + +config CRYPTO_CAST6 + tristate "CAST6 (CAST-256)" + select CRYPTO_ALGAPI + select CRYPTO_CAST_COMMON + help + CAST6 (CAST-256) encryption algorithm (RFC2612) + +config CRYPTO_DES + tristate "DES and Triple DES EDE" + select CRYPTO_ALGAPI + select CRYPTO_LIB_DES + help + DES (Data Encryption Standard)(FIPS 46-2, ISO/IEC 18033-3) and + Triple DES EDE (Encrypt/Decrypt/Encrypt) (FIPS 46-3, ISO/IEC 18033-3) + cipher algorithms + +config CRYPTO_FCRYPT + tristate "FCrypt" + select CRYPTO_ALGAPI select CRYPTO_SKCIPHER - select CRYPTO_NULL - select CRYPTO_RNG_DEFAULT - select CRYPTO_MANAGER help - This IV generator generates an IV based on a sequence number by - xoring it with a salt. This algorithm is mainly useful for CTR + FCrypt algorithm used by RxRPC -config CRYPTO_ECHAINIV - tristate "Encrypted Chain IV Generator" - select CRYPTO_AEAD - select CRYPTO_NULL - select CRYPTO_RNG_DEFAULT + See https://ota.polyonymo.us/fcrypt-paper.txt + +config CRYPTO_KHAZAD + tristate "Khazad" + depends on CRYPTO_USER_API_ENABLE_OBSOLETE + select CRYPTO_ALGAPI + help + Khazad cipher algorithm + + Khazad was a finalist in the initial NESSIE competition. It is + an algorithm optimized for 64-bit processors with good performance + on 32-bit processors. Khazad uses a 128-bit key size. + + See https://web.archive.org/web/20171011071731/http://www.larc.usp.br/~pbarreto/KhazadPage.html + for further information. + +config CRYPTO_SEED + tristate "SEED" + depends on CRYPTO_USER_API_ENABLE_OBSOLETE + select CRYPTO_ALGAPI + help + SEED cipher algorithm (RFC4269, ISO/IEC 18033-3) + + SEED is a 128-bit symmetric key block cipher that has been + developed by KISA (Korea Information Security Agency) as a + national standard encryption algorithm of the Republic of Korea. + It is a 16 round block cipher with a key size of 128 bits. + + See https://seed.kisa.or.kr/kisa/algorithm/EgovSeedInfo.do + for further information. + +config CRYPTO_SERPENT + tristate "Serpent" + select CRYPTO_ALGAPI + help + Serpent cipher algorithm, by Anderson, Biham & Knudsen + + Keys are allowed to be from 0 to 256 bits in length, in steps + of 8 bits. + + See https://www.cl.cam.ac.uk/~rja14/serpent.html for further information. + +config CRYPTO_SM4 + tristate + +config CRYPTO_SM4_GENERIC + tristate "SM4 (ShangMi 4)" + select CRYPTO_ALGAPI + select CRYPTO_SM4 + help + SM4 cipher algorithms (OSCCA GB/T 32907-2016, + ISO/IEC 18033-3:2010/Amd 1:2021) + + SM4 (GBT.32907-2016) is a cryptographic standard issued by the + Organization of State Commercial Administration of China (OSCCA) + as an authorized cryptographic algorithm for use within China.
+ + SMS4 was originally created for use in protecting wireless + networks, and is mandated in the Chinese National Standard for + Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure) + (GB.15629.11-2003). + + The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and + standardized through TC 260 of the Standardization Administration + of the People's Republic of China (SAC). + + The input, output, and key of SMS4 are each 128 bits. + + See https://eprint.iacr.org/2008/329.pdf for further information. + + If unsure, say N. + +config CRYPTO_TEA + tristate "TEA, XTEA and XETA" + depends on CRYPTO_USER_API_ENABLE_OBSOLETE + select CRYPTO_ALGAPI + help + TEA (Tiny Encryption Algorithm) cipher algorithms + + Tiny Encryption Algorithm is a simple cipher that uses + many rounds for security. It is very fast and uses + little memory. + + Extended Tiny Encryption Algorithm is a modification to + the TEA algorithm to address a potential key weakness + in the TEA algorithm. + + Extended Encryption Tiny Algorithm is a mis-implementation + of the XTEA algorithm for compatibility purposes. + +config CRYPTO_TWOFISH + tristate "Twofish" + select CRYPTO_ALGAPI + select CRYPTO_TWOFISH_COMMON + help + Twofish cipher algorithm + + Twofish was submitted as an AES (Advanced Encryption Standard) + candidate cipher by researchers at CounterPane Systems. It is a + 16 round block cipher supporting key sizes of 128, 192, and 256 + bits. + + See https://www.schneier.com/twofish.html for further information. + +config CRYPTO_TWOFISH_COMMON + tristate + help + Common parts of the Twofish cipher algorithm shared by the + generic C and the assembler implementations. + +endmenu + +menu "Length-preserving ciphers and modes" + +config CRYPTO_ADIANTUM + tristate "Adiantum" + select CRYPTO_CHACHA20 + select CRYPTO_LIB_POLY1305_GENERIC + select CRYPTO_NHPOLY1305 + select CRYPTO_MANAGER + help + Adiantum tweakable, length-preserving encryption mode + + Designed for fast and secure disk encryption, especially on + CPUs without dedicated crypto instructions. It encrypts + each sector using the XChaCha12 stream cipher, two passes of + an ε-almost-∆-universal hash function, and an invocation of + the AES-256 block cipher on a single 16-byte block. On CPUs + without AES instructions, Adiantum is much faster than + AES-XTS. + + Adiantum's security is provably reducible to that of its + underlying stream and block ciphers, subject to a security + bound. Unlike XTS, Adiantum is a true wide-block encryption + mode, so it actually provides an even stronger notion of + security than XTS, subject to the security bound. + + If unsure, say N. + +config CRYPTO_ARC4 + tristate "ARC4 (Alleged Rivest Cipher 4)" + depends on CRYPTO_USER_API_ENABLE_OBSOLETE + select CRYPTO_SKCIPHER + select CRYPTO_LIB_ARC4 + help + ARC4 cipher algorithm + + ARC4 is a stream cipher using keys ranging from 8 bits to 2048 + bits in length. This algorithm is required for driver-based + WEP, but it should not be used for other purposes because of the + weakness of the algorithm. + +config CRYPTO_CHACHA20 + tristate "ChaCha" + select CRYPTO_LIB_CHACHA_GENERIC + select CRYPTO_SKCIPHER + help + The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms + + ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. + Bernstein and further specified in RFC7539 for use in IETF protocols.
+ This is the portable C implementation of ChaCha20. See + https://cr.yp.to/chacha/chacha-20080128.pdf for further information. + + XChaCha20 is the application of the XSalsa20 construction to ChaCha20 + rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length + from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits, + while provably retaining ChaCha20's security. See + https://cr.yp.to/snuffle/xsalsa-20081128.pdf for further information. -comment "Block modes" + XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly + reduced security margin but increased performance. It can be needed + in some performance-sensitive scenarios. config CRYPTO_CBC - tristate "CBC support" + tristate "CBC (Cipher Block Chaining)" select CRYPTO_SKCIPHER select CRYPTO_MANAGER help - CBC: Cipher Block Chaining mode - This block cipher algorithm is required for IPSec. + CBC (Cipher Block Chaining) mode (NIST SP800-38A) + + This block cipher mode is required for IPSec ESP (XFRM_ESP). config CRYPTO_CFB - tristate "CFB support" + tristate "CFB (Cipher Feedback)" select CRYPTO_SKCIPHER select CRYPTO_MANAGER help - CFB: Cipher FeedBack mode - This block cipher algorithm is required for TPM2 Cryptography. + CFB (Cipher Feedback) mode (NIST SP800-38A) + + This block cipher mode is required for TPM2 Cryptography. config CRYPTO_CTR - tristate "CTR support" + tristate "CTR (Counter)" select CRYPTO_SKCIPHER - select CRYPTO_SEQIV select CRYPTO_MANAGER help - CTR: Counter mode - This block cipher algorithm is required for IPSec. + CTR (Counter) mode (NIST SP800-38A) config CRYPTO_CTS - tristate "CTS support" + tristate "CTS (Cipher Text Stealing)" select CRYPTO_SKCIPHER select CRYPTO_MANAGER help - CTS: Cipher Text Stealing - This is the Cipher Text Stealing mode as described by - Section 8 of rfc2040 and referenced by rfc3962 - (rfc3962 includes errata information in its Appendix A) or - CBC-CS3 as defined by NIST in Sp800-38A addendum from Oct 2010. + CBC-CS3 variant of CTS (Cipher Text Stealing) (NIST + Addendum to SP800-38A (October 2010)) + This mode is required for Kerberos gss mechanism support for AES encryption. - See: https://csrc.nist.gov/publications/detail/sp/800-38a/addendum/final - config CRYPTO_ECB - tristate "ECB support" + tristate "ECB (Electronic Codebook)" select CRYPTO_SKCIPHER select CRYPTO_MANAGER help - ECB: Electronic CodeBook mode - This is the simplest block cipher algorithm. It simply encrypts - the input block by block. + ECB (Electronic Codebook) mode (NIST SP800-38A) + +config CRYPTO_HCTR2 + tristate "HCTR2" + select CRYPTO_XCTR + select CRYPTO_POLYVAL + select CRYPTO_MANAGER + help + HCTR2 length-preserving encryption mode + + A mode for storage encryption that is efficient on processors with + instructions to accelerate AES and carryless multiplication, e.g. + x86 processors with AES-NI and CLMUL, and ARM processors with the + ARMv8 crypto extensions. + + See https://eprint.iacr.org/2021/1441 + +config CRYPTO_KEYWRAP + tristate "KW (AES Key Wrap)" + select CRYPTO_SKCIPHER + select CRYPTO_MANAGER + help + KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F + and RFC3394) without padding. config CRYPTO_LRW - tristate "LRW support" + tristate "LRW (Liskov Rivest Wagner)" select CRYPTO_SKCIPHER select CRYPTO_MANAGER select CRYPTO_GF128MUL + select CRYPTO_ECB help - LRW: Liskov Rivest Wagner, a tweakable, non malleable, non movable + LRW (Liskov Rivest Wagner) mode + + A tweakable, non malleable, non movable narrow block cipher mode for dm-crypt. 
Use it with cipher specification string aes-lrw-benbi, the key must be 256, 320 or 384. The first 128, 192 or 256 bits in the key are used for AES and the rest is used to tie each cipher block to its logical position. + See https://people.csail.mit.edu/rivest/pubs/LRW02.pdf + config CRYPTO_OFB - tristate "OFB support" + tristate "OFB (Output Feedback)" select CRYPTO_SKCIPHER select CRYPTO_MANAGER help - OFB: the Output Feedback mode makes a block cipher into a synchronous + OFB (Output Feedback) mode (NIST SP800-38A) + + This mode makes a block cipher into a synchronous stream cipher. It generates keystream blocks, which are then XORed with the plaintext blocks to get the ciphertext. Flipping a bit in the ciphertext produces a flipped bit in the plaintext at the same @@ -425,82 +744,133 @@ config CRYPTO_OFB normally even when applied before encryption. config CRYPTO_PCBC - tristate "PCBC support" + tristate "PCBC (Propagating Cipher Block Chaining)" select CRYPTO_SKCIPHER select CRYPTO_MANAGER help - PCBC: Propagating Cipher Block Chaining mode - This block cipher algorithm is required for RxRPC. + PCBC (Propagating Cipher Block Chaining) mode -config CRYPTO_XTS - tristate "XTS support" + This block cipher mode is required for RxRPC. + +config CRYPTO_XCTR + tristate select CRYPTO_SKCIPHER select CRYPTO_MANAGER - select CRYPTO_ECB help - XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain, - key size 256, 384 or 512 bits. This implementation currently - can't handle a sectorsize which is not a multiple of 16 bytes. + XCTR (XOR Counter) mode for HCTR2 -config CRYPTO_KEYWRAP - tristate "Key wrapping support" + This blockcipher mode is a variant of CTR mode using XORs and little-endian + addition rather than big-endian arithmetic. + + XCTR mode is used to implement HCTR2. + +config CRYPTO_XTS + tristate "XTS (XOR Encrypt XOR with ciphertext stealing)" select CRYPTO_SKCIPHER select CRYPTO_MANAGER + select CRYPTO_ECB help - Support for key wrapping (NIST SP800-38F / RFC3394) without - padding. + XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E + and IEEE 1619) + + Use with aes-xts-plain, key size 256, 384 or 512 bits. This + implementation currently can't handle a sectorsize which is not a + multiple of 16 bytes. config CRYPTO_NHPOLY1305 tristate select CRYPTO_HASH select CRYPTO_LIB_POLY1305_GENERIC -config CRYPTO_NHPOLY1305_SSE2 - tristate "NHPoly1305 hash function (x86_64 SSE2 implementation)" - depends on X86 && 64BIT - select CRYPTO_NHPOLY1305 +endmenu + +menu "AEAD (authenticated encryption with associated data) ciphers" + +config CRYPTO_AEGIS128 + tristate "AEGIS-128" + select CRYPTO_AEAD + select CRYPTO_AES # for AES S-box tables help - SSE2 optimized implementation of the hash function used by the - Adiantum encryption mode. + AEGIS-128 AEAD algorithm -config CRYPTO_NHPOLY1305_AVX2 - tristate "NHPoly1305 hash function (x86_64 AVX2 implementation)" - depends on X86 && 64BIT - select CRYPTO_NHPOLY1305 +config CRYPTO_AEGIS128_SIMD + bool "AEGIS-128 (arm NEON, arm64 NEON)" + depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) + default y help - AVX2 optimized implementation of the hash function used by the - Adiantum encryption mode. 
+ AEGIS-128 AEAD algorithm -config CRYPTO_ADIANTUM - tristate "Adiantum support" + Architecture: arm or arm64 using: + - NEON (Advanced SIMD) extension + +config CRYPTO_CHACHA20POLY1305 + tristate "ChaCha20-Poly1305" select CRYPTO_CHACHA20 - select CRYPTO_LIB_POLY1305_GENERIC - select CRYPTO_NHPOLY1305 + select CRYPTO_POLY1305 + select CRYPTO_AEAD select CRYPTO_MANAGER help - Adiantum is a tweakable, length-preserving encryption mode - designed for fast and secure disk encryption, especially on - CPUs without dedicated crypto instructions. It encrypts - each sector using the XChaCha12 stream cipher, two passes of - an ε-almost-∆-universal hash function, and an invocation of - the AES-256 block cipher on a single 16-byte block. On CPUs - without AES instructions, Adiantum is much faster than - AES-XTS. + ChaCha20 stream cipher and Poly1305 authenticator combined + mode (RFC8439) - Adiantum's security is provably reducible to that of its - underlying stream and block ciphers, subject to a security - bound. Unlike XTS, Adiantum is a true wide-block encryption - mode, so it actually provides an even stronger notion of - security than XTS, subject to the security bound. +config CRYPTO_CCM + tristate "CCM (Counter with Cipher Block Chaining-MAC)" + select CRYPTO_CTR + select CRYPTO_HASH + select CRYPTO_AEAD + select CRYPTO_MANAGER + help + CCM (Counter with Cipher Block Chaining-Message Authentication Code) + authenticated encryption mode (NIST SP800-38C) - If unsure, say N. +config CRYPTO_GCM + tristate "GCM (Galois/Counter Mode) and GMAC (GCM MAC)" + select CRYPTO_CTR + select CRYPTO_AEAD + select CRYPTO_GHASH + select CRYPTO_NULL + select CRYPTO_MANAGER + help + GCM (Galois/Counter Mode) authenticated encryption mode and GMAC + (GCM Message Authentication Code) (NIST SP800-38D) + + This is required for IPSec ESP (XFRM_ESP). + +config CRYPTO_SEQIV + tristate "Sequence Number IV Generator" + select CRYPTO_AEAD + select CRYPTO_SKCIPHER + select CRYPTO_NULL + select CRYPTO_RNG_DEFAULT + select CRYPTO_MANAGER + help + Sequence Number IV generator + + This IV generator generates an IV based on a sequence number by + xoring it with a salt. This algorithm is mainly useful for CTR. + + This is required for IPsec ESP (XFRM_ESP). + +config CRYPTO_ECHAINIV + tristate "Encrypted Chain IV Generator" + select CRYPTO_AEAD + select CRYPTO_NULL + select CRYPTO_RNG_DEFAULT + select CRYPTO_MANAGER + help + Encrypted Chain IV generator + + This IV generator generates an IV based on the encryption of + a sequence number xored with a salt. This is the default + algorithm for CBC. config CRYPTO_ESSIV - tristate "ESSIV support for block encryption" + tristate "Encrypted Salt-Sector IV Generator" select CRYPTO_AUTHENC help - Encrypted salt-sector initialization vector (ESSIV) is an IV - generation method that is used in some cases by fscrypt and/or + Encrypted Salt-Sector IV generator + + This IV generator is used in some cases by fscrypt and/or dm-crypt. It uses the hash of the block encryption key as the symmetric key for a block encryption pass applied to the input IV, making low entropy IV sources more suitable for block @@ -523,1285 +893,356 @@ config CRYPTO_ESSIV combined with ESSIV the only feasible mode for h/w accelerated block encryption) -comment "Hash modes" - -config CRYPTO_CMAC - tristate "CMAC support" - select CRYPTO_HASH - select CRYPTO_MANAGER - help - Cipher-based Message Authentication Code (CMAC) specified by - The National Institute of Standards and Technology (NIST). 
- - https://tools.ietf.org/html/rfc4493 - http://csrc.nist.gov/publications/nistpubs/800-38B/SP_800-38B.pdf - -config CRYPTO_HMAC - tristate "HMAC support" - select CRYPTO_HASH - select CRYPTO_MANAGER - help - HMAC: Keyed-Hashing for Message Authentication (RFC2104). - This is required for IPSec. - -config CRYPTO_XCBC - tristate "XCBC support" - select CRYPTO_HASH - select CRYPTO_MANAGER - help - XCBC: Keyed-Hashing with encryption algorithm - http://www.ietf.org/rfc/rfc3566.txt - http://csrc.nist.gov/encryption/modes/proposedmodes/ - xcbc-mac/xcbc-mac-spec.pdf - -config CRYPTO_VMAC - tristate "VMAC support" - select CRYPTO_HASH - select CRYPTO_MANAGER - help - VMAC is a message authentication algorithm designed for - very high speed on 64-bit architectures. - - See also: - <http://fastcrypto.org/vmac> - -comment "Digest" - -config CRYPTO_CRC32C - tristate "CRC32c CRC algorithm" - select CRYPTO_HASH - select CRC32 - help - Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used - by iSCSI for header and data digests and by others. - See Castagnoli93. Module will be crc32c. +endmenu -config CRYPTO_CRC32C_INTEL - tristate "CRC32c INTEL hardware acceleration" - depends on X86 - select CRYPTO_HASH - help - In Intel processor with SSE4.2 supported, the processor will - support CRC32C implementation using hardware accelerated CRC32 - instruction. This option will create 'crc32c-intel' module, - which will enable any routine to use the CRC32 instruction to - gain performance compared with software implementation. - Module will be crc32c-intel. - -config CRYPTO_CRC32C_VPMSUM - tristate "CRC32c CRC algorithm (powerpc64)" - depends on PPC64 && ALTIVEC - select CRYPTO_HASH - select CRC32 - help - CRC32c algorithm implemented using vector polynomial multiply-sum - (vpmsum) instructions, introduced in POWER8. Enable on POWER8 - and newer processors for improved performance. - - -config CRYPTO_CRC32C_SPARC64 - tristate "CRC32c CRC algorithm (SPARC64)" - depends on SPARC64 - select CRYPTO_HASH - select CRC32 - help - CRC32c CRC algorithm implemented using sparc64 crypto instructions, - when available. - -config CRYPTO_CRC32 - tristate "CRC32 CRC algorithm" - select CRYPTO_HASH - select CRC32 - help - CRC-32-IEEE 802.3 cyclic redundancy-check algorithm. - Shash crypto api wrappers to crc32_le function. - -config CRYPTO_CRC32_PCLMUL - tristate "CRC32 PCLMULQDQ hardware acceleration" - depends on X86 - select CRYPTO_HASH - select CRC32 - help - From Intel Westmere and AMD Bulldozer processor with SSE4.2 - and PCLMULQDQ supported, the processor will support - CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ - instruction. This option will create 'crc32-pclmul' module, - which will enable any routine to use the CRC-32-IEEE 802.3 checksum - and gain better performance as compared with the table implementation. - -config CRYPTO_CRC32_MIPS - tristate "CRC32c and CRC32 CRC algorithm (MIPS)" - depends on MIPS_CRC_SUPPORT - select CRYPTO_HASH - help - CRC32c and CRC32 CRC algorithms implemented using mips crypto - instructions, when available. - - -config CRYPTO_XXHASH - tristate "xxHash hash algorithm" - select CRYPTO_HASH - select XXHASH - help - xxHash non-cryptographic hash algorithm. Extremely fast, working at - speeds close to RAM limits. 
+menu "Hashes, digests, and MACs" config CRYPTO_BLAKE2B - tristate "BLAKE2b digest algorithm" + tristate "BLAKE2b" select CRYPTO_HASH help - Implementation of cryptographic hash function BLAKE2b (or just BLAKE2), - optimized for 64bit platforms and can produce digests of any size - between 1 to 64. The keyed hash is also implemented. + BLAKE2b cryptographic hash function (RFC 7693) - This module provides the following algorithms: + BLAKE2b is optimized for 64-bit platforms and can produce digests + of any size between 1 and 64 bytes. The keyed hash is also implemented. + This module provides the following algorithms: - blake2b-160 - blake2b-256 - blake2b-384 - blake2b-512 - See https://blake2.net for further information. - -config CRYPTO_BLAKE2S - tristate "BLAKE2s digest algorithm" - select CRYPTO_LIB_BLAKE2S_GENERIC - select CRYPTO_HASH - help - Implementation of cryptographic hash function BLAKE2s - optimized for 8-32bit platforms and can produce digests of any size - between 1 to 32. The keyed hash is also implemented. - - This module provides the following algorithms: - - - blake2s-128 - - blake2s-160 - - blake2s-224 - - blake2s-256 + Used by the btrfs filesystem. See https://blake2.net for further information. -config CRYPTO_BLAKE2S_X86 - tristate "BLAKE2s digest algorithm (x86 accelerated version)" - depends on X86 && 64BIT - select CRYPTO_LIB_BLAKE2S_GENERIC - select CRYPTO_ARCH_HAVE_LIB_BLAKE2S - -config CRYPTO_CRCT10DIF - tristate "CRCT10DIF algorithm" - select CRYPTO_HASH - help - CRC T10 Data Integrity Field computation is being cast as - a crypto transform. This allows for faster crc t10 diff - transforms to be used if they are available. - -config CRYPTO_CRCT10DIF_PCLMUL - tristate "CRCT10DIF PCLMULQDQ hardware acceleration" - depends on X86 && 64BIT && CRC_T10DIF - select CRYPTO_HASH - help - For x86_64 processors with SSE4.2 and PCLMULQDQ supported, - CRC T10 DIF PCLMULQDQ computation can be hardware - accelerated PCLMULQDQ instruction. This option will create - 'crct10dif-pclmul' module, which is faster when computing the - crct10dif checksum as compared with the generic table implementation. - -config CRYPTO_CRCT10DIF_VPMSUM - tristate "CRC32T10DIF powerpc64 hardware acceleration" - depends on PPC64 && ALTIVEC && CRC_T10DIF +config CRYPTO_CMAC + tristate "CMAC (Cipher-based MAC)" select CRYPTO_HASH + select CRYPTO_MANAGER help - CRC10T10DIF algorithm implemented using vector polynomial - multiply-sum (vpmsum) instructions, introduced in POWER8. Enable on - POWER8 and newer processors for improved performance. - -config CRYPTO_VPMSUM_TESTER - tristate "Powerpc64 vpmsum hardware acceleration tester" - depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM - help - Stress test for CRC32c and CRC-T10DIF algorithms implemented with - POWER8 vpmsum instructions. - Unless you are testing these algorithms, you don't need this. + CMAC (Cipher-based Message Authentication Code) authentication + mode (NIST SP800-38B and IETF RFC4493) config CRYPTO_GHASH - tristate "GHASH hash function" + tristate "GHASH" select CRYPTO_GF128MUL select CRYPTO_HASH help - GHASH is the hash function used in GCM (Galois/Counter Mode). - It is not a general-purpose cryptographic hash function. + GCM GHASH function (NIST SP800-38D) -config CRYPTO_POLY1305 - tristate "Poly1305 authenticator algorithm" +config CRYPTO_HMAC + tristate "HMAC (Keyed-Hash MAC)" select CRYPTO_HASH - select CRYPTO_LIB_POLY1305_GENERIC - help - Poly1305 authenticator algorithm, RFC7539. 
- - Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein. - It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use - in IETF protocols. This is the portable C implementation of Poly1305. - -config CRYPTO_POLY1305_X86_64 - tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)" - depends on X86 && 64BIT - select CRYPTO_LIB_POLY1305_GENERIC - select CRYPTO_ARCH_HAVE_LIB_POLY1305 + select CRYPTO_MANAGER help - Poly1305 authenticator algorithm, RFC7539. - - Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein. - It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use - in IETF protocols. This is the x86_64 assembler implementation using SIMD - instructions. + HMAC (Keyed-Hash Message Authentication Code) (FIPS 198 and + RFC2104) -config CRYPTO_POLY1305_MIPS - tristate "Poly1305 authenticator algorithm (MIPS optimized)" - depends on CPU_MIPS32 || (CPU_MIPS64 && 64BIT) - select CRYPTO_ARCH_HAVE_LIB_POLY1305 + This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP). config CRYPTO_MD4 - tristate "MD4 digest algorithm" + tristate "MD4" select CRYPTO_HASH help - MD4 message digest algorithm (RFC1320). + MD4 message digest algorithm (RFC1320) config CRYPTO_MD5 - tristate "MD5 digest algorithm" + tristate "MD5" select CRYPTO_HASH help - MD5 message digest algorithm (RFC1321). + MD5 message digest algorithm (RFC1321) -config CRYPTO_MD5_OCTEON - tristate "MD5 digest algorithm (OCTEON)" - depends on CPU_CAVIUM_OCTEON - select CRYPTO_MD5 +config CRYPTO_MICHAEL_MIC + tristate "Michael MIC" select CRYPTO_HASH help - MD5 message digest algorithm (RFC1321) implemented - using OCTEON crypto instructions, when available. + Michael MIC (Message Integrity Code) (IEEE 802.11i) -config CRYPTO_MD5_PPC - tristate "MD5 digest algorithm (PPC)" - depends on PPC - select CRYPTO_HASH - help - MD5 message digest algorithm (RFC1321) implemented - in PPC assembler. + Defined by the IEEE 802.11i TKIP (Temporal Key Integrity Protocol), + known as WPA (Wi-Fi Protected Access). -config CRYPTO_MD5_SPARC64 - tristate "MD5 digest algorithm (SPARC64)" - depends on SPARC64 - select CRYPTO_MD5 - select CRYPTO_HASH - help - MD5 message digest algorithm (RFC1321) implemented - using sparc64 crypto instructions, when available. + This algorithm is required for TKIP, but it should not be used for + other purposes because of the weakness of the algorithm. -config CRYPTO_MICHAEL_MIC - tristate "Michael MIC keyed digest algorithm" +config CRYPTO_POLYVAL + tristate + select CRYPTO_GF128MUL select CRYPTO_HASH - help - Michael MIC is used for message integrity protection in TKIP - (IEEE 802.11i). This algorithm is required for TKIP, but it - should not be used for other purposes because of the weakness - of the algorithm. + POLYVAL hash function for HCTR2 -config CRYPTO_RMD128 - tristate "RIPEMD-128 digest algorithm" + This is used in HCTR2. It is not a general-purpose + cryptographic hash function. + +config CRYPTO_POLY1305 + tristate "Poly1305" select CRYPTO_HASH + select CRYPTO_LIB_POLY1305_GENERIC help - RIPEMD-128 (ISO/IEC 10118-3:2004). - - RIPEMD-128 is a 128-bit cryptographic hash function. It should only - be used as a secure replacement for RIPEMD. For other use cases, - RIPEMD-160 should be used. + Poly1305 authenticator algorithm (RFC7539) - Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
+ It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use + in IETF protocols. This is the portable C implementation of Poly1305. config CRYPTO_RMD160 - tristate "RIPEMD-160 digest algorithm" + tristate "RIPEMD-160" select CRYPTO_HASH help - RIPEMD-160 (ISO/IEC 10118-3:2004). + RIPEMD-160 hash function (ISO/IEC 10118-3) RIPEMD-160 is a 160-bit cryptographic hash function. It is intended to be used as a secure replacement for the 128-bit hash functions - MD4, MD5 and it's predecessor RIPEMD + MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128). - It's speed is comparable to SHA1 and there are no known attacks + Its speed is comparable to SHA-1 and there are no known attacks against RIPEMD-160. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> - -config CRYPTO_RMD256 - tristate "RIPEMD-256 digest algorithm" - select CRYPTO_HASH - help - RIPEMD-256 is an optional extension of RIPEMD-128 with a - 256 bit hash. It is intended for applications that require - longer hash-results, without needing a larger security level - (than RIPEMD-128). - - Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> - -config CRYPTO_RMD320 - tristate "RIPEMD-320 digest algorithm" - select CRYPTO_HASH - help - RIPEMD-320 is an optional extension of RIPEMD-160 with a - 320 bit hash. It is intended for applications that require - longer hash-results, without needing a larger security level - (than RIPEMD-160). - - Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See https://homes.esat.kuleuven.be/~bosselae/ripemd160.html + for further information. config CRYPTO_SHA1 - tristate "SHA1 digest algorithm" - select CRYPTO_HASH - help - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). - -config CRYPTO_SHA1_SSSE3 - tristate "SHA1 digest algorithm (SSSE3/AVX/AVX2/SHA-NI)" - depends on X86 && 64BIT - select CRYPTO_SHA1 + tristate "SHA-1" select CRYPTO_HASH + select CRYPTO_LIB_SHA1 help - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented - using Supplemental SSE3 (SSSE3) instructions or Advanced Vector - Extensions (AVX/AVX2) or SHA-NI(SHA Extensions New Instructions), - when available. - -config CRYPTO_SHA256_SSSE3 - tristate "SHA256 digest algorithm (SSSE3/AVX/AVX2/SHA-NI)" - depends on X86 && 64BIT - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-256 secure hash standard (DFIPS 180-2) implemented - using Supplemental SSE3 (SSSE3) instructions, or Advanced Vector - Extensions version 1 (AVX1), or Advanced Vector Extensions - version 2 (AVX2) instructions, or SHA-NI (SHA Extensions New - Instructions) when available. - -config CRYPTO_SHA512_SSSE3 - tristate "SHA512 digest algorithm (SSSE3/AVX/AVX2)" - depends on X86 && 64BIT - select CRYPTO_SHA512 - select CRYPTO_HASH - help - SHA-512 secure hash standard (DFIPS 180-2) implemented - using Supplemental SSE3 (SSSE3) instructions, or Advanced Vector - Extensions version 1 (AVX1), or Advanced Vector Extensions - version 2 (AVX2) instructions, when available. - -config CRYPTO_SHA1_OCTEON - tristate "SHA1 digest algorithm (OCTEON)" - depends on CPU_CAVIUM_OCTEON - select CRYPTO_SHA1 - select CRYPTO_HASH - help - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented - using OCTEON crypto instructions, when available. 
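The MACs and digests in this menu are reached through the synchronous hash (shash) API; for example, CRYPTO_HMAC layered over CRYPTO_SHA256 yields the "hmac(sha256)" algorithm name. A minimal one-shot sketch follows; the helper name is illustrative, not part of the patch.

#include <crypto/hash.h>
#include <linux/err.h>

/* One-shot HMAC-SHA256 over a contiguous buffer; out must hold 32 bytes. */
static int hmac_sha256_example(const u8 *key, unsigned int keylen,
			       const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);	/* wipe per-request state */
	}

	crypto_free_shash(tfm);
	return err;
}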
- -config CRYPTO_SHA1_SPARC64 - tristate "SHA1 digest algorithm (SPARC64)" - depends on SPARC64 - select CRYPTO_SHA1 - select CRYPTO_HASH - help - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented - using sparc64 crypto instructions, when available. - -config CRYPTO_SHA1_PPC - tristate "SHA1 digest algorithm (powerpc)" - depends on PPC - help - This is the powerpc hardware accelerated implementation of the - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). - -config CRYPTO_SHA1_PPC_SPE - tristate "SHA1 digest algorithm (PPC SPE)" - depends on PPC && SPE - help - SHA-1 secure hash standard (DFIPS 180-4) implemented - using powerpc SPE SIMD instruction set. + SHA-1 secure hash algorithm (FIPS 180, ISO/IEC 10118-3) config CRYPTO_SHA256 - tristate "SHA224 and SHA256 digest algorithm" + tristate "SHA-224 and SHA-256" select CRYPTO_HASH select CRYPTO_LIB_SHA256 help - SHA256 secure hash standard (DFIPS 180-2). - - This version of SHA implements a 256 bit hash with 128 bits of - security against collision attacks. - - This code also includes SHA-224, a 224 bit hash with 112 bits - of security against collision attacks. - -config CRYPTO_SHA256_PPC_SPE - tristate "SHA224 and SHA256 digest algorithm (PPC SPE)" - depends on PPC && SPE - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA224 and SHA256 secure hash standard (DFIPS 180-2) - implemented using powerpc SPE SIMD instruction set. - -config CRYPTO_SHA256_OCTEON - tristate "SHA224 and SHA256 digest algorithm (OCTEON)" - depends on CPU_CAVIUM_OCTEON - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-256 secure hash standard (DFIPS 180-2) implemented - using OCTEON crypto instructions, when available. + SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC 10118-3) -config CRYPTO_SHA256_SPARC64 - tristate "SHA224 and SHA256 digest algorithm (SPARC64)" - depends on SPARC64 - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-256 secure hash standard (DFIPS 180-2) implemented - using sparc64 crypto instructions, when available. + This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP). + Used by the btrfs filesystem, Ceph, NFS, and SMB. config CRYPTO_SHA512 - tristate "SHA384 and SHA512 digest algorithms" + tristate "SHA-384 and SHA-512" select CRYPTO_HASH help - SHA512 secure hash standard (DFIPS 180-2). - - This version of SHA implements a 512 bit hash with 256 bits of - security against collision attacks. - - This code also includes SHA-384, a 384 bit hash with 192 bits - of security against collision attacks. - -config CRYPTO_SHA512_OCTEON - tristate "SHA384 and SHA512 digest algorithms (OCTEON)" - depends on CPU_CAVIUM_OCTEON - select CRYPTO_SHA512 - select CRYPTO_HASH - help - SHA-512 secure hash standard (DFIPS 180-2) implemented - using OCTEON crypto instructions, when available. - -config CRYPTO_SHA512_SPARC64 - tristate "SHA384 and SHA512 digest algorithm (SPARC64)" - depends on SPARC64 - select CRYPTO_SHA512 - select CRYPTO_HASH - help - SHA-512 secure hash standard (DFIPS 180-2) implemented - using sparc64 crypto instructions, when available. + SHA-384 and SHA-512 secure hash algorithms (FIPS 180, ISO/IEC 10118-3) config CRYPTO_SHA3 - tristate "SHA3 digest algorithm" + tristate "SHA-3" select CRYPTO_HASH help - SHA-3 secure hash standard (DFIPS 202). It's based on - cryptographic sponge function family called Keccak. 
- - References: - http://keccak.noekeon.org/ + SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3) config CRYPTO_SM3 - tristate "SM3 digest algorithm" + tristate + +config CRYPTO_SM3_GENERIC + tristate "SM3 (ShangMi 3)" select CRYPTO_HASH + select CRYPTO_SM3 help - SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3). - It is part of the Chinese Commercial Cryptography suite. + SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3) + + This is part of the Chinese Commercial Cryptography suite. References: http://www.oscca.gov.cn/UpFile/20101222141857786.pdf https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash config CRYPTO_STREEBOG - tristate "Streebog Hash Function" + tristate "Streebog" select CRYPTO_HASH help - Streebog Hash Function (GOST R 34.11-2012, RFC 6986) is one of the Russian - cryptographic standard algorithms (called GOST algorithms). - This setting enables two hash algorithms with 256 and 512 bits output. + Streebog Hash Function (GOST R 34.11-2012, RFC 6986, ISO/IEC 10118-3) + + This is one of the Russian cryptographic standard algorithms (called + GOST algorithms). This setting enables two hash algorithms with + 256 and 512 bits output. References: https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf https://tools.ietf.org/html/rfc6986 -config CRYPTO_TGR192 - tristate "Tiger digest algorithms" +config CRYPTO_VMAC + tristate "VMAC" select CRYPTO_HASH + select CRYPTO_MANAGER help - Tiger hash algorithm 192, 160 and 128-bit hashes - - Tiger is a hash function optimized for 64-bit processors while - still having decent performance on 32-bit processors. - Tiger was developed by Ross Anderson and Eli Biham. + VMAC is a message authentication algorithm designed for + very high speed on 64-bit architectures. - See also: - <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>. + See https://fastcrypto.org/vmac for further information. config CRYPTO_WP512 - tristate "Whirlpool digest algorithms" + tristate "Whirlpool" select CRYPTO_HASH help - Whirlpool hash algorithm 512, 384 and 256-bit hashes - - Whirlpool-512 is part of the NESSIE cryptographic primitives. - Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard - - See also: - <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html> - -config CRYPTO_GHASH_CLMUL_NI_INTEL - tristate "GHASH hash function (CLMUL-NI accelerated)" - depends on X86 && 64BIT - select CRYPTO_CRYPTD - help - This is the x86_64 CLMUL-NI accelerated implementation of - GHASH, the hash function used in GCM (Galois/Counter mode). - -comment "Ciphers" - -config CRYPTO_AES - tristate "AES cipher algorithms" - select CRYPTO_ALGAPI - select CRYPTO_LIB_AES - help - AES cipher algorithms (FIPS-197). AES uses the Rijndael - algorithm. - - Rijndael appears to be consistently a very good performer in - both hardware and software across a wide range of computing - environments regardless of its use in feedback or non-feedback - modes. Its key setup time is excellent, and its key agility is - good. Rijndael's very low memory requirements make it very well - suited for restricted-space environments, in which it also - demonstrates excellent performance. Rijndael's operations are - among the easiest to defend against power and timing attacks. - - The AES specifies three key sizes: 128, 192 and 256 bits - - See <http://csrc.nist.gov/CryptoToolkit/aes/> for more information. 
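Because most of these entries are tristate, whether a given digest is actually present is a runtime question; a caller can probe for (and, on modular builds, trigger autoloading of) an algorithm by name instead of depending on a specific Kconfig symbol. A minimal sketch, with an illustrative wrapper name:

#include <linux/crypto.h>
#include <linux/types.h>

/* Probe for the Streebog-256 digest registered by CRYPTO_STREEBOG.
 * crypto_has_alg() returns nonzero if the name can be resolved. */
static bool have_streebog256(void)
{
	return crypto_has_alg("streebog256", 0, 0);
}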
- -config CRYPTO_AES_TI - tristate "Fixed time AES cipher" - select CRYPTO_ALGAPI - select CRYPTO_LIB_AES - help - This is a generic implementation of AES that attempts to eliminate - data dependent latencies as much as possible without affecting - performance too much. It is intended for use by the generic CCM - and GCM drivers, and other CTR or CMAC/XCBC based modes that rely - solely on encryption (although decryption is supported as well, but - with a more dramatic performance hit) + Whirlpool hash function (ISO/IEC 10118-3) - Instead of using 16 lookup tables of 1 KB each, (8 for encryption and - 8 for decryption), this implementation only uses just two S-boxes of - 256 bytes each, and attempts to eliminate data dependent latencies by - prefetching the entire table into the cache at the start of each - block. Interrupts are also disabled to avoid races where cachelines - are evicted when the CPU is interrupted to do something else. + 512, 384 and 256-bit hashes. -config CRYPTO_AES_NI_INTEL - tristate "AES cipher algorithms (AES-NI)" - depends on X86 - select CRYPTO_AEAD - select CRYPTO_LIB_AES - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 if 64BIT - select CRYPTO_SIMD - help - Use Intel AES-NI instructions for AES algorithm. - - AES cipher algorithms (FIPS-197). AES uses the Rijndael - algorithm. - - Rijndael appears to be consistently a very good performer in - both hardware and software across a wide range of computing - environments regardless of its use in feedback or non-feedback - modes. Its key setup time is excellent, and its key agility is - good. Rijndael's very low memory requirements make it very well - suited for restricted-space environments, in which it also - demonstrates excellent performance. Rijndael's operations are - among the easiest to defend against power and timing attacks. - - The AES specifies three key sizes: 128, 192 and 256 bits - - See <http://csrc.nist.gov/encryption/aes/> for more information. - - In addition to AES cipher algorithm support, the acceleration - for some popular block cipher mode is supported too, including - ECB, CBC, LRW, XTS. The 64 bit version has additional - acceleration for CTR. - -config CRYPTO_AES_SPARC64 - tristate "AES cipher algorithms (SPARC64)" - depends on SPARC64 - select CRYPTO_SKCIPHER - help - Use SPARC64 crypto opcodes for AES algorithm. - - AES cipher algorithms (FIPS-197). AES uses the Rijndael - algorithm. - - Rijndael appears to be consistently a very good performer in - both hardware and software across a wide range of computing - environments regardless of its use in feedback or non-feedback - modes. Its key setup time is excellent, and its key agility is - good. Rijndael's very low memory requirements make it very well - suited for restricted-space environments, in which it also - demonstrates excellent performance. Rijndael's operations are - among the easiest to defend against power and timing attacks. - - The AES specifies three key sizes: 128, 192 and 256 bits - - See <http://csrc.nist.gov/encryption/aes/> for more information. - - In addition to AES cipher algorithm support, the acceleration - for some popular block cipher mode is supported too, including - ECB and CBC. - -config CRYPTO_AES_PPC_SPE - tristate "AES cipher algorithms (PPC SPE)" - depends on PPC && SPE - select CRYPTO_SKCIPHER - help - AES cipher algorithms (FIPS-197). Additionally the acceleration - for popular block cipher modes ECB, CBC, CTR and XTS is supported. 
- This module should only be used for low power (router) devices - without hardware AES acceleration (e.g. caam crypto). It reduces the - size of the AES tables from 16KB to 8KB + 256 bytes and mitigates - timining attacks. Nevertheless it might be not as secure as other - architecture specific assembler implementations that work on 1KB - tables or 256 bytes S-boxes. - -config CRYPTO_ANUBIS - tristate "Anubis cipher algorithm" - select CRYPTO_ALGAPI - help - Anubis cipher algorithm. - - Anubis is a variable key length cipher which can use keys from - 128 bits to 320 bits in length. It was evaluated as a entrant - in the NESSIE competition. - - See also: - <https://www.cosic.esat.kuleuven.be/nessie/reports/> - <http://www.larc.usp.br/~pbarreto/AnubisPage.html> - -config CRYPTO_ARC4 - tristate "ARC4 cipher algorithm" - select CRYPTO_SKCIPHER - select CRYPTO_LIB_ARC4 - help - ARC4 cipher algorithm. - - ARC4 is a stream cipher using keys ranging from 8 bits to 2048 - bits in length. This algorithm is required for driver-based - WEP, but it should not be for other purposes because of the - weakness of the algorithm. - -config CRYPTO_BLOWFISH - tristate "Blowfish cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_BLOWFISH_COMMON - help - Blowfish cipher algorithm, by Bruce Schneier. - - This is a variable key length cipher which can use keys from 32 - bits to 448 bits in length. It's fast, simple and specifically - designed for use on "large microprocessors". - - See also: - <http://www.schneier.com/blowfish.html> - -config CRYPTO_BLOWFISH_COMMON - tristate - help - Common parts of the Blowfish cipher algorithm shared by the - generic c and the assembler implementations. - - See also: - <http://www.schneier.com/blowfish.html> - -config CRYPTO_BLOWFISH_X86_64 - tristate "Blowfish cipher algorithm (x86_64)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_BLOWFISH_COMMON - help - Blowfish cipher algorithm (x86_64), by Bruce Schneier. - - This is a variable key length cipher which can use keys from 32 - bits to 448 bits in length. It's fast, simple and specifically - designed for use on "large microprocessors". - - See also: - <http://www.schneier.com/blowfish.html> - -config CRYPTO_CAMELLIA - tristate "Camellia cipher algorithms" - depends on CRYPTO - select CRYPTO_ALGAPI - help - Camellia cipher algorithms module. - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAMELLIA_X86_64 - tristate "Camellia cipher algorithm (x86_64)" - depends on X86 && 64BIT - depends on CRYPTO - select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 - help - Camellia cipher algorithm module (x86_64). - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 - tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)" - depends on X86 && 64BIT - depends on CRYPTO - select CRYPTO_SKCIPHER - select CRYPTO_CAMELLIA_X86_64 - select CRYPTO_GLUE_HELPER_X86 - select CRYPTO_SIMD - select CRYPTO_XTS - help - Camellia cipher algorithm module (x86_64/AES-NI/AVX). 
- - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 - tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)" - depends on X86 && 64BIT - depends on CRYPTO - select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 - help - Camellia cipher algorithm module (x86_64/AES-NI/AVX2). - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAMELLIA_SPARC64 - tristate "Camellia cipher algorithm (SPARC64)" - depends on SPARC64 - depends on CRYPTO - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - help - Camellia cipher algorithm module (SPARC64). - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAST_COMMON - tristate - help - Common parts of the CAST cipher algorithms shared by the - generic c and the assembler implementations. - -config CRYPTO_CAST5 - tristate "CAST5 (CAST-128) cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_CAST_COMMON - help - The CAST5 encryption algorithm (synonymous with CAST-128) is - described in RFC2144. - -config CRYPTO_CAST5_AVX_X86_64 - tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_CAST5 - select CRYPTO_CAST_COMMON - select CRYPTO_SIMD - help - The CAST5 encryption algorithm (synonymous with CAST-128) is - described in RFC2144. - - This module provides the Cast5 cipher algorithm that processes - sixteen blocks parallel using the AVX instruction set. - -config CRYPTO_CAST6 - tristate "CAST6 (CAST-256) cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_CAST_COMMON - help - The CAST6 encryption algorithm (synonymous with CAST-256) is - described in RFC2612. - -config CRYPTO_CAST6_AVX_X86_64 - tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_CAST6 - select CRYPTO_CAST_COMMON - select CRYPTO_GLUE_HELPER_X86 - select CRYPTO_SIMD - select CRYPTO_XTS - help - The CAST6 encryption algorithm (synonymous with CAST-256) is - described in RFC2612. - - This module provides the Cast6 cipher algorithm that processes - eight blocks parallel using the AVX instruction set. - -config CRYPTO_DES - tristate "DES and Triple DES EDE cipher algorithms" - select CRYPTO_ALGAPI - select CRYPTO_LIB_DES - help - DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). - -config CRYPTO_DES_SPARC64 - tristate "DES and Triple DES EDE cipher algorithms (SPARC64)" - depends on SPARC64 - select CRYPTO_ALGAPI - select CRYPTO_LIB_DES - select CRYPTO_SKCIPHER - help - DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3), - optimized using SPARC64 crypto opcodes. - -config CRYPTO_DES3_EDE_X86_64 - tristate "Triple DES EDE cipher algorithm (x86-64)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_LIB_DES - help - Triple DES EDE (FIPS 46-3) algorithm. 
- - This module provides implementation of the Triple DES EDE cipher - algorithm that is optimized for x86-64 processors. Two versions of - algorithm are provided; regular processing one input block and - one that processes three blocks parallel. - -config CRYPTO_FCRYPT - tristate "FCrypt cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - help - FCrypt algorithm used by RxRPC. - -config CRYPTO_KHAZAD - tristate "Khazad cipher algorithm" - select CRYPTO_ALGAPI - help - Khazad cipher algorithm. - - Khazad was a finalist in the initial NESSIE competition. It is - an algorithm optimized for 64-bit processors with good performance - on 32-bit processors. Khazad uses an 128 bit key size. - - See also: - <http://www.larc.usp.br/~pbarreto/KhazadPage.html> - -config CRYPTO_SALSA20 - tristate "Salsa20 stream cipher algorithm" - select CRYPTO_SKCIPHER - help - Salsa20 stream cipher algorithm. - - Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT - Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/> - - The Salsa20 stream cipher algorithm is designed by Daniel J. - Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html> - -config CRYPTO_CHACHA20 - tristate "ChaCha stream cipher algorithms" - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_SKCIPHER - help - The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms. - - ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. - Bernstein and further specified in RFC7539 for use in IETF protocols. - This is the portable C implementation of ChaCha20. See also: - <http://cr.yp.to/chacha/chacha-20080128.pdf> - - XChaCha20 is the application of the XSalsa20 construction to ChaCha20 - rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length - from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits, - while provably retaining ChaCha20's security. See also: - <https://cr.yp.to/snuffle/xsalsa-20081128.pdf> - - XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly - reduced security margin but increased performance. It can be needed - in some performance-sensitive scenarios. - -config CRYPTO_CHACHA20_X86_64 - tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - help - SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20, - XChaCha20, and XChaCha12 stream ciphers. - -config CRYPTO_CHACHA_MIPS - tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)" - depends on CPU_MIPS32_R2 - select CRYPTO_SKCIPHER - select CRYPTO_ARCH_HAVE_LIB_CHACHA - -config CRYPTO_SEED - tristate "SEED cipher algorithm" - select CRYPTO_ALGAPI - help - SEED cipher algorithm (RFC4269). - - SEED is a 128-bit symmetric key block cipher that has been - developed by KISA (Korea Information Security Agency) as a - national standard encryption algorithm of the Republic of Korea. - It is a 16 round block cipher with the key size of 128 bit. - - See also: - <http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp> - -config CRYPTO_SERPENT - tristate "Serpent cipher algorithm" - select CRYPTO_ALGAPI - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. Also includes the 'Tnepres' algorithm, a reversed - variant of Serpent for compatibility with old kerneli.org code. 
- - See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SERPENT_SSE2_X86_64 - tristate "Serpent cipher algorithm (x86_64/SSE2)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 - select CRYPTO_SERPENT - select CRYPTO_SIMD - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. - - This module provides Serpent cipher algorithm that processes eight - blocks parallel using SSE2 instruction set. - - See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SERPENT_SSE2_586 - tristate "Serpent cipher algorithm (i586/SSE2)" - depends on X86 && !64BIT - select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 - select CRYPTO_SERPENT - select CRYPTO_SIMD - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. - - This module provides Serpent cipher algorithm that processes four - blocks parallel using SSE2 instruction set. - - See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SERPENT_AVX_X86_64 - tristate "Serpent cipher algorithm (x86_64/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 - select CRYPTO_SERPENT - select CRYPTO_SIMD - select CRYPTO_XTS - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. - - This module provides the Serpent cipher algorithm that processes - eight blocks parallel using the AVX instruction set. - - See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SERPENT_AVX2_X86_64 - tristate "Serpent cipher algorithm (x86_64/AVX2)" - depends on X86 && 64BIT - select CRYPTO_SERPENT_AVX_X86_64 - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. - - This module provides Serpent cipher algorithm that processes 16 - blocks parallel using AVX2 instruction set. + Whirlpool-512 is part of the NESSIE cryptographic primitives. - See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + See https://web.archive.org/web/20171129084214/http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html + for further information. -config CRYPTO_SM4 - tristate "SM4 cipher algorithm" - select CRYPTO_ALGAPI +config CRYPTO_XCBC + tristate "XCBC-MAC (Extended Cipher Block Chaining MAC)" + select CRYPTO_HASH + select CRYPTO_MANAGER help - SM4 cipher algorithms (OSCCA GB/T 32907-2016). - - SM4 (GBT.32907-2016) is a cryptographic standard issued by the - Organization of State Commercial Administration of China (OSCCA) - as an authorized cryptographic algorithms for the use within China. + XCBC-MAC (Extended Cipher Block Chaining Message Authentication + Code) (RFC3566) - SMS4 was originally created for use in protecting wireless - networks, and is mandated in the Chinese National Standard for - Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure) - (GB.15629.11-2003). - - The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and - standardized through TC 260 of the Standardization Administration - of the People's Republic of China (SAC). - - The input, output, and key of SMS4 are each 128 bits. - - See also: <https://eprint.iacr.org/2008/329.pdf> - - If unsure, say N. 
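The CRYPTO_XCBC entry added above registers xcbc(aes) as a keyed hash ("shash") with the crypto API. As a rough sketch of how an in-kernel caller would consume such a MAC (illustrative only, not part of this patch; the function name and the demo key are invented):

/* Illustrative only -- not part of this patch. Computes an xcbc(aes)
 * MAC over @data via the shash API; @mac must hold at least
 * crypto_shash_digestsize() bytes (16 for xcbc(aes)). */
#include <crypto/hash.h>
#include <linux/err.h>

static int xcbc_demo(const u8 *data, unsigned int len, u8 *mac)
{
	static const u8 demo_key[16] = { 0x01, 0x02, 0x03 }; /* demo key only */
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, demo_key, sizeof(demo_key));
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* One-shot digest: init + update + final in one call. */
		err = crypto_shash_digest(desc, data, len, mac);
		shash_desc_zero(desc);
	}
	crypto_free_shash(tfm);
	return err;
}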
- -config CRYPTO_TEA - tristate "TEA, XTEA and XETA cipher algorithms" - select CRYPTO_ALGAPI +config CRYPTO_XXHASH + tristate "xxHash" + select CRYPTO_HASH + select XXHASH help - TEA cipher algorithm. - - Tiny Encryption Algorithm is a simple cipher that uses - many rounds for security. It is very fast and uses - little memory. + xxHash non-cryptographic hash algorithm - Xtendend Tiny Encryption Algorithm is a modification to - the TEA algorithm to address a potential key weakness - in the TEA algorithm. - - Xtendend Encryption Tiny Algorithm is a mis-implementation - of the XTEA algorithm for compatibility purposes. + Extremely fast, working at speeds close to RAM limits. -config CRYPTO_TWOFISH - tristate "Twofish cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_TWOFISH_COMMON - help - Twofish cipher algorithm. + Used by the btrfs filesystem. - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. +endmenu - See also: - <http://www.schneier.com/twofish.html> +menu "CRCs (cyclic redundancy checks)" -config CRYPTO_TWOFISH_COMMON - tristate - help - Common parts of the Twofish cipher algorithm shared by the - generic c and the assembler implementations. - -config CRYPTO_TWOFISH_586 - tristate "Twofish cipher algorithms (i586)" - depends on (X86 || UML_X86) && !64BIT - select CRYPTO_ALGAPI - select CRYPTO_TWOFISH_COMMON +config CRYPTO_CRC32C + tristate "CRC32c" + select CRYPTO_HASH + select CRC32 help - Twofish cipher algorithm. + CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720) - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. + A 32-bit CRC (cyclic redundancy check) with a polynomial defined + by G. Castagnoli, S. Braeuer and M. Herrman in "Optimization of Cyclic + Redundancy-Check Codes with 24 and 32 Parity Bits", IEEE Transactions + on Communications, Vol. 41, No. 6, June 1993, selected for use with + iSCSI. - See also: - <http://www.schneier.com/twofish.html> + Used by btrfs, ext4, jbd2, NVMeoF/TCP, and iSCSI. -config CRYPTO_TWOFISH_X86_64 - tristate "Twofish cipher algorithm (x86_64)" - depends on (X86 || UML_X86) && 64BIT - select CRYPTO_ALGAPI - select CRYPTO_TWOFISH_COMMON +config CRYPTO_CRC32 + tristate "CRC32" + select CRYPTO_HASH + select CRC32 help - Twofish cipher algorithm (x86_64). - - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. + CRC32 CRC algorithm (IEEE 802.3) - See also: - <http://www.schneier.com/twofish.html> + Used by RoCEv2 and f2fs. -config CRYPTO_TWOFISH_X86_64_3WAY - tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_TWOFISH_COMMON - select CRYPTO_TWOFISH_X86_64 - select CRYPTO_GLUE_HELPER_X86 +config CRYPTO_CRCT10DIF + tristate "CRCT10DIF" + select CRYPTO_HASH help - Twofish cipher algorithm (x86_64, 3-way parallel). + CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF) - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. 
+ CRC algorithm used by the SCSI Block Commands standard. - This module provides Twofish cipher algorithm that processes three - blocks parallel, utilizing resources of out-of-order CPUs better. - - See also: - <http://www.schneier.com/twofish.html> - -config CRYPTO_TWOFISH_AVX_X86_64 - tristate "Twofish cipher algorithm (x86_64/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_GLUE_HELPER_X86 - select CRYPTO_SIMD - select CRYPTO_TWOFISH_COMMON - select CRYPTO_TWOFISH_X86_64 - select CRYPTO_TWOFISH_X86_64_3WAY +config CRYPTO_CRC64_ROCKSOFT + tristate "CRC64 based on Rocksoft Model algorithm" + depends on CRC64 + select CRYPTO_HASH help - Twofish cipher algorithm (x86_64/AVX). + CRC64 CRC algorithm based on the Rocksoft Model CRC Algorithm - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. + Used by the NVMe implementation of T10 DIF (BLK_DEV_INTEGRITY) - This module provides the Twofish cipher algorithm that processes - eight blocks parallel using the AVX Instruction Set. + See https://zlib.net/crc_v3.txt - See also: - <http://www.schneier.com/twofish.html> +endmenu -comment "Compression" +menu "Compression" config CRYPTO_DEFLATE - tristate "Deflate compression algorithm" + tristate "Deflate" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select ZLIB_INFLATE select ZLIB_DEFLATE help - This is the Deflate algorithm (RFC1951), specified for use in - IPSec with the IPCOMP protocol (RFC3173, RFC2394). + Deflate compression algorithm (RFC1951) - You will most probably want this if using IPSec. + Used by IPSec with the IPCOMP protocol (RFC3173, RFC2394) config CRYPTO_LZO - tristate "LZO compression algorithm" + tristate "LZO" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select LZO_COMPRESS select LZO_DECOMPRESS help - This is the LZO algorithm. + LZO compression algorithm + + See https://www.oberhumer.com/opensource/lzo/ for further information. config CRYPTO_842 - tristate "842 compression algorithm" + tristate "842" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select 842_COMPRESS select 842_DECOMPRESS help - This is the 842 algorithm. + 842 compression algorithm by IBM + + See https://github.com/plauth/lib842 for further information. config CRYPTO_LZ4 - tristate "LZ4 compression algorithm" + tristate "LZ4" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select LZ4_COMPRESS select LZ4_DECOMPRESS help - This is the LZ4 algorithm. + LZ4 compression algorithm + + See https://github.com/lz4/lz4 for further information. config CRYPTO_LZ4HC - tristate "LZ4HC compression algorithm" + tristate "LZ4HC" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select LZ4HC_COMPRESS select LZ4_DECOMPRESS help - This is the LZ4 high compression mode algorithm. + LZ4 high compression mode algorithm + + See https://github.com/lz4/lz4 for further information. config CRYPTO_ZSTD - tristate "Zstd compression algorithm" + tristate "Zstd" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select ZSTD_COMPRESS select ZSTD_DECOMPRESS help - This is the zstd algorithm. + zstd compression algorithm + + See https://github.com/facebook/zstd for further information. 
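All of the compression entries above register with the acomp interface, so in-kernel users drive them uniformly instead of calling zlib, LZO, zstd, etc. directly. A minimal synchronous sketch (illustrative only, not part of this patch; the function name, the choice of "zstd", and the caller-supplied linear buffers are assumptions):

/* Illustrative only -- not part of this patch. Compresses @slen bytes
 * from @src into @dst (both assumed to be linear kmalloc'd buffers). */
#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int zstd_compress_demo(void *src, unsigned int slen,
			      void *dst, unsigned int dlen)
{
	struct scatterlist sg_src, sg_dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("zstd", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Turn the possibly-asynchronous request into a blocking call. */
	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	/* On success, real callers would read the produced length from
	 * req->dlen before freeing the request. */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}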
+
+endmenu
-comment "Random Number Generation"
+menu "Random number generation"
 
 config CRYPTO_ANSI_CPRNG
-	tristate "Pseudo Random Number Generation for Cryptographic modules"
+	tristate "ANSI PRNG (Pseudo Random Number Generator)"
 	select CRYPTO_AES
 	select CRYPTO_RNG
 	help
-	  This option enables the generic pseudo random number generator
-	  for cryptographic modules. Uses the Algorithm specified in
-	  ANSI X9.31 A.2.4. Note that this option must be enabled if
-	  CRYPTO_FIPS is selected
+	  Pseudo RNG (random number generator) (ANSI X9.31 Appendix A.2.4)
+
+	  This uses the AES cipher algorithm.
+
+	  Note that this option must be enabled if CRYPTO_FIPS is selected
 
 menuconfig CRYPTO_DRBG_MENU
-	tristate "NIST SP800-90A DRBG"
+	tristate "NIST SP800-90A DRBG (Deterministic Random Bit Generator)"
 	help
-	  NIST SP800-90A compliant DRBG. In the following submenu, one or
-	  more of the DRBG types must be selected.
+	  DRBG (Deterministic Random Bit Generator) (NIST SP800-90A)
+
+	  In the following submenu, one or more of the DRBG types must be selected.
 
 if CRYPTO_DRBG_MENU
@@ -1809,20 +1250,24 @@ config CRYPTO_DRBG_HMAC
 	bool
 	default y
 	select CRYPTO_HMAC
-	select CRYPTO_SHA256
+	select CRYPTO_SHA512
 
 config CRYPTO_DRBG_HASH
-	bool "Enable Hash DRBG"
+	bool "Hash_DRBG"
 	select CRYPTO_SHA256
 	help
-	  Enable the Hash DRBG variant as defined in NIST SP800-90A.
+	  Hash_DRBG variant as defined in NIST SP800-90A.
+
+	  This uses the SHA-1, SHA-256, SHA-384, or SHA-512 hash algorithms.
 
 config CRYPTO_DRBG_CTR
-	bool "Enable CTR DRBG"
+	bool "CTR_DRBG"
 	select CRYPTO_AES
-	depends on CRYPTO_CTR
+	select CRYPTO_CTR
 	help
-	  Enable the CTR DRBG variant as defined in NIST SP800-90A.
+	  CTR_DRBG variant as defined in NIST SP800-90A.
+
+	  This uses the AES cipher algorithm with the counter block mode.
 
 config CRYPTO_DRBG
 	tristate
@@ -1833,72 +1278,143 @@ endif	# if CRYPTO_DRBG_MENU
 
 config CRYPTO_JITTERENTROPY
-	tristate "Jitterentropy Non-Deterministic Random Number Generator"
+	tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)"
 	select CRYPTO_RNG
 	help
-	  The Jitterentropy RNG is a noise that is intended
-	  to provide seed to another RNG. The RNG does not
-	  perform any cryptographic whitening of the generated
-	  random numbers. This Jitterentropy RNG registers with
-	  the kernel crypto API and can be used by any caller.
+	  CPU Jitter RNG (Random Number Generator) from the Jitterentropy library
+
+	  A non-physical non-deterministic ("true") RNG (e.g., an entropy source
+	  compliant with NIST SP800-90B) intended to provide a seed to a
+	  deterministic RNG (e.g. per NIST SP800-90C).
+	  This RNG does not perform any cryptographic whitening of the generated
+	  random numbers.
+
+	  See https://www.chronox.de/jent.html
+
+config CRYPTO_KDF800108_CTR
+	tristate
+	select CRYPTO_HMAC
+	select CRYPTO_SHA256
+
+endmenu
+menu "Userspace interface"
 
 config CRYPTO_USER_API
 	tristate
 
 config CRYPTO_USER_API_HASH
-	tristate "User-space interface for hash algorithms"
+	tristate "Hash algorithms"
 	depends on NET
 	select CRYPTO_HASH
 	select CRYPTO_USER_API
 	help
-	  This option enables the user-spaces interface for hash
-	  algorithms.
+	  Enable the userspace interface for hash algorithms.
+ + See Documentation/crypto/userspace-if.rst and + https://www.chronox.de/libkcapi/html/index.html config CRYPTO_USER_API_SKCIPHER - tristate "User-space interface for symmetric key cipher algorithms" + tristate "Symmetric key cipher algorithms" depends on NET select CRYPTO_SKCIPHER select CRYPTO_USER_API help - This option enables the user-spaces interface for symmetric - key cipher algorithms. + Enable the userspace interface for symmetric key cipher algorithms. + + See Documentation/crypto/userspace-if.rst and + https://www.chronox.de/libkcapi/html/index.html config CRYPTO_USER_API_RNG - tristate "User-space interface for random number generator algorithms" + tristate "RNG (random number generator) algorithms" depends on NET select CRYPTO_RNG select CRYPTO_USER_API help - This option enables the user-spaces interface for random - number generator algorithms. + Enable the userspace interface for RNG (random number generator) + algorithms. + + See Documentation/crypto/userspace-if.rst and + https://www.chronox.de/libkcapi/html/index.html + +config CRYPTO_USER_API_RNG_CAVP + bool "Enable CAVP testing of DRBG" + depends on CRYPTO_USER_API_RNG && CRYPTO_DRBG + help + Enable extra APIs in the userspace interface for NIST CAVP + (Cryptographic Algorithm Validation Program) testing: + - resetting DRBG entropy + - providing Additional Data + + This should only be enabled for CAVP testing. You should say + no unless you know what this is. config CRYPTO_USER_API_AEAD - tristate "User-space interface for AEAD cipher algorithms" + tristate "AEAD cipher algorithms" depends on NET select CRYPTO_AEAD select CRYPTO_SKCIPHER select CRYPTO_NULL select CRYPTO_USER_API help - This option enables the user-spaces interface for AEAD - cipher algorithms. + Enable the userspace interface for AEAD cipher algorithms. + + See Documentation/crypto/userspace-if.rst and + https://www.chronox.de/libkcapi/html/index.html + +config CRYPTO_USER_API_ENABLE_OBSOLETE + bool "Obsolete cryptographic algorithms" + depends on CRYPTO_USER_API + default y + help + Allow obsolete cryptographic algorithms to be selected that have + already been phased out from internal use by the kernel, and are + only useful for userspace clients that still rely on them. config CRYPTO_STATS - bool "Crypto usage statistics for User-space" + bool "Crypto usage statistics" depends on CRYPTO_USER help - This option enables the gathering of crypto stats. - This will collect: - - encrypt/decrypt size and numbers of symmeric operations - - compress/decompress size and numbers of compress operations - - size and numbers of hash operations - - encrypt/decrypt/sign/verify numbers for asymmetric operations - - generate/seed numbers for rng operations + Enable the gathering of crypto stats. 
+ + This collects data sizes, numbers of requests, and numbers + of errors processed by: + - AEAD ciphers (encrypt, decrypt) + - asymmetric key ciphers (encrypt, decrypt, verify, sign) + - symmetric key ciphers (encrypt, decrypt) + - compression algorithms (compress, decompress) + - hash algorithms (hash) + - key-agreement protocol primitives (setsecret, generate + public key, compute shared secret) + - RNG (generate, seed) + +endmenu config CRYPTO_HASH_INFO bool -source "lib/crypto/Kconfig" +if !KMSAN # avoid false positives from assembly +if ARM +source "arch/arm/crypto/Kconfig" +endif +if ARM64 +source "arch/arm64/crypto/Kconfig" +endif +if MIPS +source "arch/mips/crypto/Kconfig" +endif +if PPC +source "arch/powerpc/crypto/Kconfig" +endif +if S390 +source "arch/s390/crypto/Kconfig" +endif +if SPARC +source "arch/sparc/crypto/Kconfig" +endif +if X86 +source "arch/x86/crypto/Kconfig" +endif +endif + source "drivers/crypto/Kconfig" source "crypto/asymmetric_keys/Kconfig" source "certs/Kconfig" diff --git a/crypto/Makefile b/crypto/Makefile index 4ca12b6044f7..303b21c43df0 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -4,7 +4,7 @@ # obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o memneq.o +crypto-y := api.o cipher.o compress.o obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o obj-$(CONFIG_CRYPTO_FIPS) += fips.o @@ -42,6 +42,20 @@ rsa_generic-y += rsa_helper.o rsa_generic-y += rsa-pkcs1pad.o obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o +$(obj)/sm2signature.asn1.o: $(obj)/sm2signature.asn1.c $(obj)/sm2signature.asn1.h +$(obj)/sm2.o: $(obj)/sm2signature.asn1.h + +sm2_generic-y += sm2signature.asn1.o +sm2_generic-y += sm2.o + +obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o + +$(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h +$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h +ecdsa_generic-y += ecdsa.o +ecdsa_generic-y += ecdsasignature.asn1.o +obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o + crypto_acompress-y := acompress.o crypto_acompress-y += scompress.o obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o @@ -59,21 +73,18 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD5) += md5.o -obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o -obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o -obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o -obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o +obj-$(CONFIG_CRYPTO_SM3) += sm3.o +obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 -obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o -obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o +CFLAGS_blake2b_generic.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930 obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o @@ -83,6 +94,8 @@ obj-$(CONFIG_CRYPTO_CTS) += cts.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o +obj-$(CONFIG_CRYPTO_XCTR) += xctr.o +obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o 
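The userspace interface options introduced above (CRYPTO_USER_API_HASH and friends) are consumed through AF_ALG sockets, as described in Documentation/crypto/userspace-if.rst. A minimal userspace sketch of the hash path (illustrative only, not part of this patch; error checking omitted for brevity):

/* Illustrative only -- computes sha256("abc") via algif_hash. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",		/* served by algif_hash */
		.salg_name   = "sha256",	/* any registered hash name */
	};
	unsigned char digest[32];
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);	/* one operation instance */

	write(opfd, "abc", 3);			/* feed the message */
	read(opfd, digest, sizeof(digest));	/* collect the digest */

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}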
obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o @@ -109,6 +122,8 @@ CFLAGS_aegis128-neon-inner.o += $(aegis128-cflags-y) CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o endif +# Enable <arm_neon.h> +CFLAGS_aegis128-neon-inner.o += -isystem $(shell $(CC) -print-file-name=include) obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o @@ -122,7 +137,8 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 -obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o +obj-$(CONFIG_CRYPTO_SM4) += sm4.o +obj-$(CONFIG_CRYPTO_SM4_GENERIC) += sm4_generic.o obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o @@ -133,7 +149,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o -obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o +obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o @@ -141,6 +157,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o +obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o obj-$(CONFIG_CRYPTO_LZ4) += lz4.o @@ -157,6 +174,7 @@ UBSAN_SANITIZE_jitterentropy.o = n jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o +obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o @@ -189,3 +207,8 @@ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o crypto_simd-y := simd.o obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o + +# +# Key derivation function +# +obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o diff --git a/crypto/acompress.c b/crypto/acompress.c index 84a76723e851..c32c72048a1c 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_acomp); +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node) +{ + return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask, + node); +} +EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); + struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) { struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); diff --git a/crypto/adiantum.c b/crypto/adiantum.c index cf2b9f4103dd..84450130cb6b 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -32,6 +32,7 @@ #include <crypto/b128ops.h> #include <crypto/chacha.h> +#include <crypto/internal/cipher.h> 
#include <crypto/internal/hash.h> #include <crypto/internal/poly1305.h> #include <crypto/internal/skcipher.h> @@ -177,7 +178,7 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key, keyp += NHPOLY1305_KEY_SIZE; WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]); out: - kzfree(data); + kfree_sensitive(data); return err; } @@ -490,7 +491,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; const char *nhpoly1305_name; struct skcipher_instance *inst; @@ -500,14 +500,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_alg *hash_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -565,8 +560,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags & - CRYPTO_ALG_ASYNC; inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | @@ -624,3 +617,4 @@ MODULE_DESCRIPTION("Adiantum length-preserving encryption mode"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); MODULE_ALIAS_CRYPTO("adiantum"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/aegis.h b/crypto/aegis.h index 6920ebe77679..6ef9c174c973 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -21,9 +21,28 @@ union aegis_block { u8 bytes[AEGIS_BLOCK_SIZE]; }; +struct aegis_state; + +extern int aegis128_have_aes_insn; + #define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block)) #define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN) +bool crypto_aegis128_have_simd(void); +void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); +void crypto_aegis128_init_simd(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv); +void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +int crypto_aegis128_final_simd(struct aegis_state *state, + union aegis_block *tag_xor, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize); + static __always_inline void crypto_aegis_block_xor(union aegis_block *dst, const union aegis_block *src) { diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index 44fb4956f0dd..c4f1bfa1d04f 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -58,19 +58,6 @@ static bool aegis128_do_simd(void) return false; } -bool crypto_aegis128_have_simd(void); -void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); -void crypto_aegis128_init_simd(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv); -void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -void 
crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -void crypto_aegis128_final_simd(struct aegis_state *state, - union aegis_block *tag_xor, - u64 assoclen, u64 cryptlen); - static void crypto_aegis128_update(struct aegis_state *state) { union aegis_block tmp; @@ -84,9 +71,10 @@ static void crypto_aegis128_update(struct aegis_state *state) } static void crypto_aegis128_update_a(struct aegis_state *state, - const union aegis_block *msg) + const union aegis_block *msg, + bool do_simd) { - if (aegis128_do_simd()) { + if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && do_simd) { crypto_aegis128_update_simd(state, msg); return; } @@ -95,9 +83,10 @@ static void crypto_aegis128_update_a(struct aegis_state *state, crypto_aegis_block_xor(&state->blocks[0], msg); } -static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) +static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg, + bool do_simd) { - if (aegis128_do_simd()) { + if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && do_simd) { crypto_aegis128_update_simd(state, msg); return; } @@ -126,27 +115,28 @@ static void crypto_aegis128_init(struct aegis_state *state, crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); for (i = 0; i < 5; i++) { - crypto_aegis128_update_a(state, key); - crypto_aegis128_update_a(state, &key_iv); + crypto_aegis128_update_a(state, key, false); + crypto_aegis128_update_a(state, &key_iv, false); } } static void crypto_aegis128_ad(struct aegis_state *state, - const u8 *src, unsigned int size) + const u8 *src, unsigned int size, + bool do_simd) { if (AEGIS_ALIGNED(src)) { const union aegis_block *src_blk = (const union aegis_block *)src; while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_a(state, src_blk); + crypto_aegis128_update_a(state, src_blk, do_simd); size -= AEGIS_BLOCK_SIZE; src_blk++; } } else { while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_u(state, src); + crypto_aegis128_update_u(state, src, do_simd); size -= AEGIS_BLOCK_SIZE; src += AEGIS_BLOCK_SIZE; @@ -154,6 +144,12 @@ static void crypto_aegis128_ad(struct aegis_state *state, } } +static void crypto_aegis128_wipe_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + memzero_explicit(dst, size); +} + static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { @@ -172,7 +168,7 @@ static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, crypto_aegis_block_xor(&tmp, &state->blocks[1]); crypto_aegis_block_xor(&tmp, src_blk); - crypto_aegis128_update_a(state, src_blk); + crypto_aegis128_update_a(state, src_blk, false); *dst_blk = tmp; @@ -188,7 +184,7 @@ static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, crypto_aegis_block_xor(&tmp, &state->blocks[1]); crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - crypto_aegis128_update_u(state, src); + crypto_aegis128_update_u(state, src, false); memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); @@ -207,7 +203,7 @@ static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, crypto_aegis_block_xor(&tmp, &state->blocks[4]); crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis128_update_a(state, &msg); + crypto_aegis128_update_a(state, &msg, false); crypto_aegis_block_xor(&msg, &tmp); @@ -233,7 +229,7 @@ static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, crypto_aegis_block_xor(&tmp, &state->blocks[1]); 
crypto_aegis_block_xor(&tmp, src_blk); - crypto_aegis128_update_a(state, &tmp); + crypto_aegis128_update_a(state, &tmp, false); *dst_blk = tmp; @@ -249,7 +245,7 @@ static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, crypto_aegis_block_xor(&tmp, &state->blocks[1]); crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - crypto_aegis128_update_a(state, &tmp); + crypto_aegis128_update_a(state, &tmp, false); memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); @@ -271,7 +267,7 @@ static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); - crypto_aegis128_update_a(state, &msg); + crypto_aegis128_update_a(state, &msg, false); memcpy(dst, msg.bytes, size); } @@ -279,7 +275,8 @@ static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, static void crypto_aegis128_process_ad(struct aegis_state *state, struct scatterlist *sg_src, - unsigned int assoclen) + unsigned int assoclen, + bool do_simd) { struct scatter_walk walk; union aegis_block buf; @@ -296,13 +293,13 @@ static void crypto_aegis128_process_ad(struct aegis_state *state, if (pos > 0) { unsigned int fill = AEGIS_BLOCK_SIZE - pos; memcpy(buf.bytes + pos, src, fill); - crypto_aegis128_update_a(state, &buf); + crypto_aegis128_update_a(state, &buf, do_simd); pos = 0; left -= fill; src += fill; } - crypto_aegis128_ad(state, src, left); + crypto_aegis128_ad(state, src, left, do_simd); src += left & ~(AEGIS_BLOCK_SIZE - 1); left &= AEGIS_BLOCK_SIZE - 1; } @@ -318,13 +315,12 @@ static void crypto_aegis128_process_ad(struct aegis_state *state, if (pos > 0) { memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); - crypto_aegis128_update_a(state, &buf); + crypto_aegis128_update_a(state, &buf, do_simd); } } static __always_inline int crypto_aegis128_process_crypt(struct aegis_state *state, - struct aead_request *req, struct skcipher_walk *walk, void (*crypt)(struct aegis_state *state, u8 *dst, const u8 *src, @@ -361,7 +357,7 @@ static void crypto_aegis128_final(struct aegis_state *state, crypto_aegis_block_xor(&tmp, &state->blocks[3]); for (i = 0; i < 7; i++) - crypto_aegis128_update_a(state, &tmp); + crypto_aegis128_update_a(state, &tmp, false); for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) crypto_aegis_block_xor(tag_xor, &state->blocks[i]); @@ -389,7 +385,7 @@ static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, return 0; } -static int crypto_aegis128_encrypt(struct aead_request *req) +static int crypto_aegis128_encrypt_generic(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); union aegis_block tag = {}; @@ -400,27 +396,18 @@ static int crypto_aegis128_encrypt(struct aead_request *req) struct aegis_state state; skcipher_walk_aead_encrypt(&walk, req, false); - if (aegis128_do_simd()) { - crypto_aegis128_init_simd(&state, &ctx->key, req->iv); - crypto_aegis128_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_process_crypt(&state, req, &walk, - crypto_aegis128_encrypt_chunk_simd); - crypto_aegis128_final_simd(&state, &tag, req->assoclen, - cryptlen); - } else { - crypto_aegis128_init(&state, &ctx->key, req->iv); - crypto_aegis128_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_process_crypt(&state, req, &walk, - crypto_aegis128_encrypt_chunk); - crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen); - } + crypto_aegis128_init(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen, false); + crypto_aegis128_process_crypt(&state, &walk, + 
crypto_aegis128_encrypt_chunk); + crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen); scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, authsize, 1); return 0; } -static int crypto_aegis128_decrypt(struct aead_request *req) +static int crypto_aegis128_decrypt_generic(struct aead_request *req) { static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; struct crypto_aead *tfm = crypto_aead_reqtfm(req); @@ -435,60 +422,152 @@ static int crypto_aegis128_decrypt(struct aead_request *req) authsize, 0); skcipher_walk_aead_decrypt(&walk, req, false); - if (aegis128_do_simd()) { - crypto_aegis128_init_simd(&state, &ctx->key, req->iv); - crypto_aegis128_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_process_crypt(&state, req, &walk, - crypto_aegis128_decrypt_chunk_simd); - crypto_aegis128_final_simd(&state, &tag, req->assoclen, - cryptlen); - } else { - crypto_aegis128_init(&state, &ctx->key, req->iv); - crypto_aegis128_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_process_crypt(&state, req, &walk, - crypto_aegis128_decrypt_chunk); - crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen); + crypto_aegis128_init(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen, false); + crypto_aegis128_process_crypt(&state, &walk, + crypto_aegis128_decrypt_chunk); + crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen); + + if (unlikely(crypto_memneq(tag.bytes, zeros, authsize))) { + /* + * From Chapter 4. 'Security Analysis' of the AEGIS spec [0] + * + * "3. If verification fails, the decrypted plaintext and the + * wrong authentication tag should not be given as output." + * + * [0] https://competitions.cr.yp.to/round3/aegisv11.pdf + */ + skcipher_walk_aead_decrypt(&walk, req, false); + crypto_aegis128_process_crypt(NULL, &walk, + crypto_aegis128_wipe_chunk); + memzero_explicit(&tag, sizeof(tag)); + return -EBADMSG; } - - return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; + return 0; } -static struct aead_alg crypto_aegis128_alg = { - .setkey = crypto_aegis128_setkey, - .setauthsize = crypto_aegis128_setauthsize, - .encrypt = crypto_aegis128_encrypt, - .decrypt = crypto_aegis128_decrypt, +static int crypto_aegis128_encrypt_simd(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + unsigned int cryptlen = req->cryptlen; + struct skcipher_walk walk; + struct aegis_state state; + + if (!aegis128_do_simd()) + return crypto_aegis128_encrypt_generic(req); + + skcipher_walk_aead_encrypt(&walk, req, false); + crypto_aegis128_init_simd(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen, true); + crypto_aegis128_process_crypt(&state, &walk, + crypto_aegis128_encrypt_chunk_simd); + crypto_aegis128_final_simd(&state, &tag, req->assoclen, cryptlen, 0); - .ivsize = AEGIS128_NONCE_SIZE, - .maxauthsize = AEGIS128_MAX_AUTH_SIZE, - .chunksize = AEGIS_BLOCK_SIZE, + scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, + authsize, 1); + return 0; +} - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct aegis_ctx), - .cra_alignmask = 0, +static int crypto_aegis128_decrypt_simd(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + struct skcipher_walk walk; + struct aegis_state state; - .cra_priority = 100, + if (!aegis128_do_simd()) + return crypto_aegis128_decrypt_generic(req); - .cra_name = "aegis128", - .cra_driver_name = "aegis128-generic", + scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, + authsize, 0); - .cra_module = THIS_MODULE, + skcipher_walk_aead_decrypt(&walk, req, false); + crypto_aegis128_init_simd(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen, true); + crypto_aegis128_process_crypt(&state, &walk, + crypto_aegis128_decrypt_chunk_simd); + + if (unlikely(crypto_aegis128_final_simd(&state, &tag, req->assoclen, + cryptlen, authsize))) { + skcipher_walk_aead_decrypt(&walk, req, false); + crypto_aegis128_process_crypt(NULL, &walk, + crypto_aegis128_wipe_chunk); + return -EBADMSG; } + return 0; +} + +static struct aead_alg crypto_aegis128_alg_generic = { + .setkey = crypto_aegis128_setkey, + .setauthsize = crypto_aegis128_setauthsize, + .encrypt = crypto_aegis128_encrypt_generic, + .decrypt = crypto_aegis128_decrypt_generic, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS_BLOCK_SIZE, + + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct aegis_ctx), + .base.cra_alignmask = 0, + .base.cra_priority = 100, + .base.cra_name = "aegis128", + .base.cra_driver_name = "aegis128-generic", + .base.cra_module = THIS_MODULE, +}; + +static struct aead_alg crypto_aegis128_alg_simd = { + .setkey = crypto_aegis128_setkey, + .setauthsize = crypto_aegis128_setauthsize, + .encrypt = crypto_aegis128_encrypt_simd, + .decrypt = crypto_aegis128_decrypt_simd, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS_BLOCK_SIZE, + + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct aegis_ctx), + .base.cra_alignmask = 0, + .base.cra_priority = 200, + .base.cra_name = "aegis128", + 
.base.cra_driver_name = "aegis128-simd", + .base.cra_module = THIS_MODULE, }; static int __init crypto_aegis128_module_init(void) { + int ret; + + ret = crypto_register_aead(&crypto_aegis128_alg_generic); + if (ret) + return ret; + if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && - crypto_aegis128_have_simd()) + crypto_aegis128_have_simd()) { + ret = crypto_register_aead(&crypto_aegis128_alg_simd); + if (ret) { + crypto_unregister_aead(&crypto_aegis128_alg_generic); + return ret; + } static_branch_enable(&have_simd); - - return crypto_register_aead(&crypto_aegis128_alg); + } + return 0; } static void __exit crypto_aegis128_module_exit(void) { - crypto_unregister_aead(&crypto_aegis128_alg); + if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && + crypto_aegis128_have_simd()) + crypto_unregister_aead(&crypto_aegis128_alg_simd); + + crypto_unregister_aead(&crypto_aegis128_alg_generic); } subsys_initcall(crypto_aegis128_module_init); @@ -499,3 +578,4 @@ MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); MODULE_ALIAS_CRYPTO("aegis128"); MODULE_ALIAS_CRYPTO("aegis128-generic"); +MODULE_ALIAS_CRYPTO("aegis128-simd"); diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c index 2a660ac1bc3a..7de485907d81 100644 --- a/crypto/aegis128-neon-inner.c +++ b/crypto/aegis128-neon-inner.c @@ -20,7 +20,6 @@ extern int aegis128_have_aes_insn; void *memcpy(void *dest, const void *src, size_t n); -void *memset(void *s, int c, size_t n); struct aegis128_state { uint8x16_t v[5]; @@ -173,10 +172,57 @@ void crypto_aegis128_update_neon(void *state, const void *msg) aegis128_save_state_neon(st, state); } +#ifdef CONFIG_ARM +/* + * AArch32 does not provide these intrinsics natively because it does not + * implement the underlying instructions. AArch32 only provides 64-bit + * wide vtbl.8/vtbx.8 instruction, so use those instead. 
+ */ +static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b) +{ + union { + uint8x16_t val; + uint8x8x2_t pair; + } __a = { a }; + + return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)), + vtbl2_u8(__a.pair, vget_high_u8(b))); +} + +static uint8x16_t vqtbx1q_u8(uint8x16_t v, uint8x16_t a, uint8x16_t b) +{ + union { + uint8x16_t val; + uint8x8x2_t pair; + } __a = { a }; + + return vcombine_u8(vtbx2_u8(vget_low_u8(v), __a.pair, vget_low_u8(b)), + vtbx2_u8(vget_high_u8(v), __a.pair, vget_high_u8(b))); +} + +static int8_t vminvq_s8(int8x16_t v) +{ + int8x8_t s = vpmin_s8(vget_low_s8(v), vget_high_s8(v)); + + s = vpmin_s8(s, s); + s = vpmin_s8(s, s); + s = vpmin_s8(s, s); + + return vget_lane_s8(s, 0); +} +#endif + +static const uint8_t permute[] __aligned(64) = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, +}; + void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, unsigned int size) { struct aegis128_state st = aegis128_load_state_neon(state); + const int short_input = size < AEGIS_BLOCK_SIZE; uint8x16_t msg; preload_sbox(); @@ -186,7 +232,8 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, msg = vld1q_u8(src); st = aegis128_update_neon(st, msg); - vst1q_u8(dst, msg ^ s); + msg ^= s; + vst1q_u8(dst, msg); size -= AEGIS_BLOCK_SIZE; src += AEGIS_BLOCK_SIZE; @@ -195,13 +242,26 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, if (size > 0) { uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - uint8_t buf[AEGIS_BLOCK_SIZE] = {}; + uint8_t buf[AEGIS_BLOCK_SIZE]; + const void *in = src; + void *out = dst; + uint8x16_t m; - memcpy(buf, src, size); - msg = vld1q_u8(buf); - st = aegis128_update_neon(st, msg); - vst1q_u8(buf, msg ^ s); - memcpy(dst, buf, size); + if (__builtin_expect(short_input, 0)) + in = out = memcpy(buf + AEGIS_BLOCK_SIZE - size, src, size); + + m = vqtbl1q_u8(vld1q_u8(in + size - AEGIS_BLOCK_SIZE), + vld1q_u8(permute + 32 - size)); + + st = aegis128_update_neon(st, m); + + vst1q_u8(out + size - AEGIS_BLOCK_SIZE, + vqtbl1q_u8(m ^ s, vld1q_u8(permute + size))); + + if (__builtin_expect(short_input, 0)) + memcpy(dst, out, size); + else + vst1q_u8(out - AEGIS_BLOCK_SIZE, msg); } aegis128_save_state_neon(st, state); @@ -211,6 +271,7 @@ void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, unsigned int size) { struct aegis128_state st = aegis128_load_state_neon(state); + const int short_input = size < AEGIS_BLOCK_SIZE; uint8x16_t msg; preload_sbox(); @@ -228,21 +289,34 @@ void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, if (size > 0) { uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; uint8_t buf[AEGIS_BLOCK_SIZE]; + const void *in = src; + void *out = dst; + uint8x16_t m; - vst1q_u8(buf, s); - memcpy(buf, src, size); - msg = vld1q_u8(buf) ^ s; - vst1q_u8(buf, msg); - memcpy(dst, buf, size); + if (__builtin_expect(short_input, 0)) + in = out = memcpy(buf + AEGIS_BLOCK_SIZE - size, src, size); - st = aegis128_update_neon(st, msg); + m = s ^ vqtbx1q_u8(s, vld1q_u8(in + size - AEGIS_BLOCK_SIZE), + vld1q_u8(permute + 32 - size)); + + st = aegis128_update_neon(st, m); + + vst1q_u8(out + size - AEGIS_BLOCK_SIZE, + vqtbl1q_u8(m, vld1q_u8(permute + size))); + + if (__builtin_expect(short_input, 0)) + memcpy(dst, out, size); + else + vst1q_u8(out - AEGIS_BLOCK_SIZE, msg); } 
aegis128_save_state_neon(st, state); } -void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen, - uint64_t cryptlen) +int crypto_aegis128_final_neon(void *state, void *tag_xor, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize) { struct aegis128_state st = aegis128_load_state_neon(state); uint8x16_t v; @@ -250,13 +324,21 @@ void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen, preload_sbox(); - v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8 * assoclen), - vmov_n_u64(8 * cryptlen)); + v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8ULL * assoclen), + vmov_n_u64(8ULL * cryptlen)); for (i = 0; i < 7; i++) st = aegis128_update_neon(st, v); - v = vld1q_u8(tag_xor); - v ^= st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4]; + v = st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4]; + + if (authsize > 0) { + v = vqtbl1q_u8(~vceqq_u8(v, vld1q_u8(tag_xor)), + vld1q_u8(permute + authsize)); + + return vminvq_s8((int8x16_t)v); + } + vst1q_u8(tag_xor, v); + return 0; } diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c index 8271b1fa0fbc..a7856915ec85 100644 --- a/crypto/aegis128-neon.c +++ b/crypto/aegis128-neon.c @@ -14,8 +14,10 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, unsigned int size); void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, unsigned int size); -void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen, - uint64_t cryptlen); +int crypto_aegis128_final_neon(void *state, void *tag_xor, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize); int aegis128_have_aes_insn __ro_after_init; @@ -28,7 +30,7 @@ bool crypto_aegis128_have_simd(void) return IS_ENABLED(CONFIG_ARM64); } -void crypto_aegis128_init_simd(union aegis_block *state, +void crypto_aegis128_init_simd(struct aegis_state *state, const union aegis_block *key, const u8 *iv) { @@ -37,14 +39,14 @@ void crypto_aegis128_init_simd(union aegis_block *state, kernel_neon_end(); } -void crypto_aegis128_update_simd(union aegis_block *state, const void *msg) +void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg) { kernel_neon_begin(); crypto_aegis128_update_neon(state, msg); kernel_neon_end(); } -void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, +void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { kernel_neon_begin(); @@ -52,7 +54,7 @@ void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, kernel_neon_end(); } -void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, +void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { kernel_neon_begin(); @@ -60,11 +62,18 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, kernel_neon_end(); } -void crypto_aegis128_final_simd(union aegis_block *state, - union aegis_block *tag_xor, - u64 assoclen, u64 cryptlen) +int crypto_aegis128_final_simd(struct aegis_state *state, + union aegis_block *tag_xor, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize) { + int ret; + kernel_neon_begin(); - crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen); + ret = crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen, + authsize); kernel_neon_end(); + + return ret; } diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 439367a8e95c..e893c0f6c879 100644 --- 
a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/net.h> #include <linux/rwsem.h> +#include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/security.h> @@ -24,12 +25,9 @@ struct alg_type_list { struct list_head list; }; -static atomic_long_t alg_memory_allocated; - static struct proto alg_proto = { .name = "ALG", .owner = THIS_MODULE, - .memory_allocated = &alg_memory_allocated, .obj_size = sizeof(struct alg_sock), }; @@ -128,21 +126,15 @@ EXPORT_SYMBOL_GPL(af_alg_release); void af_alg_release_parent(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - unsigned int nokey = ask->nokey_refcnt; - bool last = nokey && !ask->refcnt; + unsigned int nokey = atomic_read(&ask->nokey_refcnt); sk = ask->parent; ask = alg_sk(sk); - local_bh_disable(); - bh_lock_sock(sk); - ask->nokey_refcnt -= nokey; - if (!last) - last = !--ask->refcnt; - bh_unlock_sock(sk); - local_bh_enable(); + if (nokey) + atomic_dec(&ask->nokey_refcnt); - if (last) + if (atomic_dec_and_test(&ask->refcnt)) sock_put(sk); } EXPORT_SYMBOL_GPL(af_alg_release_parent); @@ -152,7 +144,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); - struct sockaddr_alg *sa = (void *)uaddr; + struct sockaddr_alg_new *sa = (void *)uaddr; const struct af_alg_type *type; void *private; int err; @@ -160,7 +152,11 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (sock->state == SS_CONNECTED) return -EINVAL; - if (addr_len < sizeof(*sa)) + BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) != + offsetof(struct sockaddr_alg, salg_name)); + BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa)); + + if (addr_len < sizeof(*sa) + 1) return -EINVAL; /* If caller uses non-allowed flag, return error. 
*/ @@ -168,7 +164,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return -EINVAL; sa->salg_type[sizeof(sa->salg_type) - 1] = 0; - sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0; + sa->salg_name[addr_len - sizeof(*sa) - 1] = 0; type = alg_get_type(sa->salg_type); if (PTR_ERR(type) == -ENOENT) { @@ -187,7 +183,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = -EBUSY; lock_sock(sk); - if (ask->refcnt | ask->nokey_refcnt) + if (atomic_read(&ask->refcnt)) goto unlock; swap(ask->type, type); @@ -203,8 +199,7 @@ unlock: return err; } -static int alg_setkey(struct sock *sk, char __user *ukey, - unsigned int keylen) +static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen) { struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type = ask->type; @@ -216,7 +211,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey, return -ENOMEM; err = -EFAULT; - if (copy_from_user(key, ukey, keylen)) + if (copy_from_sockptr(key, ukey, keylen)) goto out; err = type->setkey(ask->private, key, keylen); @@ -228,7 +223,7 @@ out: } static int alg_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -236,7 +231,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, int err = -EBUSY; lock_sock(sk); - if (ask->refcnt) + if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) goto unlock; type = ask->type; @@ -260,6 +255,14 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, if (!type->setauthsize) goto unlock; err = type->setauthsize(ask->private, optlen); + break; + case ALG_SET_DRBG_ENTROPY: + if (sock->state == SS_CONNECTED) + goto unlock; + if (!type->setentropy) + goto unlock; + + err = type->setentropy(ask->private, optval, optlen); } unlock: @@ -292,6 +295,11 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) security_sock_graft(sk2, newsock); security_sk_clone(sk, sk2); + /* + * newsock->ops assigned here to allow type->accept call to override + * them when required. 
+ */ + newsock->ops = type->ops; err = type->accept(ask->private, sk2); nokey = err == -ENOKEY; @@ -301,14 +309,15 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) if (err) goto unlock; - if (nokey || !ask->refcnt++) + if (atomic_inc_return_relaxed(&ask->refcnt) == 1) sock_hold(sk); - ask->nokey_refcnt += nokey; + if (nokey) { + atomic_inc(&ask->nokey_refcnt); + atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); + } alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; - alg_sk(sk2)->nokey_refcnt = nokey; - newsock->ops = type->ops; newsock->state = SS_CONNECTED; if (nokey) @@ -339,7 +348,6 @@ static const struct proto_ops alg_proto_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, .sendmsg = sock_no_sendmsg, @@ -396,11 +404,11 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len) ssize_t n; int npages, i; - n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off); + n = iov_iter_get_pages2(iter, sgl->pages, len, ALG_MAX_PAGES, &off); if (n < 0) return n; - npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT; + npages = DIV_ROUND_UP(off + n, PAGE_SIZE); if (WARN_ON(npages == 0)) return -EINVAL; /* Add one extra for linking */ @@ -480,8 +488,8 @@ static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) /** * af_alg_alloc_tsgl - allocate the TX SGL * - * @sk socket of connection to user space - * @return: 0 upon success, < 0 upon error + * @sk: socket of connection to user space + * Return: 0 upon success, < 0 upon error */ static int af_alg_alloc_tsgl(struct sock *sk) { @@ -514,15 +522,15 @@ static int af_alg_alloc_tsgl(struct sock *sk) } /** - * aead_count_tsgl - Count number of TX SG entries + * af_alg_count_tsgl - Count number of TX SG entries * * The counting starts from the beginning of the SGL to @bytes. If - * an offset is provided, the counting of the SG entries starts at the offset. + * an @offset is provided, the counting of the SG entries starts at the @offset. * - * @sk socket of connection to user space - * @bytes Count the number of SG entries holding given number of bytes. - * @offset Start the counting of SG entries from the given offset. - * @return Number of TX SG entries found given the constraints + * @sk: socket of connection to user space + * @bytes: Count the number of SG entries holding given number of bytes. + * @offset: Start the counting of SG entries from the given offset. + * Return: Number of TX SG entries found given the constraints */ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) { @@ -566,19 +574,19 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) EXPORT_SYMBOL_GPL(af_alg_count_tsgl); /** - * aead_pull_tsgl - Release the specified buffers from TX SGL + * af_alg_pull_tsgl - Release the specified buffers from TX SGL * - * If @dst is non-null, reassign the pages to dst. The caller must release + * If @dst is non-null, reassign the pages to @dst. The caller must release * the pages. If @dst_offset is given only reassign the pages to @dst starting * at the @dst_offset (byte). The caller must ensure that @dst is large * enough (e.g. by using af_alg_count_tsgl with the same offset). * - * @sk socket of connection to user space - * @used Number of bytes to pull from TX SGL - * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The - * caller must release the buffers in dst. 
- * @dst_offset Reassign the TX SGL from given offset. All buffers before - * reaching the offset is released. + * @sk: socket of connection to user space + * @used: Number of bytes to pull from TX SGL + * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The + * caller must release the buffers in dst. + * @dst_offset: Reassign the TX SGL from the given offset. All buffers before + * reaching the offset are released. */ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset) @@ -639,13 +647,14 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); /** * af_alg_free_areq_sgls - Release TX and RX SGLs of the request * - * @areq Request holding the TX and RX SGL + * @areq: Request holding the TX and RX SGL */ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq) { @@ -680,9 +689,9 @@ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq) /** * af_alg_wait_for_wmem - wait for availability of writable memory * - * @sk socket of connection to user space - * @flags If MSG_DONTWAIT is set, then only report if function would sleep - * @return 0 when writable memory is available, < 0 upon error + * @sk: socket of connection to user space + * @flags: If MSG_DONTWAIT is set, then only report if function would sleep + * Return: 0 when writable memory is available, < 0 upon error */ static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) { @@ -713,7 +722,7 @@ static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) /** * af_alg_wmem_wakeup - wakeup caller when writable memory is available * - * @sk socket of connection to user space + * @sk: socket of connection to user space */ void af_alg_wmem_wakeup(struct sock *sk) { @@ -736,11 +745,12 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); /** * af_alg_wait_for_data - wait for availability of TX data * - * @sk socket of connection to user space - * @flags If MSG_DONTWAIT is set, then only report if function would sleep - * @return 0 when writable memory is available, < 0 upon error + * @sk: socket of connection to user space + * @flags: If MSG_DONTWAIT is set, then only report if function would sleep + * @min: Set to minimum request size if partial requests are allowed. + * Return: 0 when TX data is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -758,7 +768,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -775,7 +787,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data); /** * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel * - * @sk socket of connection to user space + * @sk: socket of connection to user space */ static void af_alg_data_wakeup(struct sock *sk) { @@ -805,12 +817,12 @@ static void af_alg_data_wakeup(struct sock *sk) * * In addition, the ctx is filled with the information sent via CMSG.
 * - * @sock socket of connection to user space - * @msg message from user space - * @size size of message from user space - * @ivsize the size of the IV for the cipher operation to verify that the + * @sock: socket of connection to user space + * @msg: message from user space + * @size: size of message from user space + * @ivsize: the size of the IV for the cipher operation to verify that the * user-space-provided IV has the right size - * @return the number of copied data upon success, < 0 upon error + * Return: the number of bytes copied upon success, < 0 upon error */ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize) @@ -821,8 +833,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, struct af_alg_tsgl *sgl; struct af_alg_control con = {}; long copied = 0; - bool enc = 0; - bool init = 0; + bool enc = false; + bool init = false; int err = 0; if (msg->msg_controllen) { @@ -830,13 +842,13 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, if (err) return err; - init = 1; + init = true; switch (con.op) { case ALG_OP_ENCRYPT: - enc = 1; + enc = true; break; case ALG_OP_DECRYPT: - enc = 0; + enc = false; break; default: return -EINVAL; @@ -847,10 +859,17 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { - err = -EINVAL; - goto unlock; + if (ctx->init && !ctx->more) { + if (ctx->used) { + err = -EINVAL; + goto unlock; + } + + pr_info_once( + "%s sent an empty control message without MSG_MORE.\n", + current->comm); } + ctx->init = true; if (init) { ctx->enc = enc; @@ -909,16 +928,19 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, sg_unmark_end(sg + sgl->cur - 1); do { + struct page *pg; unsigned int i = sgl->cur; plen = min_t(size_t, len, PAGE_SIZE); - sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); - if (!sg_page(sg + i)) { + pg = alloc_page(GFP_KERNEL); + if (!pg) { err = -ENOMEM; goto unlock; } + sg_assign_page(sg + i, pg); + err = memcpy_from_msg(page_address(sg_page(sg + i)), msg, plen); if (err) { @@ -955,6 +977,11 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg); /** * af_alg_sendpage - sendpage system call handler + * @sock: socket of connection to user space to write to + * @page: data to send + * @offset: offset into page to begin sending + * @size: length of data + * @flags: message send/receive flags * * This is a generic implementation of sendpage to fill ctx->tsgl_list. */ @@ -1013,6 +1040,7 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage); /** * af_alg_free_resources - release resources required for crypto request + * @areq: Request holding the TX and RX SGL */ void af_alg_free_resources(struct af_alg_async_req *areq) { @@ -1025,6 +1053,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources); /** * af_alg_async_cb - AIO callback handler + * @_req: async request info + * @err: if non-zero, error result to be returned via ki_complete(); + * otherwise return the AIO output length via ki_complete(). * * This handler cleans up the struct af_alg_async_req upon completion of the * AIO operation. @@ -1045,12 +1076,15 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) af_alg_free_resources(areq); sock_put(sk); - iocb->ki_complete(iocb, err ? err : (int)resultlen, 0); + iocb->ki_complete(iocb, err ? 
err : (int)resultlen); } EXPORT_SYMBOL_GPL(af_alg_async_cb); /** * af_alg_poll - poll system call handler + * @file: file pointer + * @sock: socket to poll + * @wait: poll_table */ __poll_t af_alg_poll(struct file *file, struct socket *sock, poll_table *wait) @@ -1076,9 +1110,9 @@ EXPORT_SYMBOL_GPL(af_alg_poll); /** * af_alg_alloc_areq - allocate struct af_alg_async_req * - * @sk socket of connection to user space - * @areqlen size of struct af_alg_async_req + crypto_*_reqsize - * @return allocated data structure or ERR_PTR upon error + * @sk: socket of connection to user space + * @areqlen: size of struct af_alg_async_req + crypto_*_reqsize + * Return: allocated data structure or ERR_PTR upon error */ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen) @@ -1103,13 +1137,13 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_areq); * af_alg_get_rsgl - create the RX SGL for the output data from the crypto * operation * - * @sk socket of connection to user space - * @msg user space message - * @flags flags used to invoke recvmsg with - * @areq instance of the cryptographic request that will hold the RX SGL - * @maxsize maximum number of bytes to be pulled from user space - * @outlen number of bytes in the RX SGL - * @return 0 on success, < 0 upon error + * @sk: socket of connection to user space + * @msg: user space message + * @flags: flags used to invoke recvmsg with + * @areq: instance of the cryptographic request that will hold the RX SGL + * @maxsize: maximum number of bytes to be pulled from user space + * @outlen: number of bytes in the RX SGL + * Return: 0 on success, < 0 upon error */ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, struct af_alg_async_req *areq, size_t maxsize, @@ -1157,7 +1191,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, len += err; atomic_add(err, &ctx->rcvused); rsgl->sg_num_bytes = err; - iov_iter_advance(&msg->msg_iter, err); } *outlen = len; diff --git a/crypto/ahash.c b/crypto/ahash.c index 68a0f0cb75c4..c2ca631a111f 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -10,7 +10,6 @@ #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> -#include <linux/bug.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> @@ -46,10 +45,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk) unsigned int nbytes = min(walk->entrylen, ((unsigned int)(PAGE_SIZE)) - offset); - if (walk->flags & CRYPTO_ALG_ASYNC) - walk->data = kmap(walk->pg); - else - walk->data = kmap_atomic(walk->pg); + walk->data = kmap_atomic(walk->pg); walk->data += offset; if (offset & alignmask) { @@ -99,16 +95,8 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) } } - if (walk->flags & CRYPTO_ALG_ASYNC) - kunmap(walk->pg); - else { - kunmap_atomic(walk->data); - /* - * The may sleep test only makes sense for sync users. - * Async users don't need to sleep here anyway. 
- */ - crypto_yield(walk->flags); - } + kunmap_atomic(walk->data); + crypto_yield(walk->flags); if (err) return err; @@ -140,33 +128,12 @@ int crypto_hash_walk_first(struct ahash_request *req, walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); walk->sg = req->src; - walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; + walk->flags = req->base.flags; return hash_walk_new_entry(walk); } EXPORT_SYMBOL_GPL(crypto_hash_walk_first); -int crypto_ahash_walk_first(struct ahash_request *req, - struct crypto_hash_walk *walk) -{ - walk->total = req->nbytes; - - if (!walk->total) { - walk->entrylen = 0; - return 0; - } - - walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); - walk->sg = req->src; - walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; - walk->flags |= CRYPTO_ALG_ASYNC; - - BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC); - - return hash_walk_new_entry(walk); -} -EXPORT_SYMBOL_GPL(crypto_ahash_walk_first); - static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -183,7 +150,7 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = tfm->setkey(tfm, alignbuffer, keylen); - kzfree(buffer); + kfree_sensitive(buffer); return ret; } @@ -302,7 +269,7 @@ static void ahash_restore_req(struct ahash_request *req, int err) req->priv = NULL; /* Free the req->priv.priv from the ADJUSTED request. */ - kzfree(priv); + kfree_sensitive(priv); } static void ahash_notify_einprogress(struct ahash_request *req) @@ -477,6 +444,14 @@ static int ahash_def_finup(struct ahash_request *req) return ahash_def_finup_finish1(req, err); } +static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_ahash *hash = __crypto_ahash_cast(tfm); + struct ahash_alg *alg = crypto_ahash_alg(hash); + + alg->exit_tfm(hash); +} + static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); @@ -500,7 +475,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) ahash_set_needkey(hash); } - return 0; + if (alg->exit_tfm) + tfm->exit = crypto_ahash_exit_tfm; + + return alg->init_tfm ? 
alg->init_tfm(hash) : 0; } static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) diff --git a/crypto/akcipher.c b/crypto/akcipher.c index f866085c8a4a..ab975a420e1e 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req) return -ENOSYS; } +static int akcipher_default_set_key(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + return -ENOSYS; +} + int crypto_register_akcipher(struct akcipher_alg *alg) { struct crypto_alg *base = &alg->base; @@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg) alg->encrypt = akcipher_default_op; if (!alg->decrypt) alg->decrypt = akcipher_default_op; + if (!alg->set_priv_key) + alg->set_priv_key = akcipher_default_set_key; akcipher_prepare_alg(alg); return crypto_register_alg(base); diff --git a/crypto/algapi.c b/crypto/algapi.c index 69605e21af92..5c69ff8e8fa5 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -6,6 +6,7 @@ */ #include <crypto/algapi.h> +#include <crypto/internal/simd.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/fips.h> @@ -21,6 +22,11 @@ static LIST_HEAD(crypto_template_list); +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test); +EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test); +#endif + static inline void crypto_check_module_sig(struct module *mod) { if (fips_enabled && mod && !module_sig_ok(mod)) @@ -216,6 +222,32 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, } EXPORT_SYMBOL_GPL(crypto_remove_spawns); +static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg) +{ + struct crypto_larval *larval; + + if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER)) + return NULL; + + larval = crypto_larval_alloc(alg->cra_name, + alg->cra_flags | CRYPTO_ALG_TESTED, 0); + if (IS_ERR(larval)) + return larval; + + larval->adult = crypto_mod_get(alg); + if (!larval->adult) { + kfree(larval); + return ERR_PTR(-ENOENT); + } + + refcount_set(&larval->alg.cra_refcnt, 1); + memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, + CRYPTO_MAX_ALG_NAME); + larval->alg.cra_priority = alg->cra_priority; + + return larval; +} + static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) { struct crypto_alg *q; @@ -250,31 +282,22 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) goto err; } - larval = crypto_larval_alloc(alg->cra_name, - alg->cra_flags | CRYPTO_ALG_TESTED, 0); + larval = crypto_alloc_test_larval(alg); if (IS_ERR(larval)) goto out; - ret = -ENOENT; - larval->adult = crypto_mod_get(alg); - if (!larval->adult) - goto free_larval; - - refcount_set(&larval->alg.cra_refcnt, 1); - memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, - CRYPTO_MAX_ALG_NAME); - larval->alg.cra_priority = alg->cra_priority; - list_add(&alg->cra_list, &crypto_alg_list); - list_add(&larval->alg.cra_list, &crypto_alg_list); + + if (larval) + list_add(&larval->alg.cra_list, &crypto_alg_list); + else + alg->cra_flags |= CRYPTO_ALG_TESTED; crypto_stats_init(alg); out: return larval; -free_larval: - kfree(larval); err: larval = ERR_PTR(ret); goto out; @@ -305,8 +328,16 @@ void crypto_alg_tested(const char *name, int err) found: q->cra_flags |= CRYPTO_ALG_DEAD; alg = test->adult; - if (err || list_empty(&alg->cra_list)) + + if (list_empty(&alg->cra_list)) + goto complete; + + if (err == -ECANCELED) + alg->cra_flags |= CRYPTO_ALG_FIPS_INTERNAL; + else if (err) goto complete; + else + 
alg->cra_flags &= ~CRYPTO_ALG_FIPS_INTERNAL; alg->cra_flags |= CRYPTO_ALG_TESTED; @@ -389,29 +420,10 @@ void crypto_remove_final(struct list_head *list) } EXPORT_SYMBOL_GPL(crypto_remove_final); -static void crypto_wait_for_test(struct crypto_larval *larval) -{ - int err; - - err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); - if (err != NOTIFY_STOP) { - if (WARN_ON(err != NOTIFY_DONE)) - goto out; - crypto_alg_tested(larval->alg.cra_driver_name, 0); - } - - err = wait_for_completion_killable(&larval->completion); - WARN_ON(err); - if (!err) - crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval); - -out: - crypto_larval_kill(&larval->alg); -} - int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; + bool test_started; int err; alg->cra_flags &= ~CRYPTO_ALG_DEAD; @@ -421,12 +433,16 @@ int crypto_register_alg(struct crypto_alg *alg) down_write(&crypto_alg_sem); larval = __crypto_register_alg(alg); + test_started = static_key_enabled(&crypto_boot_test_finished); + if (!IS_ERR_OR_NULL(larval)) + larval->test_started = test_started; up_write(&crypto_alg_sem); - if (IS_ERR(larval)) + if (IS_ERR_OR_NULL(larval)) return PTR_ERR(larval); - crypto_wait_for_test(larval); + if (test_started) + crypto_wait_for_test(larval); return 0; } EXPORT_SYMBOL_GPL(crypto_register_alg); @@ -602,6 +618,7 @@ int crypto_register_instance(struct crypto_template *tmpl, { struct crypto_larval *larval; struct crypto_spawn *spawn; + u32 fips_internal = 0; int err; err = crypto_check_alg(&inst->alg); @@ -624,14 +641,20 @@ int crypto_register_instance(struct crypto_template *tmpl, spawn->inst = inst; spawn->registered = true; + fips_internal |= spawn->alg->cra_flags; + crypto_mod_put(spawn->alg); spawn = next; } + inst->alg.cra_flags |= (fips_internal & CRYPTO_ALG_FIPS_INTERNAL); + larval = __crypto_register_alg(&inst->alg); if (IS_ERR(larval)) goto unlock; + else if (larval) + larval->test_started = true; hlist_add_head(&inst->list, &tmpl->instances); inst->tmpl = tmpl; @@ -640,7 +663,7 @@ unlock: up_write(&crypto_alg_sem); err = PTR_ERR(larval); - if (IS_ERR(larval)) + if (IS_ERR_OR_NULL(larval)) goto err; crypto_wait_for_test(larval); @@ -679,7 +702,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, if (IS_ERR(name)) return PTR_ERR(name); - alg = crypto_find_alg(name, spawn->frontend, type, mask); + alg = crypto_find_alg(name, spawn->frontend, + type | CRYPTO_ALG_FIPS_INTERNAL, mask); if (IS_ERR(alg)) return PTR_ERR(alg); @@ -690,6 +714,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, spawn->mask = mask; spawn->next = inst->spawns; inst->spawns = spawn; + inst->alg.cra_flags |= + (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS); err = 0; } up_write(&crypto_alg_sem); @@ -716,17 +742,27 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn); static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) { - struct crypto_alg *alg; + struct crypto_alg *alg = ERR_PTR(-EAGAIN); + struct crypto_alg *target; + bool shoot = false; down_read(&crypto_alg_sem); - alg = spawn->alg; - if (!spawn->dead && !crypto_mod_get(alg)) { - alg->cra_flags |= CRYPTO_ALG_DYING; - alg = NULL; + if (!spawn->dead) { + alg = spawn->alg; + if (!crypto_mod_get(alg)) { + target = crypto_alg_get(alg); + shoot = true; + alg = ERR_PTR(-EAGAIN); + } } up_read(&crypto_alg_sem); - return alg ?: ERR_PTR(-EAGAIN); + if (shoot) { + crypto_shoot_alg(target); + crypto_alg_put(target); + } + + return alg; } struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn 
*spawn, u32 type, @@ -806,7 +842,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) } EXPORT_SYMBOL_GPL(crypto_get_attr_type); -int crypto_check_attr_type(struct rtattr **tb, u32 type) +/** + * crypto_check_attr_type() - check algorithm type and compute inherited mask + * @tb: the template parameters + * @type: the algorithm type the template would be instantiated as + * @mask_ret: (output) the mask that should be passed to crypto_grab_*() + * to restrict the flags of any inner algorithms + * + * Validate that the algorithm type the user requested is compatible with the + * one the template would actually be instantiated as. E.g., if the user is + * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because + * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm. + * + * Also compute the mask to use to restrict the flags of any inner algorithms. + * + * Return: 0 on success; -errno on failure + */ +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret) { struct crypto_attr_type *algt; @@ -817,6 +869,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) if ((algt->type ^ type) & algt->mask) return -EINVAL; + *mask_ret = crypto_algt_inherited_mask(algt); return 0; } EXPORT_SYMBOL_GPL(crypto_check_attr_type); @@ -839,24 +892,6 @@ const char *crypto_attr_alg_name(struct rtattr *rta) } EXPORT_SYMBOL_GPL(crypto_attr_alg_name); -int crypto_attr_u32(struct rtattr *rta, u32 *num) -{ - struct crypto_attr_u32 *nu32; - - if (!rta) - return -ENOENT; - if (RTA_PAYLOAD(rta) < sizeof(*nu32)) - return -EINVAL; - if (rta->rta_type != CRYPTOA_U32) - return -EINVAL; - - nu32 = RTA_DATA(rta); - *num = nu32->num; - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_attr_u32); - int crypto_inst_setname(struct crypto_instance *inst, const char *name, struct crypto_alg *alg) { @@ -904,6 +939,14 @@ out: } EXPORT_SYMBOL_GPL(crypto_enqueue_request); +void crypto_enqueue_request_head(struct crypto_queue *queue, + struct crypto_async_request *request) +{ + queue->qlen++; + list_add(&request->list, &queue->list); +} +EXPORT_SYMBOL_GPL(crypto_enqueue_request_head); + struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) { struct list_head *request; @@ -954,59 +997,6 @@ void crypto_inc(u8 *a, unsigned int size) } EXPORT_SYMBOL_GPL(crypto_inc); -void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) -{ - int relalign = 0; - - if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { - int size = sizeof(unsigned long); - int d = (((unsigned long)dst ^ (unsigned long)src1) | - ((unsigned long)dst ^ (unsigned long)src2)) & - (size - 1); - - relalign = d ? 1 << __ffs(d) : size; - - /* - * If we care about alignment, process as many bytes as - * needed to advance dst and src to values whose alignments - * equal their relative alignment. This will allow us to - * process the remainder of the input using optimal strides. 
- */ - while (((unsigned long)dst & (relalign - 1)) && len > 0) { - *dst++ = *src1++ ^ *src2++; - len--; - } - } - - while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { - *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; - dst += 8; - src1 += 8; - src2 += 8; - len -= 8; - } - - while (len >= 4 && !(relalign & 3)) { - *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; - dst += 4; - src1 += 4; - src2 += 4; - len -= 4; - } - - while (len >= 2 && !(relalign & 1)) { - *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; - dst += 2; - src1 += 2; - src2 += 2; - len -= 2; - } - - while (len--) - *dst++ = *src1++ ^ *src2++; -} -EXPORT_SYMBOL_GPL(__crypto_xor); - unsigned int crypto_alg_extsize(struct crypto_alg *alg) { return alg->cra_ctxsize + @@ -1242,9 +1232,48 @@ void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt); #endif +static void __init crypto_start_tests(void) +{ + for (;;) { + struct crypto_larval *larval = NULL; + struct crypto_alg *q; + + down_write(&crypto_alg_sem); + + list_for_each_entry(q, &crypto_alg_list, cra_list) { + struct crypto_larval *l; + + if (!crypto_is_larval(q)) + continue; + + l = (void *)q; + + if (!crypto_is_test_larval(l)) + continue; + + if (l->test_started) + continue; + + l->test_started = true; + larval = l; + break; + } + + up_write(&crypto_alg_sem); + + if (!larval) + break; + + crypto_wait_for_test(larval); + } + + static_branch_enable(&crypto_boot_test_finished); +} + static int __init crypto_algapi_init(void) { crypto_init_proc(); + crypto_start_tests(); return 0; } @@ -1253,8 +1282,13 @@ static void __exit crypto_algapi_exit(void) crypto_exit_proc(); } -module_init(crypto_algapi_init); +/* + * We run this at late_initcall so that all the built-in algorithms + * have had a chance to register themselves first. 
+ */ +late_initcall(crypto_algapi_init); module_exit(crypto_algapi_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cryptographic algorithms API"); +MODULE_SOFTDEP("pre: cryptomgr"); diff --git a/crypto/algboss.c b/crypto/algboss.c index 535f1f87e6c1..eb5fe84efb83 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -28,16 +28,9 @@ struct cryptomgr_param { struct crypto_attr_type data; } type; - union { + struct { struct rtattr attr; - struct { - struct rtattr attr; - struct crypto_attr_alg data; - } alg; - struct { - struct rtattr attr; - struct crypto_attr_u32 data; - } nu32; + struct crypto_attr_alg data; } attrs[CRYPTO_MAX_ATTRS]; char template[CRYPTO_MAX_ALG_NAME]; @@ -74,7 +67,7 @@ out: complete_all(&param->larval->completion); crypto_alg_put(&param->larval->alg); kfree(param); - module_put_and_exit(0); + module_put_and_kthread_exit(0); } static int cryptomgr_schedule_probe(struct crypto_larval *larval) @@ -104,12 +97,10 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) i = 0; for (;;) { - int notnum = 0; - name = ++p; for (; isalnum(*p) || *p == '-' || *p == '_'; p++) - notnum |= !isdigit(*p); + ; if (*p == '(') { int recursion = 0; @@ -123,7 +114,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) break; } - notnum = 1; p++; } @@ -131,18 +121,9 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (!len) goto err_free_param; - if (notnum) { - param->attrs[i].alg.attr.rta_len = - sizeof(param->attrs[i].alg); - param->attrs[i].alg.attr.rta_type = CRYPTOA_ALG; - memcpy(param->attrs[i].alg.data.name, name, len); - } else { - param->attrs[i].nu32.attr.rta_len = - sizeof(param->attrs[i].nu32); - param->attrs[i].nu32.attr.rta_type = CRYPTOA_U32; - param->attrs[i].nu32.data.num = - simple_strtol(name, NULL, 0); - } + param->attrs[i].attr.rta_len = sizeof(param->attrs[i]); + param->attrs[i].attr.rta_type = CRYPTOA_ALG; + memcpy(param->attrs[i].data.name, name, len); param->tb[i + 1] = &param->attrs[i].attr; i++; @@ -178,8 +159,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (IS_ERR(thread)) goto err_put_larval; - wait_for_completion_interruptible(&larval->completion); - return NOTIFY_STOP; err_put_larval: @@ -211,7 +190,7 @@ skiptest: crypto_alg_tested(param->driver, err); kfree(param); - module_put_and_exit(0); + module_put_and_kthread_exit(0); } static int cryptomgr_schedule_test(struct crypto_alg *alg) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index eb1910b6d434..42493b4d8ce4 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm, SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); skcipher_request_set_sync_tfm(skreq, null_tfm); - skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG, + skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(skreq, src, dst, len, NULL); @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } @@ -120,7 +120,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, /* * Make sure sufficient data is present -- note, the same check is - * is also present in sendmsg/sendpage. 
The checks in sendpage/sendmsg + * also present in sendmsg/sendpage. The checks in sendpage/sendmsg * shall provide an information to the data sender that something is * wrong, but they are irrelevant to maintain the kernel integrity. * We need this check here too in case user space decides to not honor @@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, areq->outlen = outlen; aead_request_set_callback(&areq->cra_u.aead_req, - CRYPTO_TFM_REQ_MAY_BACKLOG, + CRYPTO_TFM_REQ_MAY_SLEEP, af_alg_async_cb, areq); err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : crypto_aead_decrypt(&areq->cra_u.aead_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) + if (err == -EINPROGRESS) return -EIOCBQUEUED; sock_put(sk); } else { /* Synchronous operation */ aead_request_set_callback(&areq->cra_u.aead_req, + CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &ctx->wait); err = crypto_wait_req(ctx->enc ? @@ -361,11 +362,9 @@ static struct proto_ops algif_aead_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = aead_sendmsg, @@ -384,7 +383,7 @@ static int aead_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -396,11 +395,8 @@ static int aead_check_key(struct socket *sock) if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -457,11 +453,9 @@ static struct proto_ops algif_aead_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = aead_sendmsg_nokey, @@ -561,12 +555,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; - ctx->aead_assoclen = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 178f4cd75ef1..1d017ec5c63c 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -83,7 +83,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, goto unlock; } - ctx->more = 0; + ctx->more = false; while (msg_data_left(msg)) { int len = msg_data_left(msg); @@ -102,11 +102,12 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, err = crypto_wait_req(crypto_ahash_update(&ctx->req), &ctx->wait); af_alg_free_sg(&ctx->sgl); - if (err) + if (err) { + iov_iter_revert(&msg->msg_iter, len); goto unlock; + } copied += len; - iov_iter_advance(&msg->msg_iter, len); } err = 0; @@ -211,7 +212,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, } if (!result || ctx->more) { - ctx->more = 0; + ctx->more = false; err = crypto_wait_req(crypto_ahash_final(&ctx->req), &ctx->wait); if (err) @@ -279,10 +280,8 @@ static struct proto_ops algif_hash_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = 
sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = hash_sendmsg, @@ -301,7 +300,7 @@ static int hash_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -313,11 +312,8 @@ static int hash_check_key(struct socket *sock) if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -386,10 +382,8 @@ static struct proto_ops algif_hash_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = hash_sendmsg_nokey, @@ -436,7 +430,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ctx->result = NULL; ctx->len = len; - ctx->more = 0; + ctx->more = false; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 22df3799a17b..407408c43730 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -38,6 +38,7 @@ * DAMAGE. */ +#include <linux/capability.h> #include <linux/module.h> #include <crypto/rng.h> #include <linux/random.h> @@ -53,15 +54,26 @@ struct rng_ctx { #define MAXSIZE 128 unsigned int len; struct crypto_rng *drng; + u8 *addtl; + size_t addtl_len; }; -static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, - int flags) +struct rng_parent_ctx { + struct crypto_rng *drng; + u8 *entropy; +}; + +static void rng_reset_addtl(struct rng_ctx *ctx) { - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct rng_ctx *ctx = ask->private; - int err = -EFAULT; + kfree_sensitive(ctx->addtl); + ctx->addtl = NULL; + ctx->addtl_len = 0; +} + +static int _rng_recvmsg(struct crypto_rng *drng, struct msghdr *msg, size_t len, + u8 *addtl, size_t addtl_len) +{ + int err = 0; int genlen = 0; u8 result[MAXSIZE]; @@ -82,7 +94,7 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, * seeding as they automatically seed. The X9.31 DRNG will return * an error if it was not seeded properly. */ - genlen = crypto_rng_get_bytes(ctx->drng, result, len); + genlen = crypto_rng_generate(drng, addtl, addtl_len, result, len); if (genlen < 0) return genlen; @@ -92,6 +104,63 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, return err ? 
err : len; } +static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct rng_ctx *ctx = ask->private; + + return _rng_recvmsg(ctx->drng, msg, len, NULL, 0); +} + +static int rng_test_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct rng_ctx *ctx = ask->private; + int ret; + + lock_sock(sock->sk); + ret = _rng_recvmsg(ctx->drng, msg, len, ctx->addtl, ctx->addtl_len); + rng_reset_addtl(ctx); + release_sock(sock->sk); + + return ret; +} + +static int rng_test_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) +{ + int err; + struct alg_sock *ask = alg_sk(sock->sk); + struct rng_ctx *ctx = ask->private; + + lock_sock(sock->sk); + if (len > MAXSIZE) { + err = -EMSGSIZE; + goto unlock; + } + + rng_reset_addtl(ctx); + ctx->addtl = kmalloc(len, GFP_KERNEL); + if (!ctx->addtl) { + err = -ENOMEM; + goto unlock; + } + + err = memcpy_from_msg(ctx->addtl, msg, len); + if (err) { + rng_reset_addtl(ctx); + goto unlock; + } + ctx->addtl_len = len; + +unlock: + release_sock(sock->sk); + return err ? err : len; +} + static struct proto_ops algif_rng_ops = { .family = PF_ALG, @@ -101,11 +170,9 @@ static struct proto_ops algif_rng_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .sendmsg = sock_no_sendmsg, .sendpage = sock_no_sendpage, @@ -113,14 +180,53 @@ static struct proto_ops algif_rng_ops = { .recvmsg = rng_recvmsg, }; +static struct proto_ops __maybe_unused algif_rng_test_ops = { + .family = PF_ALG, + + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .getname = sock_no_getname, + .ioctl = sock_no_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .mmap = sock_no_mmap, + .bind = sock_no_bind, + .accept = sock_no_accept, + .sendpage = sock_no_sendpage, + + .release = af_alg_release, + .recvmsg = rng_test_recvmsg, + .sendmsg = rng_test_sendmsg, +}; + static void *rng_bind(const char *name, u32 type, u32 mask) { - return crypto_alloc_rng(name, type, mask); + struct rng_parent_ctx *pctx; + struct crypto_rng *rng; + + pctx = kzalloc(sizeof(*pctx), GFP_KERNEL); + if (!pctx) + return ERR_PTR(-ENOMEM); + + rng = crypto_alloc_rng(name, type, mask); + if (IS_ERR(rng)) { + kfree(pctx); + return ERR_CAST(rng); + } + + pctx->drng = rng; + return pctx; } static void rng_release(void *private) { - crypto_free_rng(private); + struct rng_parent_ctx *pctx = private; + + if (unlikely(!pctx)) + return; + crypto_free_rng(pctx->drng); + kfree_sensitive(pctx->entropy); + kfree_sensitive(pctx); } static void rng_sock_destruct(struct sock *sk) @@ -128,6 +234,7 @@ static void rng_sock_destruct(struct sock *sk) struct alg_sock *ask = alg_sk(sk); struct rng_ctx *ctx = ask->private; + rng_reset_addtl(ctx); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); } @@ -135,6 +242,7 @@ static void rng_sock_destruct(struct sock *sk) static int rng_accept_parent(void *private, struct sock *sk) { struct rng_ctx *ctx; + struct rng_parent_ctx *pctx = private; struct alg_sock *ask = alg_sk(sk); unsigned int len = sizeof(*ctx); @@ -143,6 +251,8 @@ static int rng_accept_parent(void *private, struct sock *sk) return -ENOMEM; ctx->len = len; + ctx->addtl = NULL; + ctx->addtl_len = 0; /* * No seeding done at 
that point -- if multiple accepts are @@ -150,20 +260,58 @@ static int rng_accept_parent(void *private, struct sock *sk) * state of the RNG. */ - ctx->drng = private; + ctx->drng = pctx->drng; ask->private = ctx; sk->sk_destruct = rng_sock_destruct; + /* + * A non-NULL pctx->entropy means that a CAVP test has been initiated on + * this socket, so replace proto_ops algif_rng_ops with algif_rng_test_ops. + */ + if (IS_ENABLED(CONFIG_CRYPTO_USER_API_RNG_CAVP) && pctx->entropy) + sk->sk_socket->ops = &algif_rng_test_ops; + return 0; } static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen) { + struct rng_parent_ctx *pctx = private; /* * Check whether seedlen is of sufficient size is done in RNG * implementations. */ - return crypto_rng_reset(private, seed, seedlen); + return crypto_rng_reset(pctx->drng, seed, seedlen); +} + +static int __maybe_unused rng_setentropy(void *private, sockptr_t entropy, + unsigned int len) +{ + struct rng_parent_ctx *pctx = private; + u8 *kentropy = NULL; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (pctx->entropy) + return -EINVAL; + + if (len > MAXSIZE) + return -EMSGSIZE; + + if (len) { + kentropy = memdup_sockptr(entropy, len); + if (IS_ERR(kentropy)) + return PTR_ERR(kentropy); + } + + crypto_rng_alg(pctx->drng)->set_ent(pctx->drng, kentropy, len); + /* + * Since rng doesn't perform any memory management for the entropy + * buffer, save the kentropy pointer to pctx now so it can be freed after use. + */ + pctx->entropy = kentropy; + return 0; } static const struct af_alg_type algif_type_rng = { @@ -171,6 +319,9 @@ static const struct af_alg_type algif_type_rng = { .release = rng_release, .accept = rng_accept_parent, .setkey = rng_setkey, +#ifdef CONFIG_CRYPTO_USER_API_RNG_CAVP + .setentropy = rng_setentropy, +#endif .ops = &algif_rng_ops, .name = "rng", .owner = THIS_MODULE diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index e2c8ab408bed..ee8890ee8f33 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } @@ -74,14 +74,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, return PTR_ERR(areq); /* convert iovecs of output buffers into RX SGL */ - err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); + err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); if (err) goto free; - /* Process only as much RX buffers for which we have TX data */ - if (len > ctx->used) - len = ctx->used; - /* * If more buffers are to be expected to be processed, process only * full block size buffers. 
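For orientation, the setentropy/ALG_SET_DRBG_ENTROPY plumbing above is driven from user space over the ordinary AF_ALG socket protocol. What follows is a rough, hypothetical sketch of how a CAVP harness might use it; it is not part of this patch, it assumes a kernel built with CONFIG_CRYPTO_USER_API_RNG_CAVP=y and CAP_SYS_ADMIN, and the buffer sizes and fallback constant definitions are illustrative:

#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279			/* fallback for older libc headers */
#endif
#ifndef ALG_SET_DRBG_ENTROPY
#define ALG_SET_DRBG_ENTROPY 6		/* from linux/if_alg.h; value assumed here */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "rng",
		.salg_name   = "drbg_nopr_hmac_sha256",
	};
	unsigned char entropy[48] = { 0 };	/* fixed test entropy (demo only) */
	unsigned char pers[32] = { 0 };		/* personalization string, sent as the "key" */
	unsigned char addtl[16] = { 0 };	/* additional input for one generate call */
	unsigned char out[64];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* Must precede accept(): alg_setsockopt() refuses entropy injection
	 * on a connected socket, and rng_setentropy() wants CAP_SYS_ADMIN. */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_DRBG_ENTROPY, entropy, sizeof(entropy));

	/* Seed the DRBG; rng_setkey() maps this to crypto_rng_reset(). */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, pers, sizeof(pers));

	opfd = accept(tfmfd, NULL, 0);

	/* Because entropy was injected, accept() installed algif_rng_test_ops,
	 * so sendmsg() stashes additional data for the next generate call. */
	send(opfd, addtl, sizeof(addtl), 0);
	read(opfd, out, sizeof(out));

	close(opfd);
	close(tfmfd);
	return 0;
}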
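Similarly, for the algif_skcipher receive path being tightened here, a minimal user-space caller looks roughly like the sketch below. This is the well-known AF_ALG skcipher pattern, not something introduced by this patch; the all-zero key and IV are placeholders and error handling is omitted for brevity:

#include <stddef.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279			/* fallback for older libc headers */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = { 0 };		/* demo key: all zeroes */
	unsigned char pt[16] = { 0 };		/* one AES block of plaintext */
	unsigned char ct[16];
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *iv;
	struct iovec iov;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
	iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	iv->ivlen = 16;				/* IV bytes stay zero from the zeroed cbuf */

	iov.iov_base = pt;
	iov.iov_len = sizeof(pt);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* No MSG_MORE: the request is complete, so the recvmsg() side does
	 * not need to wait for further data (cf. the ctx->more logic above). */
	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));

	close(opfd);
	close(tfmfd);
	return 0;
}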
@@ -127,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) + if (err == -EINPROGRESS) return -EIOCBQUEUED; sock_put(sk); @@ -192,11 +188,9 @@ static struct proto_ops algif_skcipher_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg, @@ -215,7 +209,7 @@ static int skcipher_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -227,11 +221,8 @@ static int skcipher_check_key(struct socket *sock) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -288,11 +279,9 @@ static struct proto_ops algif_skcipher_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg_nokey, @@ -340,6 +329,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; + memset(ctx, 0, len); ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm), GFP_KERNEL); @@ -347,16 +337,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) sock_kfree_s(sk, ctx, len); return -ENOMEM; } - memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm)); INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index c475c1129ff2..3f512efaba3a 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -7,6 +7,7 @@ * (C) Neil Horman <nhorman@tuxdriver.com> */ +#include <crypto/internal/cipher.h> #include <crypto/internal/rng.h> #include <linux/err.h> #include <linux/init.h> @@ -470,3 +471,4 @@ subsys_initcall(prng_mod_init); module_exit(prng_mod_fini); MODULE_ALIAS_CRYPTO("stdrng"); MODULE_ALIAS_CRYPTO("ansi_cprng"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/api.c b/crypto/api.c index 7d71a9b10e5f..64f2d365a8e9 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -12,6 +12,7 @@ #include <linux/err.h> #include <linux/errno.h> +#include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/module.h> @@ -30,6 +31,9 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); +DEFINE_STATIC_KEY_FALSE(crypto_boot_test_finished); +EXPORT_SYMBOL_GPL(crypto_boot_test_finished); + static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) @@ -47,11 +51,6 @@ void crypto_mod_put(struct crypto_alg *alg) } EXPORT_SYMBOL_GPL(crypto_mod_put); -static inline int crypto_is_test_larval(struct crypto_larval *larval) -{ - return larval->alg.cra_driver_name[0]; -} - static struct crypto_alg 
*__crypto_alg_lookup(const char *name, u32 type, u32 mask) { @@ -115,7 +114,7 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) larval->alg.cra_priority = -1; larval->alg.cra_destroy = crypto_larval_destroy; - strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); + strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); init_completion(&larval->completion); return larval; @@ -163,11 +162,52 @@ void crypto_larval_kill(struct crypto_alg *alg) } EXPORT_SYMBOL_GPL(crypto_larval_kill); +void crypto_wait_for_test(struct crypto_larval *larval) +{ + int err; + + err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); + if (WARN_ON_ONCE(err != NOTIFY_STOP)) + goto out; + + err = wait_for_completion_killable(&larval->completion); + WARN_ON(err); + if (!err) + crypto_notify(CRYPTO_MSG_ALG_LOADED, larval); + +out: + crypto_larval_kill(&larval->alg); +} +EXPORT_SYMBOL_GPL(crypto_wait_for_test); + +static void crypto_start_test(struct crypto_larval *larval) +{ + if (!crypto_is_test_larval(larval)) + return; + + if (larval->test_started) + return; + + down_write(&crypto_alg_sem); + if (larval->test_started) { + up_write(&crypto_alg_sem); + return; + } + + larval->test_started = true; + up_write(&crypto_alg_sem); + + crypto_wait_for_test(larval); +} + static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) { struct crypto_larval *larval = (void *)alg; long timeout; + if (!static_branch_likely(&crypto_boot_test_finished)) + crypto_start_test(larval); + timeout = wait_for_completion_killable_timeout( &larval->completion, 60 * HZ); @@ -183,6 +223,8 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) alg = ERR_PTR(-EAGAIN); + else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) + alg = ERR_PTR(-EAGAIN); else if (!crypto_mod_get(alg)) alg = ERR_PTR(-EAGAIN); crypto_mod_put(&larval->alg); @@ -193,6 +235,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) { + const u32 fips = CRYPTO_ALG_FIPS_INTERNAL; struct crypto_alg *alg; u32 test = 0; @@ -200,8 +243,20 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, test |= CRYPTO_ALG_TESTED; down_read(&crypto_alg_sem); - alg = __crypto_alg_lookup(name, type | test, mask | test); - if (!alg && test) { + alg = __crypto_alg_lookup(name, (type | test) & ~fips, + (mask | test) & ~fips); + if (alg) { + if (((type | mask) ^ fips) & fips) + mask |= fips; + mask &= fips; + + if (!crypto_is_larval(alg) && + ((type ^ alg->cra_flags) & mask)) { + /* Algorithm is disallowed in FIPS mode. */ + crypto_mod_put(alg); + alg = ERR_PTR(-ENOENT); + } + } else if (test) { alg = __crypto_alg_lookup(name, type, mask); if (alg && !crypto_is_larval(alg)) { /* Test failed */ @@ -266,7 +321,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) /* * If the internal flag is set for a cipher, require a caller to - * to invoke the cipher with the internal flag to use that cipher. + * invoke the cipher with the internal flag to use that cipher. * Also, if a caller wants to allocate a cipher that may or may * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and * !(mask & CRYPTO_ALG_INTERNAL). 
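To make the CRYPTO_ALG_INTERNAL convention spelled out in the comment above concrete, here is a short sketch of the two caller-side patterns; the choice of "sha256" is purely illustrative and not taken from this patch:

#include <crypto/hash.h>

/* Ordinary caller: type 0, mask 0.  The lookup core adds the
 * CRYPTO_ALG_INTERNAL bit to the mask by itself, so implementations
 * flagged as internal can never be returned; passing the flag
 * explicitly in the mask is equivalent. */
static struct crypto_shash *get_public_sha256(void)
{
	return crypto_alloc_shash("sha256", 0, 0);
}

/* Caller that may legitimately end up with an internal implementation
 * (e.g. a wrapper such as cryptd or simd): set the bit in type and keep
 * it out of the mask, i.e. type | CRYPTO_ALG_INTERNAL and
 * !(mask & CRYPTO_ALG_INTERNAL), exactly as the comment above says. */
static struct crypto_shash *get_any_sha256(void)
{
	return crypto_alloc_shash("sha256", CRYPTO_ALG_INTERNAL, 0);
}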
@@ -333,12 +388,13 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) return len; } -static void crypto_shoot_alg(struct crypto_alg *alg) +void crypto_shoot_alg(struct crypto_alg *alg) { down_write(&crypto_alg_sem); alg->cra_flags |= CRYPTO_ALG_DYING; up_write(&crypto_alg_sem); } +EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask) @@ -432,8 +488,9 @@ err: } EXPORT_SYMBOL_GPL(crypto_alloc_base); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend) +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, + int node) { char *mem; struct crypto_tfm *tfm = NULL; @@ -444,12 +501,13 @@ void *crypto_create_tfm(struct crypto_alg *alg, tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); - mem = kzalloc(total, GFP_KERNEL); + mem = kzalloc_node(total, GFP_KERNEL, node); if (mem == NULL) goto out_err; tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; + tfm->node = node; err = frontend->init_tfm(tfm); if (err) @@ -471,7 +529,7 @@ out_err: out: return mem; } -EXPORT_SYMBOL_GPL(crypto_create_tfm); +EXPORT_SYMBOL_GPL(crypto_create_tfm_node); struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, @@ -489,11 +547,13 @@ struct crypto_alg *crypto_find_alg, EXPORT_SYMBOL_GPL(crypto_find_alg); /* - * crypto_alloc_tfm - Locate algorithm and allocate transform + * crypto_alloc_tfm_node - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @frontend: Frontend algorithm type * @type: Type of algorithm * @mask: Mask for type comparison + * @node: NUMA node on which to allocate the transform; if @node is + * NUMA_NO_NODE, the caller has no special locality requirement. * * crypto_alloc_tfm() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable @@ -508,8 +568,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg); * * In case of error the return value is an error pointer. 
*/ -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask) + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node) { void *tfm; int err; @@ -523,7 +585,7 @@ void *crypto_alloc_tfm(const char *alg_name, goto err; } - tfm = crypto_create_tfm(alg, frontend); + tfm = crypto_create_tfm_node(alg, frontend, node); if (!IS_ERR(tfm)) return tfm; @@ -541,7 +603,7 @@ err: return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(crypto_alloc_tfm); +EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node); /* * crypto_destroy_tfm - Free crypto transform @@ -555,7 +617,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) { struct crypto_alg *alg; - if (unlikely(!mem)) + if (IS_ERR_OR_NULL(mem)) return; alg = tfm->__crt_alg; @@ -564,7 +626,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) alg->cra_exit(tfm); crypto_exit_ops(tfm); crypto_mod_put(alg); - kzfree(mem); + kfree_sensitive(mem); } EXPORT_SYMBOL_GPL(crypto_destroy_tfm); @@ -596,4 +658,3 @@ EXPORT_SYMBOL_GPL(crypto_req_done); MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); -MODULE_SOFTDEP("pre: cryptomgr"); diff --git a/crypto/arc4.c b/crypto/arc4.c index aa79571dbd49..3254dcc34368 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -11,7 +11,9 @@ #include <crypto/arc4.h> #include <crypto/internal/skcipher.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/sched.h> static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) @@ -39,6 +41,14 @@ static int crypto_arc4_crypt(struct skcipher_request *req) return err; } +static int crypto_arc4_init(struct crypto_skcipher *tfm) +{ + pr_warn_ratelimited("\"%s\" (%ld) uses obsolete ecb(arc4) skcipher\n", + current->comm, (unsigned long)current->pid); + + return 0; +} + static struct skcipher_alg arc4_alg = { /* * For legacy reasons, this is named "ecb(arc4)", not "arc4". @@ -55,6 +65,7 @@ static struct skcipher_alg arc4_alg = { .setkey = crypto_arc4_setkey, .encrypt = crypto_arc4_crypt, .decrypt = crypto_arc4_crypt, + .init = crypto_arc4_init, }; static int __init arc4_init(void) diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c new file mode 100644 index 000000000000..4cc29b82b99d --- /dev/null +++ b/crypto/aria_generic.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Cryptographic API. + * + * ARIA Cipher Algorithm. + * + * Documentation of ARIA can be found in RFC 5794. + * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com> + * + * Information for ARIA + * http://210.104.33.10/ARIA/index-e.html (English) + * http://seed.kisa.or.kr/ (Korean) + * + * Public domain version is distributed above. 
+ */ + +#include <crypto/aria.h> + +static const u32 key_rc[20] = { + 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, + 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0, + 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e, + 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, + 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 +}; + +static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, + unsigned int key_len) +{ + const __be32 *key = (const __be32 *)in_key; + u32 w0[4], w1[4], w2[4], w3[4]; + u32 reg0, reg1, reg2, reg3; + const u32 *ck; + int rkidx = 0; + + ck = &key_rc[(key_len - 16) / 2]; + + w0[0] = be32_to_cpu(key[0]); + w0[1] = be32_to_cpu(key[1]); + w0[2] = be32_to_cpu(key[2]); + w0[3] = be32_to_cpu(key[3]); + + reg0 = w0[0] ^ ck[0]; + reg1 = w0[1] ^ ck[1]; + reg2 = w0[2] ^ ck[2]; + reg3 = w0[3] ^ ck[3]; + + aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); + + if (key_len > 16) { + w1[0] = be32_to_cpu(key[4]); + w1[1] = be32_to_cpu(key[5]); + if (key_len > 24) { + w1[2] = be32_to_cpu(key[6]); + w1[3] = be32_to_cpu(key[7]); + } else { + w1[2] = 0; + w1[3] = 0; + } + } else { + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + } + + w1[0] ^= reg0; + w1[1] ^= reg1; + w1[2] ^= reg2; + w1[3] ^= reg3; + + reg0 = w1[0]; + reg1 = w1[1]; + reg2 = w1[2]; + reg3 = w1[3]; + + reg0 ^= ck[4]; + reg1 ^= ck[5]; + reg2 ^= ck[6]; + reg3 ^= ck[7]; + + aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3); + + reg0 ^= w0[0]; + reg1 ^= w0[1]; + reg2 ^= w0[2]; + reg3 ^= w0[3]; + + w2[0] = reg0; + w2[1] = reg1; + w2[2] = reg2; + w2[3] = reg3; + + reg0 ^= ck[8]; + reg1 ^= ck[9]; + reg2 ^= ck[10]; + reg3 ^= ck[11]; + + aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); + + w3[0] = reg0 ^ w1[0]; + w3[1] = reg1 ^ w1[1]; + w3[2] = reg2 ^ w1[2]; + w3[3] = reg3 ^ w1[3]; + + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 19); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 31); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 67); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 97); + if (key_len > 16) { + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 97); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 97); + + if (key_len > 24) { + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 97); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 109); + } + } +} + +static void aria_set_decrypt_key(struct aria_ctx *ctx) +{ + int i; + + for (i = 0; i < 4; i++) { + ctx->dec_key[0][i] = ctx->enc_key[ctx->rounds][i]; + ctx->dec_key[ctx->rounds][i] = ctx->enc_key[0][i]; + } + + for (i = 1; i < ctx->rounds; i++) { + ctx->dec_key[i][0] = aria_m(ctx->enc_key[ctx->rounds - i][0]); + ctx->dec_key[i][1] = aria_m(ctx->enc_key[ctx->rounds - i][1]); + ctx->dec_key[i][2] = aria_m(ctx->enc_key[ctx->rounds - i][2]); + ctx->dec_key[i][3] = aria_m(ctx->enc_key[ctx->rounds - i][3]); + + aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1], + &ctx->dec_key[i][2], &ctx->dec_key[i][3]); + aria_diff_byte(&ctx->dec_key[i][1], + &ctx->dec_key[i][2], &ctx->dec_key[i][3]); + aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1], + &ctx->dec_key[i][2], 
&ctx->dec_key[i][3]); + } +} + +int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + if (key_len != 16 && key_len != 24 && key_len != 32) + return -EINVAL; + + ctx->key_length = key_len; + ctx->rounds = (key_len + 32) / 4; + + aria_set_encrypt_key(ctx, in_key, key_len); + aria_set_decrypt_key(ctx); + + return 0; +} +EXPORT_SYMBOL_GPL(aria_set_key); + +static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in, + u32 key[][ARIA_RD_KEY_WORDS]) +{ + const __be32 *src = (const __be32 *)in; + __be32 *dst = (__be32 *)out; + u32 reg0, reg1, reg2, reg3; + int rounds, rkidx = 0; + + rounds = ctx->rounds; + + reg0 = be32_to_cpu(src[0]); + reg1 = be32_to_cpu(src[1]); + reg2 = be32_to_cpu(src[2]); + reg3 = be32_to_cpu(src[3]); + + aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); + rkidx++; + + aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); + aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); + rkidx++; + + while ((rounds -= 2) > 0) { + aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3); + aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); + rkidx++; + + aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); + aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); + rkidx++; + } + + reg0 = key[rkidx][0] ^ make_u32((u8)(x1[get_u8(reg0, 0)]), + (u8)(x2[get_u8(reg0, 1)] >> 8), + (u8)(s1[get_u8(reg0, 2)]), + (u8)(s2[get_u8(reg0, 3)])); + reg1 = key[rkidx][1] ^ make_u32((u8)(x1[get_u8(reg1, 0)]), + (u8)(x2[get_u8(reg1, 1)] >> 8), + (u8)(s1[get_u8(reg1, 2)]), + (u8)(s2[get_u8(reg1, 3)])); + reg2 = key[rkidx][2] ^ make_u32((u8)(x1[get_u8(reg2, 0)]), + (u8)(x2[get_u8(reg2, 1)] >> 8), + (u8)(s1[get_u8(reg2, 2)]), + (u8)(s2[get_u8(reg2, 3)])); + reg3 = key[rkidx][3] ^ make_u32((u8)(x1[get_u8(reg3, 0)]), + (u8)(x2[get_u8(reg3, 1)] >> 8), + (u8)(s1[get_u8(reg3, 2)]), + (u8)(s2[get_u8(reg3, 3)])); + + dst[0] = cpu_to_be32(reg0); + dst[1] = cpu_to_be32(reg1); + dst[2] = cpu_to_be32(reg2); + dst[3] = cpu_to_be32(reg3); +} + +void aria_encrypt(void *_ctx, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = (struct aria_ctx *)_ctx; + + __aria_crypt(ctx, out, in, ctx->enc_key); +} +EXPORT_SYMBOL_GPL(aria_encrypt); + +void aria_decrypt(void *_ctx, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = (struct aria_ctx *)_ctx; + + __aria_crypt(ctx, out, in, ctx->dec_key); +} +EXPORT_SYMBOL_GPL(aria_decrypt); + +static void __aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + __aria_crypt(ctx, out, in, ctx->enc_key); +} + +static void __aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + __aria_crypt(ctx, out, in, ctx->dec_key); +} + +static struct crypto_alg aria_alg = { + .cra_name = "aria", + .cra_driver_name = "aria-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = ARIA_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aria_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = ARIA_MIN_KEY_SIZE, + .cia_max_keysize = ARIA_MAX_KEY_SIZE, + .cia_setkey = aria_set_key, + .cia_encrypt = __aria_encrypt, + .cia_decrypt = __aria_decrypt + } + } +}; + +static int __init aria_init(void) +{ + return crypto_register_alg(&aria_alg); +} + +static void __exit aria_fini(void) +{ + crypto_unregister_alg(&aria_alg); +} + +subsys_initcall(aria_init); +module_exit(aria_fini); + +MODULE_DESCRIPTION("ARIA Cipher Algorithm"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>"); 
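One quick worked check of the round count computed in aria_set_key() above, against RFC 5794 (which specifies 12, 14 and 16 rounds for 128-, 192- and 256-bit keys):

/* ctx->rounds = (key_len + 32) / 4:
 *   key_len 16 -> (16 + 32) / 4 = 12 rounds
 *   key_len 24 -> (24 + 32) / 4 = 14 rounds
 *   key_len 32 -> (32 + 32) / 4 = 16 rounds
 */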
+MODULE_ALIAS_CRYPTO("aria"); +MODULE_ALIAS_CRYPTO("aria-generic"); diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 1f1f004dc757..3df3fe4ed95f 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -22,18 +22,6 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE appropriate hash algorithms (such as SHA-1) must be available. ENOPKG will be reported if the requisite algorithm is unavailable. -config ASYMMETRIC_TPM_KEY_SUBTYPE - tristate "Asymmetric TPM backed private key subtype" - depends on TCG_TPM - depends on TRUSTED_KEYS - select CRYPTO_HMAC - select CRYPTO_SHA1 - select CRYPTO_HASH_INFO - help - This option provides support for TPM backed private key type handling. - Operations such as sign, verify, encrypt, decrypt are performed by - the TPM after the private key is loaded. - config X509_CERTIFICATE_PARSER tristate "X.509 certificate parser" depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE @@ -54,15 +42,6 @@ config PKCS8_PRIVATE_KEY_PARSER private key data and provides the ability to instantiate a crypto key from that data. -config TPM_KEY_PARSER - tristate "TPM private key parser" - depends on ASYMMETRIC_TPM_KEY_SUBTYPE - select ASN1 - help - This option provides support for parsing TPM format blobs for - private key data and provides the ability to instantiate a crypto key - from that data. - config PKCS7_MESSAGE_PARSER tristate "PKCS#7 message parser" depends on X509_CERTIFICATE_PARSER @@ -96,4 +75,14 @@ config SIGNED_PE_FILE_VERIFICATION This option provides support for verifying the signature(s) on a signed PE binary. +config FIPS_SIGNATURE_SELFTEST + bool "Run FIPS selftests on the X.509+PKCS7 signature verification" + help + This option causes some selftests to be run on the signature + verification code, using some built in data. This is required + for FIPS. 
+ depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + depends on PKCS7_MESSAGE_PARSER + endif # ASYMMETRIC_KEY_TYPE diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile index 28b91adba2ae..0d1fa1b692c6 100644 --- a/crypto/asymmetric_keys/Makefile +++ b/crypto/asymmetric_keys/Makefile @@ -11,7 +11,6 @@ asymmetric_keys-y := \ signature.o obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o -obj-$(CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE) += asym_tpm.o # # X.509 Certificate handling @@ -21,7 +20,9 @@ x509_key_parser-y := \ x509.asn1.o \ x509_akid.asn1.o \ x509_cert_parser.o \ + x509_loader.o \ x509_public_key.o +x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o $(obj)/x509_cert_parser.o: \ $(obj)/x509.asn1.h \ @@ -75,14 +76,3 @@ verify_signed_pefile-y := \ $(obj)/mscode_parser.o: $(obj)/mscode.asn1.h $(obj)/mscode.asn1.h $(obj)/mscode.asn1.o: $(obj)/mscode.asn1.c $(obj)/mscode.asn1.h - -# -# TPM private key parsing -# -obj-$(CONFIG_TPM_KEY_PARSER) += tpm_key_parser.o -tpm_key_parser-y := \ - tpm.asn1.o \ - tpm_parser.o - -$(obj)/tpm_parser.o: $(obj)/tpm.asn1.h -$(obj)/tpm.asn1.o: $(obj)/tpm.asn1.c $(obj)/tpm.asn1.h diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c deleted file mode 100644 index 378b18b9bc34..000000000000 --- a/crypto/asymmetric_keys/asym_tpm.c +++ /dev/null @@ -1,957 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#define pr_fmt(fmt) "ASYM-TPM: "fmt -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/export.h> -#include <linux/kernel.h> -#include <linux/seq_file.h> -#include <linux/scatterlist.h> -#include <linux/tpm.h> -#include <linux/tpm_command.h> -#include <crypto/akcipher.h> -#include <crypto/hash.h> -#include <crypto/sha.h> -#include <asm/unaligned.h> -#include <keys/asymmetric-subtype.h> -#include <keys/trusted_tpm.h> -#include <crypto/asym_tpm_subtype.h> -#include <crypto/public_key.h> - -#define TPM_ORD_FLUSHSPECIFIC 186 -#define TPM_ORD_LOADKEY2 65 -#define TPM_ORD_UNBIND 30 -#define TPM_ORD_SIGN 60 - -#define TPM_RT_KEY 0x00000001 - -/* - * Load a TPM key from the blob provided by userspace - */ -static int tpm_loadkey2(struct tpm_buf *tb, - uint32_t keyhandle, unsigned char *keyauth, - const unsigned char *keyblob, int keybloblen, - uint32_t *newhandle) -{ - unsigned char nonceodd[TPM_NONCE_SIZE]; - unsigned char enonce[TPM_NONCE_SIZE]; - unsigned char authdata[SHA1_DIGEST_SIZE]; - uint32_t authhandle = 0; - unsigned char cont = 0; - uint32_t ordinal; - int ret; - - ordinal = htonl(TPM_ORD_LOADKEY2); - - /* session for loading the key */ - ret = oiap(tb, &authhandle, enonce); - if (ret < 0) { - pr_info("oiap failed (%d)\n", ret); - return ret; - } - - /* generate odd nonce */ - ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); - if (ret < 0) { - pr_info("tpm_get_random failed (%d)\n", ret); - return ret; - } - - /* calculate authorization HMAC value */ - ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, - nonceodd, cont, sizeof(uint32_t), &ordinal, - keybloblen, keyblob, 0, 0); - if (ret < 0) - return ret; - - /* build the request buffer */ - tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_LOADKEY2); - tpm_buf_append_u32(tb, keyhandle); - tpm_buf_append(tb, keyblob, keybloblen); - tpm_buf_append_u32(tb, authhandle); - tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE); - tpm_buf_append_u8(tb, cont); - tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE); - - ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); - if (ret < 0) { - pr_info("authhmac failed (%d)\n", ret); - 
return ret; - } - - ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, keyauth, - SHA1_DIGEST_SIZE, 0, 0); - if (ret < 0) { - pr_info("TSS_checkhmac1 failed (%d)\n", ret); - return ret; - } - - *newhandle = LOAD32(tb->data, TPM_DATA_OFFSET); - return 0; -} - -/* - * Execute the FlushSpecific TPM command - */ -static int tpm_flushspecific(struct tpm_buf *tb, uint32_t handle) -{ - tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_FLUSHSPECIFIC); - tpm_buf_append_u32(tb, handle); - tpm_buf_append_u32(tb, TPM_RT_KEY); - - return trusted_tpm_send(tb->data, MAX_BUF_SIZE); -} - -/* - * Decrypt a blob provided by userspace using a specific key handle. - * The handle is a well known handle or previously loaded by e.g. LoadKey2 - */ -static int tpm_unbind(struct tpm_buf *tb, - uint32_t keyhandle, unsigned char *keyauth, - const unsigned char *blob, uint32_t bloblen, - void *out, uint32_t outlen) -{ - unsigned char nonceodd[TPM_NONCE_SIZE]; - unsigned char enonce[TPM_NONCE_SIZE]; - unsigned char authdata[SHA1_DIGEST_SIZE]; - uint32_t authhandle = 0; - unsigned char cont = 0; - uint32_t ordinal; - uint32_t datalen; - int ret; - - ordinal = htonl(TPM_ORD_UNBIND); - datalen = htonl(bloblen); - - /* session for loading the key */ - ret = oiap(tb, &authhandle, enonce); - if (ret < 0) { - pr_info("oiap failed (%d)\n", ret); - return ret; - } - - /* generate odd nonce */ - ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); - if (ret < 0) { - pr_info("tpm_get_random failed (%d)\n", ret); - return ret; - } - - /* calculate authorization HMAC value */ - ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, - nonceodd, cont, sizeof(uint32_t), &ordinal, - sizeof(uint32_t), &datalen, - bloblen, blob, 0, 0); - if (ret < 0) - return ret; - - /* build the request buffer */ - tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_UNBIND); - tpm_buf_append_u32(tb, keyhandle); - tpm_buf_append_u32(tb, bloblen); - tpm_buf_append(tb, blob, bloblen); - tpm_buf_append_u32(tb, authhandle); - tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE); - tpm_buf_append_u8(tb, cont); - tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE); - - ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); - if (ret < 0) { - pr_info("authhmac failed (%d)\n", ret); - return ret; - } - - datalen = LOAD32(tb->data, TPM_DATA_OFFSET); - - ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, - keyauth, SHA1_DIGEST_SIZE, - sizeof(uint32_t), TPM_DATA_OFFSET, - datalen, TPM_DATA_OFFSET + sizeof(uint32_t), - 0, 0); - if (ret < 0) { - pr_info("TSS_checkhmac1 failed (%d)\n", ret); - return ret; - } - - memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), - min(outlen, datalen)); - - return datalen; -} - -/* - * Sign a blob provided by userspace (that has had the hash function applied) - * using a specific key handle. The handle is assumed to have been previously - * loaded by e.g. LoadKey2. - * - * Note that the key signature scheme of the used key should be set to - * TPM_SS_RSASSAPKCS1v15_DER. This allows the hashed input to be of any size - * up to key_length_in_bytes - 11 and not be limited to size 20 like the - * TPM_SS_RSASSAPKCS1v15_SHA1 signature scheme. 
- */ -static int tpm_sign(struct tpm_buf *tb, - uint32_t keyhandle, unsigned char *keyauth, - const unsigned char *blob, uint32_t bloblen, - void *out, uint32_t outlen) -{ - unsigned char nonceodd[TPM_NONCE_SIZE]; - unsigned char enonce[TPM_NONCE_SIZE]; - unsigned char authdata[SHA1_DIGEST_SIZE]; - uint32_t authhandle = 0; - unsigned char cont = 0; - uint32_t ordinal; - uint32_t datalen; - int ret; - - ordinal = htonl(TPM_ORD_SIGN); - datalen = htonl(bloblen); - - /* session for loading the key */ - ret = oiap(tb, &authhandle, enonce); - if (ret < 0) { - pr_info("oiap failed (%d)\n", ret); - return ret; - } - - /* generate odd nonce */ - ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); - if (ret < 0) { - pr_info("tpm_get_random failed (%d)\n", ret); - return ret; - } - - /* calculate authorization HMAC value */ - ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, - nonceodd, cont, sizeof(uint32_t), &ordinal, - sizeof(uint32_t), &datalen, - bloblen, blob, 0, 0); - if (ret < 0) - return ret; - - /* build the request buffer */ - tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SIGN); - tpm_buf_append_u32(tb, keyhandle); - tpm_buf_append_u32(tb, bloblen); - tpm_buf_append(tb, blob, bloblen); - tpm_buf_append_u32(tb, authhandle); - tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE); - tpm_buf_append_u8(tb, cont); - tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE); - - ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); - if (ret < 0) { - pr_info("authhmac failed (%d)\n", ret); - return ret; - } - - datalen = LOAD32(tb->data, TPM_DATA_OFFSET); - - ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, - keyauth, SHA1_DIGEST_SIZE, - sizeof(uint32_t), TPM_DATA_OFFSET, - datalen, TPM_DATA_OFFSET + sizeof(uint32_t), - 0, 0); - if (ret < 0) { - pr_info("TSS_checkhmac1 failed (%d)\n", ret); - return ret; - } - - memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), - min(datalen, outlen)); - - return datalen; -} - -/* Room to fit two u32 zeros for algo id and parameters length. */ -#define SETKEY_PARAMS_SIZE (sizeof(u32) * 2) - -/* - * Maximum buffer size for the BER/DER encoded public key. The public key - * is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048 - * bit key and e is usually 65537 - * The encoding overhead is: - * - max 4 bytes for SEQUENCE - * - max 4 bytes for INTEGER n type/length - * - 257 bytes of n - * - max 2 bytes for INTEGER e type/length - * - 3 bytes of e - * - 4+4 of zeros for set_pub_key parameters (SETKEY_PARAMS_SIZE) - */ -#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3 + SETKEY_PARAMS_SIZE) - -/* - * Provide a part of a description of the key for /proc/keys. 
- */ -static void asym_tpm_describe(const struct key *asymmetric_key, - struct seq_file *m) -{ - struct tpm_key *tk = asymmetric_key->payload.data[asym_crypto]; - - if (!tk) - return; - - seq_printf(m, "TPM1.2/Blob"); -} - -static void asym_tpm_destroy(void *payload0, void *payload3) -{ - struct tpm_key *tk = payload0; - - if (!tk) - return; - - kfree(tk->blob); - tk->blob_len = 0; - - kfree(tk); -} - -/* How many bytes will it take to encode the length */ -static inline uint32_t definite_length(uint32_t len) -{ - if (len <= 127) - return 1; - if (len <= 255) - return 2; - return 3; -} - -static inline uint8_t *encode_tag_length(uint8_t *buf, uint8_t tag, - uint32_t len) -{ - *buf++ = tag; - - if (len <= 127) { - buf[0] = len; - return buf + 1; - } - - if (len <= 255) { - buf[0] = 0x81; - buf[1] = len; - return buf + 2; - } - - buf[0] = 0x82; - put_unaligned_be16(len, buf + 1); - return buf + 3; -} - -static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf) -{ - uint8_t *cur = buf; - uint32_t n_len = definite_length(len) + 1 + len + 1; - uint32_t e_len = definite_length(3) + 1 + 3; - uint8_t e[3] = { 0x01, 0x00, 0x01 }; - - /* SEQUENCE */ - cur = encode_tag_length(cur, 0x30, n_len + e_len); - /* INTEGER n */ - cur = encode_tag_length(cur, 0x02, len + 1); - cur[0] = 0x00; - memcpy(cur + 1, pub_key, len); - cur += len + 1; - cur = encode_tag_length(cur, 0x02, sizeof(e)); - memcpy(cur, e, sizeof(e)); - cur += sizeof(e); - /* Zero parameters to satisfy set_pub_key ABI. */ - memset(cur, 0, SETKEY_PARAMS_SIZE); - - return cur - buf; -} - -/* - * Determine the crypto algorithm name. - */ -static int determine_akcipher(const char *encoding, const char *hash_algo, - char alg_name[CRYPTO_MAX_ALG_NAME]) -{ - if (strcmp(encoding, "pkcs1") == 0) { - if (!hash_algo) { - strcpy(alg_name, "pkcs1pad(rsa)"); - return 0; - } - - if (snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(rsa,%s)", - hash_algo) >= CRYPTO_MAX_ALG_NAME) - return -EINVAL; - - return 0; - } - - if (strcmp(encoding, "raw") == 0) { - strcpy(alg_name, "rsa"); - return 0; - } - - return -ENOPKG; -} - -/* - * Query information about a key. - */ -static int tpm_key_query(const struct kernel_pkey_params *params, - struct kernel_pkey_query *info) -{ - struct tpm_key *tk = params->key->payload.data[asym_crypto]; - int ret; - char alg_name[CRYPTO_MAX_ALG_NAME]; - struct crypto_akcipher *tfm; - uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; - uint32_t der_pub_key_len; - int len; - - /* TPM only works on private keys, public keys still done in software */ - ret = determine_akcipher(params->encoding, params->hash_algo, alg_name); - if (ret < 0) - return ret; - - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, - der_pub_key); - - ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); - if (ret < 0) - goto error_free_tfm; - - len = crypto_akcipher_maxsize(tfm); - - info->key_size = tk->key_len; - info->max_data_size = tk->key_len / 8; - info->max_sig_size = len; - info->max_enc_size = len; - info->max_dec_size = tk->key_len / 8; - - info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT | - KEYCTL_SUPPORTS_DECRYPT | - KEYCTL_SUPPORTS_VERIFY | - KEYCTL_SUPPORTS_SIGN; - - ret = 0; -error_free_tfm: - crypto_free_akcipher(tfm); - pr_devel("<==%s() = %d\n", __func__, ret); - return ret; -} - -/* - * Encryption operation is performed with the public key. 
Hence it is done - * in software - */ -static int tpm_key_encrypt(struct tpm_key *tk, - struct kernel_pkey_params *params, - const void *in, void *out) -{ - char alg_name[CRYPTO_MAX_ALG_NAME]; - struct crypto_akcipher *tfm; - struct akcipher_request *req; - struct crypto_wait cwait; - struct scatterlist in_sg, out_sg; - uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; - uint32_t der_pub_key_len; - int ret; - - pr_devel("==>%s()\n", __func__); - - ret = determine_akcipher(params->encoding, params->hash_algo, alg_name); - if (ret < 0) - return ret; - - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, - der_pub_key); - - ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); - if (ret < 0) - goto error_free_tfm; - - ret = -ENOMEM; - req = akcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) - goto error_free_tfm; - - sg_init_one(&in_sg, in, params->in_len); - sg_init_one(&out_sg, out, params->out_len); - akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len, - params->out_len); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, &cwait); - - ret = crypto_akcipher_encrypt(req); - ret = crypto_wait_req(ret, &cwait); - - if (ret == 0) - ret = req->dst_len; - - akcipher_request_free(req); -error_free_tfm: - crypto_free_akcipher(tfm); - pr_devel("<==%s() = %d\n", __func__, ret); - return ret; -} - -/* - * Decryption operation is performed with the private key in the TPM. - */ -static int tpm_key_decrypt(struct tpm_key *tk, - struct kernel_pkey_params *params, - const void *in, void *out) -{ - struct tpm_buf tb; - uint32_t keyhandle; - uint8_t srkauth[SHA1_DIGEST_SIZE]; - uint8_t keyauth[SHA1_DIGEST_SIZE]; - int r; - - pr_devel("==>%s()\n", __func__); - - if (params->hash_algo) - return -ENOPKG; - - if (strcmp(params->encoding, "pkcs1")) - return -ENOPKG; - - r = tpm_buf_init(&tb, 0, 0); - if (r) - return r; - - /* TODO: Handle a non-all zero SRK authorization */ - memset(srkauth, 0, sizeof(srkauth)); - - r = tpm_loadkey2(&tb, SRKHANDLE, srkauth, - tk->blob, tk->blob_len, &keyhandle); - if (r < 0) { - pr_devel("loadkey2 failed (%d)\n", r); - goto error; - } - - /* TODO: Handle a non-all zero key authorization */ - memset(keyauth, 0, sizeof(keyauth)); - - r = tpm_unbind(&tb, keyhandle, keyauth, - in, params->in_len, out, params->out_len); - if (r < 0) - pr_devel("tpm_unbind failed (%d)\n", r); - - if (tpm_flushspecific(&tb, keyhandle) < 0) - pr_devel("flushspecific failed (%d)\n", r); - -error: - tpm_buf_destroy(&tb); - pr_devel("<==%s() = %d\n", __func__, r); - return r; -} - -/* - * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2]. 
- */ -static const u8 digest_info_md5[] = { - 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, - 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */ - 0x05, 0x00, 0x04, 0x10 -}; - -static const u8 digest_info_sha1[] = { - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, - 0x2b, 0x0e, 0x03, 0x02, 0x1a, - 0x05, 0x00, 0x04, 0x14 -}; - -static const u8 digest_info_rmd160[] = { - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, - 0x2b, 0x24, 0x03, 0x02, 0x01, - 0x05, 0x00, 0x04, 0x14 -}; - -static const u8 digest_info_sha224[] = { - 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, - 0x05, 0x00, 0x04, 0x1c -}; - -static const u8 digest_info_sha256[] = { - 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, - 0x05, 0x00, 0x04, 0x20 -}; - -static const u8 digest_info_sha384[] = { - 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, - 0x05, 0x00, 0x04, 0x30 -}; - -static const u8 digest_info_sha512[] = { - 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, - 0x05, 0x00, 0x04, 0x40 -}; - -static const struct asn1_template { - const char *name; - const u8 *data; - size_t size; -} asn1_templates[] = { -#define _(X) { #X, digest_info_##X, sizeof(digest_info_##X) } - _(md5), - _(sha1), - _(rmd160), - _(sha256), - _(sha384), - _(sha512), - _(sha224), - { NULL } -#undef _ -}; - -static const struct asn1_template *lookup_asn1(const char *name) -{ - const struct asn1_template *p; - - for (p = asn1_templates; p->name; p++) - if (strcmp(name, p->name) == 0) - return p; - return NULL; -} - -/* - * Sign operation is performed with the private key in the TPM. - */ -static int tpm_key_sign(struct tpm_key *tk, - struct kernel_pkey_params *params, - const void *in, void *out) -{ - struct tpm_buf tb; - uint32_t keyhandle; - uint8_t srkauth[SHA1_DIGEST_SIZE]; - uint8_t keyauth[SHA1_DIGEST_SIZE]; - void *asn1_wrapped = NULL; - uint32_t in_len = params->in_len; - int r; - - pr_devel("==>%s()\n", __func__); - - if (strcmp(params->encoding, "pkcs1")) - return -ENOPKG; - - if (params->hash_algo) { - const struct asn1_template *asn1 = - lookup_asn1(params->hash_algo); - - if (!asn1) - return -ENOPKG; - - /* request enough space for the ASN.1 template + input hash */ - asn1_wrapped = kzalloc(in_len + asn1->size, GFP_KERNEL); - if (!asn1_wrapped) - return -ENOMEM; - - /* Copy ASN.1 template, then the input */ - memcpy(asn1_wrapped, asn1->data, asn1->size); - memcpy(asn1_wrapped + asn1->size, in, in_len); - - in = asn1_wrapped; - in_len += asn1->size; - } - - if (in_len > tk->key_len / 8 - 11) { - r = -EOVERFLOW; - goto error_free_asn1_wrapped; - } - - r = tpm_buf_init(&tb, 0, 0); - if (r) - goto error_free_asn1_wrapped; - - /* TODO: Handle a non-all zero SRK authorization */ - memset(srkauth, 0, sizeof(srkauth)); - - r = tpm_loadkey2(&tb, SRKHANDLE, srkauth, - tk->blob, tk->blob_len, &keyhandle); - if (r < 0) { - pr_devel("loadkey2 failed (%d)\n", r); - goto error_free_tb; - } - - /* TODO: Handle a non-all zero key authorization */ - memset(keyauth, 0, sizeof(keyauth)); - - r = tpm_sign(&tb, keyhandle, keyauth, in, in_len, out, params->out_len); - if (r < 0) - pr_devel("tpm_sign failed (%d)\n", r); - - if (tpm_flushspecific(&tb, keyhandle) < 0) - pr_devel("flushspecific failed (%d)\n", r); - -error_free_tb: - tpm_buf_destroy(&tb); -error_free_asn1_wrapped: - kfree(asn1_wrapped); - pr_devel("<==%s() = %d\n", __func__, r); - return r; -} - -/* - * Do encryption, decryption and signing ops. 
- */ -static int tpm_key_eds_op(struct kernel_pkey_params *params, - const void *in, void *out) -{ - struct tpm_key *tk = params->key->payload.data[asym_crypto]; - int ret = -EOPNOTSUPP; - - /* Perform the encryption calculation. */ - switch (params->op) { - case kernel_pkey_encrypt: - ret = tpm_key_encrypt(tk, params, in, out); - break; - case kernel_pkey_decrypt: - ret = tpm_key_decrypt(tk, params, in, out); - break; - case kernel_pkey_sign: - ret = tpm_key_sign(tk, params, in, out); - break; - default: - BUG(); - } - - return ret; -} - -/* - * Verify a signature using a public key. - */ -static int tpm_key_verify_signature(const struct key *key, - const struct public_key_signature *sig) -{ - const struct tpm_key *tk = key->payload.data[asym_crypto]; - struct crypto_wait cwait; - struct crypto_akcipher *tfm; - struct akcipher_request *req; - struct scatterlist src_sg[2]; - char alg_name[CRYPTO_MAX_ALG_NAME]; - uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; - uint32_t der_pub_key_len; - int ret; - - pr_devel("==>%s()\n", __func__); - - BUG_ON(!tk); - BUG_ON(!sig); - BUG_ON(!sig->s); - - if (!sig->digest) - return -ENOPKG; - - ret = determine_akcipher(sig->encoding, sig->hash_algo, alg_name); - if (ret < 0) - return ret; - - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, - der_pub_key); - - ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); - if (ret < 0) - goto error_free_tfm; - - ret = -ENOMEM; - req = akcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) - goto error_free_tfm; - - sg_init_table(src_sg, 2); - sg_set_buf(&src_sg[0], sig->s, sig->s_size); - sg_set_buf(&src_sg[1], sig->digest, sig->digest_size); - akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size, - sig->digest_size); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, &cwait); - ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); - - akcipher_request_free(req); -error_free_tfm: - crypto_free_akcipher(tfm); - pr_devel("<==%s() = %d\n", __func__, ret); - if (WARN_ON_ONCE(ret > 0)) - ret = -EINVAL; - return ret; -} - -/* - * Parse enough information out of TPM_KEY structure: - * TPM_STRUCT_VER -> 4 bytes - * TPM_KEY_USAGE -> 2 bytes - * TPM_KEY_FLAGS -> 4 bytes - * TPM_AUTH_DATA_USAGE -> 1 byte - * TPM_KEY_PARMS -> variable - * UINT32 PCRInfoSize -> 4 bytes - * BYTE* -> PCRInfoSize bytes - * TPM_STORE_PUBKEY - * UINT32 encDataSize; - * BYTE* -> encDataSize; - * - * TPM_KEY_PARMS: - * TPM_ALGORITHM_ID -> 4 bytes - * TPM_ENC_SCHEME -> 2 bytes - * TPM_SIG_SCHEME -> 2 bytes - * UINT32 parmSize -> 4 bytes - * BYTE* -> variable - */ -static int extract_key_parameters(struct tpm_key *tk) -{ - const void *cur = tk->blob; - uint32_t len = tk->blob_len; - const void *pub_key; - uint32_t sz; - uint32_t key_len; - - if (len < 11) - return -EBADMSG; - - /* Ensure this is a legacy key */ - if (get_unaligned_be16(cur + 4) != 0x0015) - return -EBADMSG; - - /* Skip to TPM_KEY_PARMS */ - cur += 11; - len -= 11; - - if (len < 12) - return -EBADMSG; - - /* Make sure this is an RSA key */ - if (get_unaligned_be32(cur) != 0x00000001) - return -EBADMSG; - - /* Make sure this is TPM_ES_RSAESPKCSv15 encoding scheme */ - if (get_unaligned_be16(cur + 4) != 0x0002) - return -EBADMSG; - - /* Make sure this is TPM_SS_RSASSAPKCS1v15_DER signature scheme */ - if (get_unaligned_be16(cur + 6) != 0x0003) - return -EBADMSG; - - sz = 
get_unaligned_be32(cur + 8); - if (len < sz + 12) - return -EBADMSG; - - /* Move to TPM_RSA_KEY_PARMS */ - len -= 12; - cur += 12; - - /* Grab the RSA key length */ - key_len = get_unaligned_be32(cur); - - switch (key_len) { - case 512: - case 1024: - case 1536: - case 2048: - break; - default: - return -EINVAL; - } - - /* Move just past TPM_KEY_PARMS */ - cur += sz; - len -= sz; - - if (len < 4) - return -EBADMSG; - - sz = get_unaligned_be32(cur); - if (len < 4 + sz) - return -EBADMSG; - - /* Move to TPM_STORE_PUBKEY */ - cur += 4 + sz; - len -= 4 + sz; - - /* Grab the size of the public key, it should jive with the key size */ - sz = get_unaligned_be32(cur); - if (sz > 256) - return -EINVAL; - - pub_key = cur + 4; - - tk->key_len = key_len; - tk->pub_key = pub_key; - tk->pub_key_len = sz; - - return 0; -} - -/* Given the blob, parse it and load it into the TPM */ -struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len) -{ - int r; - struct tpm_key *tk; - - r = tpm_is_tpm2(NULL); - if (r < 0) - goto error; - - /* We don't support TPM2 yet */ - if (r > 0) { - r = -ENODEV; - goto error; - } - - r = -ENOMEM; - tk = kzalloc(sizeof(struct tpm_key), GFP_KERNEL); - if (!tk) - goto error; - - tk->blob = kmemdup(blob, blob_len, GFP_KERNEL); - if (!tk->blob) - goto error_memdup; - - tk->blob_len = blob_len; - - r = extract_key_parameters(tk); - if (r < 0) - goto error_extract; - - return tk; - -error_extract: - kfree(tk->blob); - tk->blob_len = 0; -error_memdup: - kfree(tk); -error: - return ERR_PTR(r); -} -EXPORT_SYMBOL_GPL(tpm_key_create); - -/* - * TPM-based asymmetric key subtype - */ -struct asymmetric_key_subtype asym_tpm_subtype = { - .owner = THIS_MODULE, - .name = "asym_tpm", - .name_len = sizeof("asym_tpm") - 1, - .describe = asym_tpm_describe, - .destroy = asym_tpm_destroy, - .query = tpm_key_query, - .eds_op = tpm_key_eds_op, - .verify_signature = tpm_key_verify_signature, -}; -EXPORT_SYMBOL_GPL(asym_tpm_subtype); - -MODULE_DESCRIPTION("TPM based asymmetric key subtype"); -MODULE_AUTHOR("Intel Corporation"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index 6e5fc8e31f01..41a2f0eb4ce4 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Asymmetric public-key cryptography key type * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -36,16 +36,23 @@ static DECLARE_RWSEM(asymmetric_key_parsers_sem); * find_asymmetric_key - Find a key by ID. * @keyring: The keys to search. * @id_0: The first ID to look for or NULL. - * @id_1: The second ID to look for or NULL. - * @partial: Use partial match if true, exact if false. + * @id_1: The second ID to look for or NULL, matched together with @id_0 + * against @keyring keys' id[0] and id[1]. + * @id_2: The fallback ID to match against @keyring keys' id[2] if both of the + * other IDs are NULL. + * @partial: Use partial match for @id_0 and @id_1 if true, exact if false. * * Find a key in the given keyring by identifier. The preferred identifier is * the id_0 and the fallback identifier is the id_1. If both are given, the - * lookup is by the former, but the latter must also match. 
+ * former is matched (exactly or partially) against either of the sought key's + * identifiers and the latter must match the found key's second identifier + * exactly. If both are missing, id_2 must match the sought key's third + * identifier exactly. */ struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_0, const struct asymmetric_key_id *id_1, + const struct asymmetric_key_id *id_2, bool partial) { struct key *key; @@ -54,14 +61,17 @@ struct key *find_asymmetric_key(struct key *keyring, char *req, *p; int len; - BUG_ON(!id_0 && !id_1); + WARN_ON(!id_0 && !id_1 && !id_2); if (id_0) { lookup = id_0->data; len = id_0->len; - } else { + } else if (id_1) { lookup = id_1->data; len = id_1->len; + } else { + lookup = id_2->data; + len = id_2->len; } /* Construct an identifier "id:<keyid>". */ @@ -69,7 +79,10 @@ struct key *find_asymmetric_key(struct key *keyring, if (!req) return ERR_PTR(-ENOMEM); - if (partial) { + if (!id_0 && !id_1) { + *p++ = 'd'; + *p++ = 'n'; + } else if (partial) { *p++ = 'i'; *p++ = 'd'; } else { @@ -152,7 +165,8 @@ EXPORT_SYMBOL_GPL(asymmetric_key_generate_id); /** * asymmetric_key_id_same - Return true if two asymmetric keys IDs are the same. - * @kid_1, @kid_2: The key IDs to compare + * @kid1: The key ID to compare + * @kid2: The key ID to compare */ bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1, const struct asymmetric_key_id *kid2) @@ -168,7 +182,8 @@ EXPORT_SYMBOL_GPL(asymmetric_key_id_same); /** * asymmetric_key_id_partial - Return true if two asymmetric keys IDs * partially match - * @kid_1, @kid_2: The key IDs to compare + * @kid1: The key ID to compare + * @kid2: The key ID to compare */ bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1, const struct asymmetric_key_id *kid2) @@ -183,8 +198,8 @@ bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1, EXPORT_SYMBOL_GPL(asymmetric_key_id_partial); /** - * asymmetric_match_key_ids - Search asymmetric key IDs - * @kids: The list of key IDs to check + * asymmetric_match_key_ids - Search asymmetric key IDs 1 & 2 + * @kids: The pair of key IDs to check * @match_id: The key ID we're looking for * @match: The match function to use */ @@ -198,7 +213,7 @@ static bool asymmetric_match_key_ids( if (!kids || !match_id) return false; - for (i = 0; i < ARRAY_SIZE(kids->id); i++) + for (i = 0; i < 2; i++) if (match(kids->id[i], match_id)) return true; return false; @@ -242,7 +257,7 @@ struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id) } /* - * Match asymmetric keys by an exact match on an ID. + * Match asymmetric keys by an exact match on one of the first two IDs. */ static bool asymmetric_key_cmp(const struct key *key, const struct key_match_data *match_data) @@ -255,7 +270,7 @@ static bool asymmetric_key_cmp(const struct key *key, } /* - * Match asymmetric keys by a partial match on an IDs. + * Match asymmetric keys by a partial match on one of the first two IDs. */ static bool asymmetric_key_cmp_partial(const struct key *key, const struct key_match_data *match_data) @@ -268,14 +283,27 @@ static bool asymmetric_key_cmp_partial(const struct key *key, } /* + * Match asymmetric keys by an exact match on the third IDs. 
+ */ +static bool asymmetric_key_cmp_name(const struct key *key, + const struct key_match_data *match_data) +{ + const struct asymmetric_key_ids *kids = asymmetric_key_ids(key); + const struct asymmetric_key_id *match_id = match_data->preparsed; + + return kids && asymmetric_key_id_same(kids->id[2], match_id); +} + +/* * Preparse the match criterion. If we don't set lookup_type and cmp, * the default will be an exact match on the key description. * * There are some specifiers for matching key IDs rather than by the key * description: * - * "id:<id>" - find a key by partial match on any available ID - * "ex:<id>" - find a key by exact match on any available ID + * "id:<id>" - find a key by partial match on one of the first two IDs + * "ex:<id>" - find a key by exact match on one of the first two IDs + * "dn:<id>" - find a key by exact match on the third ID * * These have to be searched by iteration rather than by direct lookup because * the key is hashed according to its description. @@ -299,6 +327,11 @@ static int asymmetric_key_match_preparse(struct key_match_data *match_data) spec[1] == 'x' && spec[2] == ':') { id = spec + 3; + } else if (spec[0] == 'd' && + spec[1] == 'n' && + spec[2] == ':') { + id = spec + 3; + cmp = asymmetric_key_cmp_name; } else { goto default_match; } diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 967329e0a07b..277482bb1777 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -248,6 +248,15 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, case OID_sha224: ctx->sinfo->sig->hash_algo = "sha224"; break; + case OID_sm3: + ctx->sinfo->sig->hash_algo = "sm3"; + break; + case OID_gost2012Digest256: + ctx->sinfo->sig->hash_algo = "streebog256"; + break; + case OID_gost2012Digest512: + ctx->sinfo->sig->hash_algo = "streebog512"; + break; default: printk("Unsupported digest algo: %u\n", ctx->last_oid); return -ENOPKG; @@ -269,6 +278,23 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; + case OID_id_ecdsa_with_sha1: + case OID_id_ecdsa_with_sha224: + case OID_id_ecdsa_with_sha256: + case OID_id_ecdsa_with_sha384: + case OID_id_ecdsa_with_sha512: + ctx->sinfo->sig->pkey_algo = "ecdsa"; + ctx->sinfo->sig->encoding = "x962"; + break; + case OID_SM2_with_SM3: + ctx->sinfo->sig->pkey_algo = "sm2"; + ctx->sinfo->sig->encoding = "raw"; + break; + case OID_gost2012PKey256: + case OID_gost2012PKey512: + ctx->sinfo->sig->pkey_algo = "ecrdsa"; + ctx->sinfo->sig->encoding = "raw"; + break; default: printk("Unsupported pkey algo: %u\n", ctx->last_oid); return -ENOPKG; diff --git a/crypto/asymmetric_keys/pkcs7_parser.h b/crypto/asymmetric_keys/pkcs7_parser.h index 6565fdc2d4ca..e17f7ce4fb43 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.h +++ b/crypto/asymmetric_keys/pkcs7_parser.h @@ -41,10 +41,9 @@ struct pkcs7_signed_info { * * This contains the generated digest of _either_ the Content Data or * the Authenticated Attributes [RFC2315 9.3]. If the latter, one of - * the attributes contains the digest of the the Content Data within - * it. + * the attributes contains the digest of the Content Data within it. * - * THis also contains the issuing cert serial number and issuer's name + * This also contains the issuing cert serial number and issuer's name * [PKCS#7 or CMS ver 1] or issuing cert's SKID [CMS ver 3]. 
*/ struct public_key_signature *sig; diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c index 61af3c4d82cc..9a87c34ed173 100644 --- a/crypto/asymmetric_keys/pkcs7_trust.c +++ b/crypto/asymmetric_keys/pkcs7_trust.c @@ -16,7 +16,7 @@ #include <crypto/public_key.h> #include "pkcs7_parser.h" -/** +/* * Check the trust on one PKCS#7 SignedInfo block. */ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, @@ -48,7 +48,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, * keys. */ key = find_asymmetric_key(trust_keyring, - x509->id, x509->skid, false); + x509->id, x509->skid, NULL, false); if (!IS_ERR(key)) { /* One of the X.509 certificates in the PKCS#7 message * is apparently the same as one we already trust. @@ -82,7 +82,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, key = find_asymmetric_key(trust_keyring, last->sig->auth_ids[0], last->sig->auth_ids[1], - false); + NULL, false); if (!IS_ERR(key)) { x509 = last; pr_devel("sinfo %u: Root cert %u signer is key %x\n", @@ -97,7 +97,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, * the signed info directly. */ key = find_asymmetric_key(trust_keyring, - sinfo->sig->auth_ids[0], NULL, false); + sinfo->sig->auth_ids[0], NULL, NULL, false); if (!IS_ERR(key)) { pr_devel("sinfo %u: Direct signer is key %x\n", sinfo->index, key_serial(key)); diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c index ce49820caa97..f6321c785714 100644 --- a/crypto/asymmetric_keys/pkcs7_verify.c +++ b/crypto/asymmetric_keys/pkcs7_verify.c @@ -141,11 +141,10 @@ int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, u32 *len, *buf = sinfo->sig->digest; *len = sinfo->sig->digest_size; - for (i = 0; i < HASH_ALGO__LAST; i++) - if (!strcmp(hash_algo_name[i], sinfo->sig->hash_algo)) { - *hash_algo = i; - break; - } + i = match_string(hash_algo_name, HASH_ALGO__LAST, + sinfo->sig->hash_algo); + if (i >= 0) + *hash_algo = i; return 0; } @@ -175,12 +174,6 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7, pr_devel("Sig %u: Found cert serial match X.509[%u]\n", sinfo->index, certix); - if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) { - pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", - sinfo->index); - continue; - } - sinfo->signer = x509; return 0; } @@ -227,9 +220,6 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, return 0; } - if (x509->unsupported_key) - goto unsupported_crypto_in_x509; - pr_debug("- issuer %s\n", x509->issuer); sig = x509->sig; if (sig->auth_ids[0]) @@ -246,7 +236,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, * authority. */ if (x509->unsupported_sig) - goto unsupported_crypto_in_x509; + goto unsupported_sig_in_x509; x509->signer = x509; pr_debug("- self-signed\n"); return 0; @@ -310,7 +300,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, might_sleep(); } -unsupported_crypto_in_x509: +unsupported_sig_in_x509: /* Just prune the certificate chain at this point if we lack some * crypto module to go further. 
Note, however, we don't want to set * sinfo->unsupported_crypto as the signed info block may still be diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index d7f43d4ea925..2f8352e88860 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* In-software asymmetric public-key crypto subtype * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -14,9 +14,12 @@ #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/scatterlist.h> +#include <linux/asn1.h> #include <keys/asymmetric-subtype.h> #include <crypto/public_key.h> #include <crypto/akcipher.h> +#include <crypto/sm2.h> +#include <crypto/sm3_base.h> MODULE_DESCRIPTION("In-software asymmetric public-key subtype"); MODULE_AUTHOR("Red Hat, Inc."); @@ -57,38 +60,83 @@ static void public_key_destroy(void *payload0, void *payload3) } /* - * Determine the crypto algorithm name. + * Given a public_key, and an encoding and hash_algo to be used for signing + * and/or verification with that key, determine the name of the corresponding + * akcipher algorithm. Also check that encoding and hash_algo are allowed. */ -static -int software_key_determine_akcipher(const char *encoding, - const char *hash_algo, - const struct public_key *pkey, - char alg_name[CRYPTO_MAX_ALG_NAME]) +static int +software_key_determine_akcipher(const struct public_key *pkey, + const char *encoding, const char *hash_algo, + char alg_name[CRYPTO_MAX_ALG_NAME]) { int n; - if (strcmp(encoding, "pkcs1") == 0) { - /* The data wangled by the RSA algorithm is typically padded - * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447 - * sec 8.2]. + if (!encoding) + return -EINVAL; + + if (strcmp(pkey->pkey_algo, "rsa") == 0) { + /* + * RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2]. + */ + if (strcmp(encoding, "pkcs1") == 0) { + if (!hash_algo) + n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, + "pkcs1pad(%s)", + pkey->pkey_algo); + else + n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, + "pkcs1pad(%s,%s)", + pkey->pkey_algo, hash_algo); + return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; + } + if (strcmp(encoding, "raw") != 0) + return -EINVAL; + /* + * Raw RSA cannot differentiate between different hash + * algorithms. + */ + if (hash_algo) + return -EINVAL; + } else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { + if (strcmp(encoding, "x962") != 0) + return -EINVAL; + /* + * ECDSA signatures are taken over a raw hash, so they don't + * differentiate between different hash algorithms. That means + * that the verifier should hard-code a specific hash algorithm. + * Unfortunately, in practice ECDSA is used with multiple SHAs, + * so we have to allow all of them and not just one. */ if (!hash_algo) - n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s)", - pkey->pkey_algo); - else - n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s,%s)", - pkey->pkey_algo, hash_algo); - return n >= CRYPTO_MAX_ALG_NAME ? 
-EINVAL : 0; - } - - if (strcmp(encoding, "raw") == 0) { - strcpy(alg_name, pkey->pkey_algo); - return 0; + return -EINVAL; + if (strcmp(hash_algo, "sha1") != 0 && + strcmp(hash_algo, "sha224") != 0 && + strcmp(hash_algo, "sha256") != 0 && + strcmp(hash_algo, "sha384") != 0 && + strcmp(hash_algo, "sha512") != 0) + return -EINVAL; + } else if (strcmp(pkey->pkey_algo, "sm2") == 0) { + if (strcmp(encoding, "raw") != 0) + return -EINVAL; + if (!hash_algo) + return -EINVAL; + if (strcmp(hash_algo, "sm3") != 0) + return -EINVAL; + } else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) { + if (strcmp(encoding, "raw") != 0) + return -EINVAL; + if (!hash_algo) + return -EINVAL; + if (strcmp(hash_algo, "streebog256") != 0 && + strcmp(hash_algo, "streebog512") != 0) + return -EINVAL; + } else { + /* Unknown public key algorithm */ + return -ENOPKG; } - - return -ENOPKG; + if (strscpy(alg_name, pkey->pkey_algo, CRYPTO_MAX_ALG_NAME) < 0) + return -EINVAL; + return 0; } static u8 *pkey_pack_u32(u8 *dst, u32 val) @@ -109,9 +157,8 @@ static int software_key_query(const struct kernel_pkey_params *params, u8 *key, *ptr; int ret, len; - ret = software_key_determine_akcipher(params->encoding, - params->hash_algo, - pkey, alg_name); + ret = software_key_determine_akcipher(pkey, params->encoding, + params->hash_algo, alg_name); if (ret < 0) return ret; @@ -119,6 +166,7 @@ static int software_key_query(const struct kernel_pkey_params *params, if (IS_ERR(tfm)) return PTR_ERR(tfm); + ret = -ENOMEM; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) @@ -174,9 +222,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params, pr_devel("==>%s()\n", __func__); - ret = software_key_determine_akcipher(params->encoding, - params->hash_algo, - pkey, alg_name); + ret = software_key_determine_akcipher(pkey, params->encoding, + params->hash_algo, alg_name); if (ret < 0) return ret; @@ -245,6 +292,65 @@ error_free_tfm: return ret; } +#if IS_REACHABLE(CONFIG_CRYPTO_SM2) +static int cert_sig_digest_update(const struct public_key_signature *sig, + struct crypto_akcipher *tfm_pkey) +{ + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t desc_size; + unsigned char dgst[SM3_DIGEST_SIZE]; + int ret; + + BUG_ON(!sig->data); + + /* SM2 signatures always use the SM3 hash algorithm */ + if (!sig->hash_algo || strcmp(sig->hash_algo, "sm3") != 0) + return -EINVAL; + + ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID, + SM2_DEFAULT_USERID_LEN, dgst); + if (ret) + return ret; + + tfm = crypto_alloc_shash(sig->hash_algo, 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + desc = kzalloc(desc_size, GFP_KERNEL); + if (!desc) { + ret = -ENOMEM; + goto error_free_tfm; + } + + desc->tfm = tfm; + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error_free_desc; + + ret = crypto_shash_update(desc, dgst, SM3_DIGEST_SIZE); + if (ret < 0) + goto error_free_desc; + + ret = crypto_shash_finup(desc, sig->data, sig->data_size, sig->digest); + +error_free_desc: + kfree(desc); +error_free_tfm: + crypto_free_shash(tfm); + return ret; +} +#else +static inline int cert_sig_digest_update( + const struct public_key_signature *sig, + struct crypto_akcipher *tfm_pkey) +{ + return -ENOTSUPP; +} +#endif /* ! IS_REACHABLE(CONFIG_CRYPTO_SM2) */ + /* * Verify a signature using a public key. 
*/ @@ -265,9 +371,23 @@ int public_key_verify_signature(const struct public_key *pkey, BUG_ON(!sig); BUG_ON(!sig->s); - ret = software_key_determine_akcipher(sig->encoding, - sig->hash_algo, - pkey, alg_name); + /* + * If the signature specifies a public key algorithm, it *must* match + * the key's actual public key algorithm. + * + * Small exception: ECDSA signatures don't specify the curve, but ECDSA + * keys do. So the strings can mismatch slightly in that case: + * "ecdsa-nist-*" for the key, but "ecdsa" for the signature. + */ + if (sig->pkey_algo) { + if (strcmp(pkey->pkey_algo, sig->pkey_algo) != 0 && + (strncmp(pkey->pkey_algo, "ecdsa-", 6) != 0 || + strcmp(sig->pkey_algo, "ecdsa") != 0)) + return -EKEYREJECTED; + } + + ret = software_key_determine_akcipher(pkey, sig->encoding, + sig->hash_algo, alg_name); if (ret < 0) return ret; @@ -298,6 +418,12 @@ int public_key_verify_signature(const struct public_key *pkey, if (ret) goto error_free_key; + if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) { + ret = cert_sig_digest_update(sig, tfm); + if (ret) + goto error_free_key; + } + sg_init_table(src_sg, 2); sg_set_buf(&src_sg[0], sig->s, sig->s_size); sg_set_buf(&src_sg[1], sig->digest, sig->digest_size); diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c index 77ebebada29c..6b1ac5f5896a 100644 --- a/crypto/asymmetric_keys/restrict.c +++ b/crypto/asymmetric_keys/restrict.c @@ -87,7 +87,7 @@ int restrict_link_by_signature(struct key *dest_keyring, sig = payload->data[asym_auth]; if (!sig) return -ENOPKG; - if (!sig->auth_ids[0] && !sig->auth_ids[1]) + if (!sig->auth_ids[0] && !sig->auth_ids[1] && !sig->auth_ids[2]) return -ENOKEY; if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid)) @@ -96,7 +96,7 @@ int restrict_link_by_signature(struct key *dest_keyring, /* See if we have a key that signed this one. */ key = find_asymmetric_key(trust_keyring, sig->auth_ids[0], sig->auth_ids[1], - false); + sig->auth_ids[2], false); if (IS_ERR(key)) return -ENOKEY; @@ -108,11 +108,11 @@ int restrict_link_by_signature(struct key *dest_keyring, return ret; } -static bool match_either_id(const struct asymmetric_key_ids *pair, +static bool match_either_id(const struct asymmetric_key_id **pair, const struct asymmetric_key_id *single) { - return (asymmetric_key_id_same(pair->id[0], single) || - asymmetric_key_id_same(pair->id[1], single)); + return (asymmetric_key_id_same(pair[0], single) || + asymmetric_key_id_same(pair[1], single)); } static int key_or_keyring_common(struct key *dest_keyring, @@ -140,20 +140,22 @@ static int key_or_keyring_common(struct key *dest_keyring, sig = payload->data[asym_auth]; if (!sig) return -ENOPKG; - if (!sig->auth_ids[0] && !sig->auth_ids[1]) + if (!sig->auth_ids[0] && !sig->auth_ids[1] && !sig->auth_ids[2]) return -ENOKEY; if (trusted) { if (trusted->type == &key_type_keyring) { /* See if we have a key that signed this one. 
 */
 			key = find_asymmetric_key(trusted, sig->auth_ids[0],
-						  sig->auth_ids[1], false);
+						  sig->auth_ids[1],
+						  sig->auth_ids[2], false);
 			if (IS_ERR(key))
 				key = NULL;
 		} else if (trusted->type == &key_type_asymmetric) {
-			const struct asymmetric_key_ids *signer_ids;
+			const struct asymmetric_key_id **signer_ids;
 
-			signer_ids = asymmetric_key_ids(trusted);
+			signer_ids = (const struct asymmetric_key_id **)
+				asymmetric_key_ids(trusted)->id;
 
 			/*
 			 * The auth_ids come from the candidate key (the
@@ -164,22 +166,29 @@ static int key_or_keyring_common(struct key *dest_keyring,
 			 * The signer_ids are identifiers for the
 			 * signing key specified for dest_keyring.
 			 *
-			 * The first auth_id is the preferred id, and
-			 * the second is the fallback. If only one
-			 * auth_id is present, it may match against
-			 * either signer_id. If two auth_ids are
-			 * present, the first auth_id must match one
-			 * signer_id and the second auth_id must match
-			 * the second signer_id.
+			 * The first auth_id is the preferred id, 2nd and
+			 * 3rd are the fallbacks. If exactly one of
+			 * auth_ids[0] and auth_ids[1] is present, it may
+			 * match either signer_ids[0] or signer_ids[1].
+			 * If both are present the first one may match
+			 * either signer_id but the second one must match
+			 * the second signer_id. If neither of them is
+			 * available, auth_ids[2] is matched against
+			 * signer_ids[2] as a fallback.
 			 */
-			if (!sig->auth_ids[0] || !sig->auth_ids[1]) {
+			if (!sig->auth_ids[0] && !sig->auth_ids[1]) {
+				if (asymmetric_key_id_same(signer_ids[2],
+							   sig->auth_ids[2]))
+					key = __key_get(trusted);
+
+			} else if (!sig->auth_ids[0] || !sig->auth_ids[1]) {
 				const struct asymmetric_key_id *auth_id;
 
 				auth_id = sig->auth_ids[0] ?: sig->auth_ids[1];
 				if (match_either_id(signer_ids, auth_id))
 					key = __key_get(trusted);
 
-			} else if (asymmetric_key_id_same(signer_ids->id[1],
+			} else if (asymmetric_key_id_same(signer_ids[1],
 							  sig->auth_ids[1]) &&
 				   match_either_id(signer_ids,
 						   sig->auth_ids[0])) {
@@ -193,7 +202,8 @@ static int key_or_keyring_common(struct key *dest_keyring,
 	if (check_dest && !key) {
 		/* See if the destination has a key that signed this one. */
 		key = find_asymmetric_key(dest_keyring, sig->auth_ids[0],
-					  sig->auth_ids[1], false);
+					  sig->auth_ids[1], sig->auth_ids[2],
+					  false);
 		if (IS_ERR(key))
 			key = NULL;
 	}
@@ -244,9 +254,10 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring,
  * @payload: The payload of the new key.
  * @trusted: A key or ring of keys that can be used to vouch for the new cert.
  *
- * Check the new certificate only against the key or keys passed in the data
- * parameter. If one of those is the signing key and validates the new
- * certificate, then mark the new certificate as being ok to link.
+ * Check the new certificate against the key or keys passed in the data
+ * parameter and against the keys already linked to the destination keyring. If
+ * one of those is the signing key and validates the new certificate, then mark
+ * the new certificate as being ok to link.
  *
  * Returns 0 if the new certificate was accepted, -ENOKEY if we
  * couldn't find a matching parent certificate in the trusted list,
diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c
new file mode 100644
index 000000000000..fa0bf7f24284
--- /dev/null
+++ b/crypto/asymmetric_keys/selftest.c
@@ -0,0 +1,224 @@
+/* Self-testing for signature checking.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/kernel.h> +#include <linux/cred.h> +#include <linux/key.h> +#include <crypto/pkcs7.h> +#include "x509_parser.h" + +struct certs_test { + const u8 *data; + size_t data_len; + const u8 *pkcs7; + size_t pkcs7_len; +}; + +/* + * Set of X.509 certificates to provide public keys for the tests. These will + * be loaded into a temporary keyring for the duration of the testing. + */ +static const __initconst u8 certs_selftest_keys[] = { + "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73" + "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a" + "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b" + "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43" + "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66" + "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73" + "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35" + "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30" + "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30" + "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61" + "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79" + "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01" + "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01" + "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f" + "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99" + "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92" + "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77" + "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55" + "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d" + "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44" + "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01" + "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7" + "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68" + "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6" + "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81" + "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78" + "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6" + "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f" + "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62" + "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94" + "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48" + "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c" + "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45" + "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a" + "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68" + "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09" + "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95" + "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec" + "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe" + "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42" + "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35" + "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4" + "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f" + 
"\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a" + "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc" + "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d" + "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04" + "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14" + "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17" + "\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80" + "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88" + "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01" + "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85" + "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47" + "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88" + "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12" + "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f" + "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71" + "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b" + "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56" + "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f" + "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02" + "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52" + "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a" + "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99" + "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa" + "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87" + "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13" + "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f" + "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66" + "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07" + "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05" + "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04" + "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c" + "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb" + "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48" + "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d" + "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4" + "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd" + "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71" + "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70" + "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46" + "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0" + "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62" + "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3" +}; + +/* + * Signed data and detached signature blobs that form the verification tests. 
+ */ +static const __initconst u8 certs_selftest_1_data[] = { + "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73" + "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72" + "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" + "\x61\x74\x69\x6f\x6e\x2e\x0a" +}; + +static const __initconst u8 certs_selftest_1_pkcs7[] = { + "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0" + "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09" + "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48" + "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01" + "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29" + "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69" + "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65" + "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d" + "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30" + "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09" + "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac" + "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3" + "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c" + "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00" + "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb" + "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11" + "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33" + "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23" + "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37" + "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab" + "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c" + "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41" + "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6" + "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72" + "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87" + "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4" + "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92" + "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79" + "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b" + "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0" + "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f" + "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e" + "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31" + "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24" + "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14" + "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe" + "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8" + "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e" + "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36" + "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb" + "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64" + "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51" + "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2" +}; + +/* + * List of tests to be run. 
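For orientation: reading the blob above, it appears to be a PKCS#7 ContentInfo of type signedData (OID 1.2.840.113549.1.7.2), naming SHA-256 (OID 2.16.840.1.101.3.4.2.1) as the digest algorithm and carrying a 512-byte RSA signature, with no embedded copy of the signed content. The content is detached, which is why the test below has to hand it to the parser separately via pkcs7_supply_detached_data().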
+ */
+#define TEST(data, pkcs7) { data, sizeof(data) - 1, pkcs7, sizeof(pkcs7) - 1 }
+static const struct certs_test certs_tests[] __initconst = {
+	TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
+};
+
+int __init fips_signature_selftest(void)
+{
+	struct key *keyring;
+	int ret, i;
+
+	pr_notice("Running certificate verification selftests\n");
+
+	keyring = keyring_alloc(".certs_selftest",
+				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
+				KEY_USR_VIEW | KEY_USR_READ |
+				KEY_USR_SEARCH,
+				KEY_ALLOC_NOT_IN_QUOTA,
+				NULL, NULL);
+	if (IS_ERR(keyring))
+		panic("Can't allocate certs selftest keyring: %ld\n",
+		      PTR_ERR(keyring));
+
+	ret = x509_load_certificate_list(certs_selftest_keys,
+					 sizeof(certs_selftest_keys) - 1, keyring);
+	if (ret < 0)
+		panic("Can't load certs selftest keyring: %d\n", ret);
+
+	for (i = 0; i < ARRAY_SIZE(certs_tests); i++) {
+		const struct certs_test *test = &certs_tests[i];
+		struct pkcs7_message *pkcs7;
+
+		pkcs7 = pkcs7_parse_message(test->pkcs7, test->pkcs7_len);
+		if (IS_ERR(pkcs7))
+			panic("Certs selftest %d: pkcs7_parse_message() = %ld\n",
+			      i, PTR_ERR(pkcs7));
+
+		pkcs7_supply_detached_data(pkcs7, test->data, test->data_len);
+
+		ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
+		if (ret < 0)
+			panic("Certs selftest %d: pkcs7_verify() = %d\n", i, ret);
+
+		ret = pkcs7_validate_trust(pkcs7, keyring);
+		if (ret < 0)
+			panic("Certs selftest %d: pkcs7_validate_trust() = %d\n", i, ret);
+
+		pkcs7_free_message(pkcs7);
+	}
+
+	key_put(keyring);
+	return 0;
+}
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index e24a031db1e4..2deff81f8af5 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /* Signature verification with an asymmetric key
  *
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
  *
  * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -35,7 +35,7 @@ void public_key_signature_free(struct public_key_signature *sig)
 EXPORT_SYMBOL_GPL(public_key_signature_free);
 
 /**
- * query_asymmetric_key - Get information about an aymmetric key.
+ * query_asymmetric_key - Get information about an asymmetric key.
  * @params: Various parameters.
  * @info: Where to put the information.
  */
diff --git a/crypto/asymmetric_keys/tpm.asn1 b/crypto/asymmetric_keys/tpm.asn1
deleted file mode 100644
index d7f194232f30..000000000000
--- a/crypto/asymmetric_keys/tpm.asn1
+++ /dev/null
@@ -1,5 +0,0 @@
---
--- Unencryted TPM Blob. For details of the format, see:
--- http://david.woodhou.se/draft-woodhouse-cert-best-practice.html#I-D.mavrogiannopoulos-tpmuri
---
-PrivateKeyInfo ::= OCTET STRING ({ tpm_note_key })
diff --git a/crypto/asymmetric_keys/tpm_parser.c b/crypto/asymmetric_keys/tpm_parser.c
deleted file mode 100644
index 96405d8dcd98..000000000000
--- a/crypto/asymmetric_keys/tpm_parser.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define pr_fmt(fmt) "TPM-PARSER: "fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <keys/asymmetric-subtype.h>
-#include <keys/asymmetric-parser.h>
-#include <crypto/asym_tpm_subtype.h>
-#include "tpm.asn1.h"
-
-struct tpm_parse_context {
-	const void *blob;
-	u32 blob_len;
-};
-
-/*
- * Note the key data of the ASN.1 blob.
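The selftest above also works as a usage reference for the detached-signature PKCS#7 API. A minimal sketch of the same sequence for a general caller, assuming a populated trust keyring and the usual <crypto/pkcs7.h> include; the function name and the VERIFYING_UNSPECIFIED_SIGNATURE usage hint are illustrative choices, not part of the patch:

	/* Sketch: verify 'data' against a detached PKCS#7 blob 'sig'. */
	static int verify_detached_pkcs7(const void *data, size_t data_len,
					 const void *sig, size_t sig_len,
					 struct key *trust_keyring)
	{
		struct pkcs7_message *pkcs7;
		int ret;

		pkcs7 = pkcs7_parse_message(sig, sig_len);
		if (IS_ERR(pkcs7))
			return PTR_ERR(pkcs7);

		/* The signed content is not embedded; supply it explicitly. */
		ret = pkcs7_supply_detached_data(pkcs7, data, data_len);
		if (ret < 0)
			goto out;

		ret = pkcs7_verify(pkcs7, VERIFYING_UNSPECIFIED_SIGNATURE);
		if (ret < 0)
			goto out;

		/* Require the signing chain to end at a key in the keyring. */
		ret = pkcs7_validate_trust(pkcs7, trust_keyring);
	out:
		pkcs7_free_message(pkcs7);
		return ret;
	}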
- */ -int tpm_note_key(void *context, size_t hdrlen, - unsigned char tag, - const void *value, size_t vlen) -{ - struct tpm_parse_context *ctx = context; - - ctx->blob = value; - ctx->blob_len = vlen; - - return 0; -} - -/* - * Parse a TPM-encrypted private key blob. - */ -static struct tpm_key *tpm_parse(const void *data, size_t datalen) -{ - struct tpm_parse_context ctx; - long ret; - - memset(&ctx, 0, sizeof(ctx)); - - /* Attempt to decode the private key */ - ret = asn1_ber_decoder(&tpm_decoder, &ctx, data, datalen); - if (ret < 0) - goto error; - - return tpm_key_create(ctx.blob, ctx.blob_len); - -error: - return ERR_PTR(ret); -} -/* - * Attempt to parse a data blob for a key as a TPM private key blob. - */ -static int tpm_key_preparse(struct key_preparsed_payload *prep) -{ - struct tpm_key *tk; - - /* - * TPM 1.2 keys are max 2048 bits long, so assume the blob is no - * more than 4x that - */ - if (prep->datalen > 256 * 4) - return -EMSGSIZE; - - tk = tpm_parse(prep->data, prep->datalen); - - if (IS_ERR(tk)) - return PTR_ERR(tk); - - /* We're pinning the module by being linked against it */ - __module_get(asym_tpm_subtype.owner); - prep->payload.data[asym_subtype] = &asym_tpm_subtype; - prep->payload.data[asym_key_ids] = NULL; - prep->payload.data[asym_crypto] = tk; - prep->payload.data[asym_auth] = NULL; - prep->quotalen = 100; - return 0; -} - -static struct asymmetric_key_parser tpm_key_parser = { - .owner = THIS_MODULE, - .name = "tpm_parser", - .parse = tpm_key_preparse, -}; - -static int __init tpm_key_init(void) -{ - return register_asymmetric_key_parser(&tpm_key_parser); -} - -static void __exit tpm_key_exit(void) -{ - unregister_asymmetric_key_parser(&tpm_key_parser); -} - -module_init(tpm_key_init); -module_exit(tpm_key_exit); - -MODULE_DESCRIPTION("TPM private key-blob parser"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index cc9dbcecaaca..7553ab18db89 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c @@ -376,7 +376,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, } error: - kzfree(desc); + kfree_sensitive(desc); error_no_desc: crypto_free_shash(tfm); kleave(" = %d", ret); @@ -447,6 +447,6 @@ int verify_pefile_signature(const void *pebuf, unsigned pelen, ret = pefile_digest_pe(pebuf, pelen, &ctx); error: - kzfree(ctx.digest); + kfree_sensitive(ctx.digest); return ret; } diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1 index 5c9f4e4a5231..92d59c32f96a 100644 --- a/crypto/asymmetric_keys/x509.asn1 +++ b/crypto/asymmetric_keys/x509.asn1 @@ -7,7 +7,7 @@ Certificate ::= SEQUENCE { TBSCertificate ::= SEQUENCE { version [ 0 ] Version DEFAULT, serialNumber CertificateSerialNumber ({ x509_note_serial }), - signature AlgorithmIdentifier ({ x509_note_pkey_algo }), + signature AlgorithmIdentifier ({ x509_note_sig_algo }), issuer Name ({ x509_note_issuer }), validity Validity, subject Name ({ x509_note_subject }), diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 26ec20ef4899..7a9b084e2043 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -19,15 +19,13 @@ struct x509_parse_context { struct x509_certificate *cert; /* Certificate being constructed */ unsigned long data; /* Start of data */ - const void *cert_start; /* Start of cert content */ const void *key; /* Key data */ size_t key_size; /* Size of 
key data */ const void *params; /* Key parameters */ size_t params_size; /* Size of key parameters */ - enum OID key_algo; /* Public key algorithm */ + enum OID key_algo; /* Algorithm used by the cert's key */ enum OID last_oid; /* Last OID encountered */ - enum OID algo_oid; /* Algorithm OID */ - unsigned char nr_mpi; /* Number of MPIs stored */ + enum OID sig_algo; /* Algorithm used to sign the cert */ u8 o_size; /* Size of organizationName (O) */ u8 cn_size; /* Size of commonName (CN) */ u8 email_size; /* Size of emailAddress */ @@ -187,11 +185,10 @@ int x509_note_tbs_certificate(void *context, size_t hdrlen, } /* - * Record the public key algorithm + * Record the algorithm that was used to sign this certificate. */ -int x509_note_pkey_algo(void *context, size_t hdrlen, - unsigned char tag, - const void *value, size_t vlen) +int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) { struct x509_parse_context *ctx = context; @@ -227,6 +224,26 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, ctx->cert->sig->hash_algo = "sha224"; goto rsa_pkcs1; + case OID_id_ecdsa_with_sha1: + ctx->cert->sig->hash_algo = "sha1"; + goto ecdsa; + + case OID_id_ecdsa_with_sha224: + ctx->cert->sig->hash_algo = "sha224"; + goto ecdsa; + + case OID_id_ecdsa_with_sha256: + ctx->cert->sig->hash_algo = "sha256"; + goto ecdsa; + + case OID_id_ecdsa_with_sha384: + ctx->cert->sig->hash_algo = "sha384"; + goto ecdsa; + + case OID_id_ecdsa_with_sha512: + ctx->cert->sig->hash_algo = "sha512"; + goto ecdsa; + case OID_gost2012Signature256: ctx->cert->sig->hash_algo = "streebog256"; goto ecrdsa; @@ -234,17 +251,31 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, case OID_gost2012Signature512: ctx->cert->sig->hash_algo = "streebog512"; goto ecrdsa; + + case OID_SM2_with_SM3: + ctx->cert->sig->hash_algo = "sm3"; + goto sm2; } rsa_pkcs1: ctx->cert->sig->pkey_algo = "rsa"; ctx->cert->sig->encoding = "pkcs1"; - ctx->algo_oid = ctx->last_oid; + ctx->sig_algo = ctx->last_oid; return 0; ecrdsa: ctx->cert->sig->pkey_algo = "ecrdsa"; ctx->cert->sig->encoding = "raw"; - ctx->algo_oid = ctx->last_oid; + ctx->sig_algo = ctx->last_oid; + return 0; +sm2: + ctx->cert->sig->pkey_algo = "sm2"; + ctx->cert->sig->encoding = "raw"; + ctx->sig_algo = ctx->last_oid; + return 0; +ecdsa: + ctx->cert->sig->pkey_algo = "ecdsa"; + ctx->cert->sig->encoding = "x962"; + ctx->sig_algo = ctx->last_oid; return 0; } @@ -257,16 +288,23 @@ int x509_note_signature(void *context, size_t hdrlen, { struct x509_parse_context *ctx = context; - pr_debug("Signature type: %u size %zu\n", ctx->last_oid, vlen); + pr_debug("Signature: alg=%u, size=%zu\n", ctx->last_oid, vlen); - if (ctx->last_oid != ctx->algo_oid) { - pr_warn("Got cert with pkey (%u) and sig (%u) algorithm OIDs\n", - ctx->algo_oid, ctx->last_oid); + /* + * In X.509 certificates, the signature's algorithm is stored in two + * places: inside the TBSCertificate (the data that is signed), and + * alongside the signature. These *must* match. 
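For reference, the two copies the comment above describes sit at the top level of the certificate structure (outline per RFC 5280, trimmed to the relevant fields; compare the x509.asn1 grammar earlier in this series):

	Certificate ::= SEQUENCE {
	    tbsCertificate      TBSCertificate,       -- carries a "signature" AlgorithmIdentifier, covered by the signature
	    signatureAlgorithm  AlgorithmIdentifier,  -- outer copy, NOT covered by the signature
	    signatureValue      BIT STRING
	}

Only the inner copy is signed, so honouring a mismatched outer copy would let an attacker choose which algorithm the verifier actually runs; hence the -EINVAL just below.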
+ */ + if (ctx->last_oid != ctx->sig_algo) { + pr_warn("signatureAlgorithm (%u) differs from tbsCertificate.signature (%u)\n", + ctx->last_oid, ctx->sig_algo); return -EINVAL; } if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 || - strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0) { + strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 || + strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 || + strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) { /* Discard the BIT STRING metadata */ if (vlen < 1 || *(const u8 *)value != 0) return -EBADMSG; @@ -405,8 +443,18 @@ int x509_note_issuer(void *context, size_t hdrlen, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; + struct asymmetric_key_id *kid; + ctx->cert->raw_issuer = value; ctx->cert->raw_issuer_size = vlen; + + if (!ctx->cert->sig->auth_ids[2]) { + kid = asymmetric_key_generate_id(value, vlen, "", 0); + if (IS_ERR(kid)) + return PTR_ERR(kid); + ctx->cert->sig->auth_ids[2] = kid; + } + return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->issuer, vlen); } @@ -449,15 +497,44 @@ int x509_extract_key_data(void *context, size_t hdrlen, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; + enum OID oid; ctx->key_algo = ctx->last_oid; - if (ctx->last_oid == OID_rsaEncryption) + switch (ctx->last_oid) { + case OID_rsaEncryption: ctx->cert->pub->pkey_algo = "rsa"; - else if (ctx->last_oid == OID_gost2012PKey256 || - ctx->last_oid == OID_gost2012PKey512) + break; + case OID_gost2012PKey256: + case OID_gost2012PKey512: ctx->cert->pub->pkey_algo = "ecrdsa"; - else + break; + case OID_sm2: + ctx->cert->pub->pkey_algo = "sm2"; + break; + case OID_id_ecPublicKey: + if (parse_OID(ctx->params, ctx->params_size, &oid) != 0) + return -EBADMSG; + + switch (oid) { + case OID_sm2: + ctx->cert->pub->pkey_algo = "sm2"; + break; + case OID_id_prime192v1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p192"; + break; + case OID_id_prime256v1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p256"; + break; + case OID_id_ansip384r1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p384"; + break; + default: + return -ENOPKG; + } + break; + default: return -ENOPKG; + } /* Discard the BIT STRING metadata */ if (vlen < 1 || *(const u8 *)value != 0) diff --git a/crypto/asymmetric_keys/x509_loader.c b/crypto/asymmetric_keys/x509_loader.c new file mode 100644 index 000000000000..1bc169dee22e --- /dev/null +++ b/crypto/asymmetric_keys/x509_loader.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/kernel.h> +#include <linux/key.h> +#include <keys/asymmetric-type.h> + +int x509_load_certificate_list(const u8 cert_list[], + const unsigned long list_size, + const struct key *keyring) +{ + key_ref_t key; + const u8 *p, *end; + size_t plen; + + p = cert_list; + end = p + list_size; + while (p < end) { + /* Each cert begins with an ASN.1 SEQUENCE tag and must be more + * than 256 bytes in size. 
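Concretely, the loader below only accepts certificates whose DER encoding uses the two-byte long form of the SEQUENCE length, i.e. blobs starting 0x30 0x82 <hi> <lo>. A sketch of the size arithmetic the loop performs (the helper name is illustrative; e.g. the selftest key earlier starts 30 82 05 55, giving 4 + 0x0555 = 1369 bytes):

	/* Total wire size of a DER SEQUENCE with a two-byte long-form length:
	 * tag (0x30) + 0x82 + two big-endian length bytes + content. */
	static inline size_t der_seq_total_len(const u8 *p)
	{
		return 4 + (((size_t)p[2] << 8) | p[3]);
	}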
+ */ + if (end - p < 4) + goto dodgy_cert; + if (p[0] != 0x30 && + p[1] != 0x82) + goto dodgy_cert; + plen = (p[2] << 8) | p[3]; + plen += 4; + if (plen > end - p) + goto dodgy_cert; + + key = key_create_or_update(make_key_ref(keyring, 1), + "asymmetric", + NULL, + p, + plen, + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA | + KEY_ALLOC_BUILT_IN | + KEY_ALLOC_BYPASS_RESTRICTION); + if (IS_ERR(key)) { + pr_err("Problem loading in-kernel X.509 certificate (%ld)\n", + PTR_ERR(key)); + } else { + pr_notice("Loaded X.509 cert '%s'\n", + key_ref_to_ptr(key)->description); + key_ref_put(key); + } + p += plen; + } + + return 0; + +dodgy_cert: + pr_err("Problem parsing in-kernel X.509 certificate list\n"); + return 0; +} diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index c233f136fb35..a299c9c56f40 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -22,7 +22,7 @@ struct x509_certificate { time64_t valid_to; const void *tbs; /* Signed data */ unsigned tbs_size; /* Size of signed data */ - unsigned raw_sig_size; /* Size of sigature */ + unsigned raw_sig_size; /* Size of signature */ const void *raw_sig; /* Signature data */ const void *raw_serial; /* Raw serial number in ASN.1 */ unsigned raw_serial_size; @@ -36,12 +36,20 @@ struct x509_certificate { bool seen; /* Infinite recursion prevention */ bool verified; bool self_signed; /* T if self-signed (check unsupported_sig too) */ - bool unsupported_key; /* T if key uses unsupported crypto */ bool unsupported_sig; /* T if signature uses unsupported crypto */ bool blacklisted; }; /* + * selftest.c + */ +#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST +extern int __init fips_signature_selftest(void); +#else +static inline int fips_signature_selftest(void) { return 0; } +#endif + +/* * x509_cert_parser.c */ extern void x509_free_certificate(struct x509_certificate *cert); diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index d964cc82b69c..0b4943a4592b 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -30,17 +30,8 @@ int x509_get_sig_params(struct x509_certificate *cert) pr_devel("==>%s()\n", __func__); - if (!cert->pub->pkey_algo) - cert->unsupported_key = true; - - if (!sig->pkey_algo) - cert->unsupported_sig = true; - - /* We check the hash if we can - even if we can't then verify it */ - if (!sig->hash_algo) { - cert->unsupported_sig = true; - return 0; - } + sig->data = cert->tbs; + sig->data_size = cert->tbs_size; sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL); if (!sig->s) @@ -78,7 +69,8 @@ int x509_get_sig_params(struct x509_certificate *cert) if (ret < 0) goto error_2; - ret = is_hash_blacklisted(sig->digest, sig->digest_size, "tbs"); + ret = is_hash_blacklisted(sig->digest, sig->digest_size, + BLACKLIST_HASH_X509_TBS); if (ret == -EKEYREJECTED) { pr_err("Cert %*phN is blacklisted\n", sig->digest_size, sig->digest); @@ -125,10 +117,6 @@ int x509_check_for_self_signed(struct x509_certificate *cert) goto out; } - ret = -EKEYREJECTED; - if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0) - goto out; - ret = public_key_verify_signature(cert->pub, cert->sig); if (ret < 0) { if (ret == -ENOPKG) { @@ -168,12 +156,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) pr_devel("Cert Issuer: %s\n", cert->issuer); pr_devel("Cert Subject: %s\n", cert->subject); - - if (cert->unsupported_key) { 
-		ret = -ENOPKG;
-		goto error_free_cert;
-	}
-
 	pr_devel("Cert Key Algo: %s\n", cert->pub->pkey_algo);
 	pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to);
@@ -218,6 +200,13 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
 		goto error_free_desc;
 	kids->id[0] = cert->id;
 	kids->id[1] = cert->skid;
+	kids->id[2] = asymmetric_key_generate_id(cert->raw_subject,
+						 cert->raw_subject_size,
+						 "", 0);
+	if (IS_ERR(kids->id[2])) {
+		ret = PTR_ERR(kids->id[2]);
+		goto error_free_kids;
+	}
 
 	/* We're pinning the module by being linked against it */
 	__module_get(public_key_subtype.owner);
@@ -234,8 +223,11 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
 	cert->skid = NULL;
 	cert->sig = NULL;
 	desc = NULL;
+	kids = NULL;
 	ret = 0;
 
+error_free_kids:
+	kfree(kids);
 error_free_desc:
 	kfree(desc);
 error_free_cert:
@@ -252,9 +244,14 @@ static struct asymmetric_key_parser x509_key_parser = {
 /*
  * Module stuff
  */
 static int __init x509_key_init(void)
 {
-	return register_asymmetric_key_parser(&x509_key_parser);
+	int ret;
+
+	ret = register_asymmetric_key_parser(&x509_key_parser);
+	if (ret < 0)
+		return ret;
+	return fips_signature_selftest();
 }
 
 static void __exit x509_key_exit(void)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 341ece61cf9b..f9cdc5e91664 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -104,7 +104,7 @@ do_async_gen_syndrome(struct dma_chan *chan,
  * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
  */
 static void
-do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
 		     size_t len, struct async_submit_ctl *submit)
 {
 	void **srcs;
@@ -121,7 +121,8 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
 			srcs[i] = (void*)raid6_empty_zero_page;
 		} else {
-			srcs[i] = page_address(blocks[i]) + offset;
+			srcs[i] = page_address(blocks[i]) + offsets[i];
+
 			if (i < disks - 2) {
 				stop = i;
 				if (start == -1)
@@ -138,10 +139,23 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	async_tx_sync_epilog(submit);
 }
 
+static inline bool
+is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
+				     int src_cnt, size_t len)
+{
+	int i;
+
+	for (i = 0; i < src_cnt; i++) {
+		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
+			return false;
+	}
+	return true;
+}
+
 /**
  * async_gen_syndrome - asynchronously calculate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: offset array into each block (src and dest) to start transaction
  * @disks: number of blocks (including missing P or Q, see below)
  * @len: length of operation in bytes
  * @submit: submission/completion modifiers
@@ -160,7 +174,7 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * path.
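With this conversion, every async_pq caller passes one offset per block rather than a single shared offset. A minimal caller sketch under assumed names (modelled on the raid6test.c changes later in this patch; needs <linux/async_tx.h>):

	/* Compute P/Q over 'disks' pages, each starting at its own offset. */
	static struct dma_async_tx_descriptor *
	gen_syndrome_example(struct page **blocks, unsigned int *offs, int disks,
			     addr_conv_t *addr_conv)
	{
		struct async_submit_ctl submit;
		struct dma_async_tx_descriptor *tx;

		init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, addr_conv);
		tx = async_gen_syndrome(blocks, offs, disks, PAGE_SIZE, &submit);
		async_tx_issue_pending(tx);
		return tx;
	}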
*/ struct dma_async_tx_descriptor * -async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, +async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) { int src_cnt = disks - 2; @@ -179,7 +193,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) && (src_cnt <= dma_maxpq(device, 0) || dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && - is_dma_pq_aligned(device, offset, 0, len)) { + is_dma_pq_aligned_offs(device, offsets, disks, len)) { struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = 0; unsigned char coefs[MAX_DISKS]; @@ -196,8 +210,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, for (i = 0, j = 0; i < src_cnt; i++) { if (blocks[i] == NULL) continue; - unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, - len, DMA_TO_DEVICE); + unmap->addr[j] = dma_map_page(device->dev, blocks[i], + offsets[i], len, DMA_TO_DEVICE); coefs[j] = raid6_gfexp[i]; unmap->to_cnt++; j++; @@ -210,7 +224,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, unmap->bidi_cnt++; if (P(blocks, disks)) unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), - offset, len, DMA_BIDIRECTIONAL); + P(offsets, disks), + len, DMA_BIDIRECTIONAL); else { unmap->addr[j++] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_P; @@ -219,7 +234,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, unmap->bidi_cnt++; if (Q(blocks, disks)) unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), - offset, len, DMA_BIDIRECTIONAL); + Q(offsets, disks), + len, DMA_BIDIRECTIONAL); else { unmap->addr[j++] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_Q; @@ -240,13 +256,13 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, if (!P(blocks, disks)) { P(blocks, disks) = pq_scribble_page; - BUG_ON(len + offset > PAGE_SIZE); + P(offsets, disks) = 0; } if (!Q(blocks, disks)) { Q(blocks, disks) = pq_scribble_page; - BUG_ON(len + offset > PAGE_SIZE); + Q(offsets, disks) = 0; } - do_sync_gen_syndrome(blocks, offset, disks, len, submit); + do_sync_gen_syndrome(blocks, offsets, disks, len, submit); return NULL; } @@ -270,6 +286,7 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si * @len: length of operation in bytes * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set * @spare: temporary result buffer for the synchronous case + * @s_off: spare buffer page offset * @submit: submission / completion modifiers * * The same notes from async_gen_syndrome apply to the 'blocks', @@ -278,9 +295,9 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si * specified. */ struct dma_async_tx_descriptor * -async_syndrome_val(struct page **blocks, unsigned int offset, int disks, +async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, - struct async_submit_ctl *submit) + unsigned int s_off, struct async_submit_ctl *submit) { struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); struct dma_device *device = chan ? 
chan->device : NULL; @@ -295,7 +312,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); if (unmap && disks <= dma_maxpq(device, 0) && - is_dma_pq_aligned(device, offset, 0, len)) { + is_dma_pq_aligned_offs(device, offsets, disks, len)) { struct device *dev = device->dev; dma_addr_t pq[2]; int i, j = 0, src_cnt = 0; @@ -307,7 +324,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, for (i = 0; i < disks-2; i++) if (likely(blocks[i])) { unmap->addr[j] = dma_map_page(dev, blocks[i], - offset, len, + offsets[i], len, DMA_TO_DEVICE); coefs[j] = raid6_gfexp[i]; unmap->to_cnt++; @@ -320,7 +337,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, dma_flags |= DMA_PREP_PQ_DISABLE_P; } else { pq[0] = dma_map_page(dev, P(blocks, disks), - offset, len, + P(offsets, disks), len, DMA_TO_DEVICE); unmap->addr[j++] = pq[0]; unmap->to_cnt++; @@ -330,7 +347,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, dma_flags |= DMA_PREP_PQ_DISABLE_Q; } else { pq[1] = dma_map_page(dev, Q(blocks, disks), - offset, len, + Q(offsets, disks), len, DMA_TO_DEVICE); unmap->addr[j++] = pq[1]; unmap->to_cnt++; @@ -355,7 +372,9 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, async_tx_submit(chan, tx, submit); } else { struct page *p_src = P(blocks, disks); + unsigned int p_off = P(offsets, disks); struct page *q_src = Q(blocks, disks); + unsigned int q_off = Q(offsets, disks); enum async_tx_flags flags_orig = submit->flags; dma_async_tx_callback cb_fn_orig = submit->cb_fn; void *scribble = submit->scribble; @@ -381,27 +400,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, if (p_src) { init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL, scribble); - tx = async_xor(spare, blocks, offset, disks-2, len, submit); + tx = async_xor_offs(spare, s_off, + blocks, offsets, disks-2, len, submit); async_tx_quiesce(&tx); - p = page_address(p_src) + offset; - s = page_address(spare) + offset; + p = page_address(p_src) + p_off; + s = page_address(spare) + s_off; *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P; } if (q_src) { P(blocks, disks) = NULL; Q(blocks, disks) = spare; + Q(offsets, disks) = s_off; init_async_submit(submit, 0, NULL, NULL, NULL, scribble); - tx = async_gen_syndrome(blocks, offset, disks, len, submit); + tx = async_gen_syndrome(blocks, offsets, disks, + len, submit); async_tx_quiesce(&tx); - q = page_address(q_src) + offset; - s = page_address(spare) + offset; + q = page_address(q_src) + q_off; + s = page_address(spare) + s_off; *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q; } /* restore P, Q and submit */ P(blocks, disks) = p_src; + P(offsets, disks) = p_off; Q(blocks, disks) = q_src; + Q(offsets, disks) = q_off; submit->cb_fn = cb_fn_orig; submit->cb_param = cb_param_orig; diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index f249142ceac4..354b8cd5537f 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -15,8 +15,9 @@ #include <linux/dmaengine.h> static struct dma_async_tx_descriptor * -async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, - size_t len, struct async_submit_ctl *submit) +async_sum_product(struct page *dest, unsigned int d_off, + struct page **srcs, unsigned int *src_offs, unsigned char *coef, + size_t len, struct async_submit_ctl *submit) { struct dma_chan *chan = 
async_tx_find_channel(submit, DMA_PQ, &dest, 1, srcs, 2, len); @@ -37,11 +38,14 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); - unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); + unmap->addr[0] = dma_map_page(dev, srcs[0], src_offs[0], + len, DMA_TO_DEVICE); + unmap->addr[1] = dma_map_page(dev, srcs[1], src_offs[1], + len, DMA_TO_DEVICE); unmap->to_cnt = 2; - unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + unmap->addr[2] = dma_map_page(dev, dest, d_off, + len, DMA_BIDIRECTIONAL); unmap->bidi_cnt = 1; /* engine only looks at Q, but expects it to follow P */ pq[1] = unmap->addr[2]; @@ -66,9 +70,9 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, async_tx_quiesce(&submit->depend_tx); amul = raid6_gfmul[coef[0]]; bmul = raid6_gfmul[coef[1]]; - a = page_address(srcs[0]); - b = page_address(srcs[1]); - c = page_address(dest); + a = page_address(srcs[0]) + src_offs[0]; + b = page_address(srcs[1]) + src_offs[1]; + c = page_address(dest) + d_off; while (len--) { ax = amul[*a++]; @@ -80,8 +84,9 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, } static struct dma_async_tx_descriptor * -async_mult(struct page *dest, struct page *src, u8 coef, size_t len, - struct async_submit_ctl *submit) +async_mult(struct page *dest, unsigned int d_off, struct page *src, + unsigned int s_off, u8 coef, size_t len, + struct async_submit_ctl *submit) { struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, &src, 1, len); @@ -101,9 +106,11 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); + unmap->addr[0] = dma_map_page(dev, src, s_off, + len, DMA_TO_DEVICE); unmap->to_cnt++; - unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + unmap->addr[1] = dma_map_page(dev, dest, d_off, + len, DMA_BIDIRECTIONAL); dma_dest[1] = unmap->addr[1]; unmap->bidi_cnt++; unmap->len = len; @@ -133,8 +140,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, */ async_tx_quiesce(&submit->depend_tx); qmul = raid6_gfmul[coef]; - d = page_address(dest); - s = page_address(src); + d = page_address(dest) + d_off; + s = page_address(src) + s_off; while (len--) *d++ = qmul[*s++]; @@ -144,11 +151,14 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, static struct dma_async_tx_descriptor * __2data_recov_4(int disks, size_t bytes, int faila, int failb, - struct page **blocks, struct async_submit_ctl *submit) + struct page **blocks, unsigned int *offs, + struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *a, *b; + unsigned int p_off, q_off, a_off, b_off; struct page *srcs[2]; + unsigned int src_offs[2]; unsigned char coef[2]; enum async_tx_flags flags = submit->flags; dma_async_tx_callback cb_fn = submit->cb_fn; @@ -156,26 +166,34 @@ __2data_recov_4(int disks, size_t bytes, int faila, int failb, void *scribble = submit->scribble; p = blocks[disks-2]; + p_off = offs[disks-2]; q = blocks[disks-1]; + q_off = offs[disks-1]; a = blocks[faila]; + a_off = offs[faila]; b = blocks[failb]; + b_off = offs[failb]; /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */ /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ srcs[0] = p; + 
src_offs[0] = p_off; srcs[1] = q; + src_offs[1] = q_off; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_sum_product(b, srcs, coef, bytes, submit); + tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ srcs[0] = p; + src_offs[0] = p_off; srcs[1] = b; + src_offs[1] = b_off; init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, cb_param, scribble); - tx = async_xor(a, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(a, a_off, srcs, src_offs, 2, bytes, submit); return tx; @@ -183,11 +201,14 @@ __2data_recov_4(int disks, size_t bytes, int faila, int failb, static struct dma_async_tx_descriptor * __2data_recov_5(int disks, size_t bytes, int faila, int failb, - struct page **blocks, struct async_submit_ctl *submit) + struct page **blocks, unsigned int *offs, + struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *g, *dp, *dq; + unsigned int p_off, q_off, g_off, dp_off, dq_off; struct page *srcs[2]; + unsigned int src_offs[2]; unsigned char coef[2]; enum async_tx_flags flags = submit->flags; dma_async_tx_callback cb_fn = submit->cb_fn; @@ -208,60 +229,77 @@ __2data_recov_5(int disks, size_t bytes, int faila, int failb, BUG_ON(good_srcs > 1); p = blocks[disks-2]; + p_off = offs[disks-2]; q = blocks[disks-1]; + q_off = offs[disks-1]; g = blocks[good]; + g_off = offs[good]; /* Compute syndrome with zero for the missing data pages * Use the dead data pages as temporary storage for delta p and * delta q */ dp = blocks[faila]; + dp_off = offs[faila]; dq = blocks[failb]; + dq_off = offs[failb]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_memcpy(dp, g, 0, 0, bytes, submit); + tx = async_memcpy(dp, g, dp_off, g_off, bytes, submit); init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); + tx = async_mult(dq, dq_off, g, g_off, + raid6_gfexp[good], bytes, submit); /* compute P + Pxy */ srcs[0] = dp; + src_offs[0] = dp_off; srcs[1] = p; + src_offs[1] = p_off; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); - tx = async_xor(dp, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); /* compute Q + Qxy */ srcs[0] = dq; + src_offs[0] = dq_off; srcs[1] = q; + src_offs[1] = q_off; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); - tx = async_xor(dq, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ srcs[0] = dp; + src_offs[0] = dp_off; srcs[1] = dq; + src_offs[1] = dq_off; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_sum_product(dq, srcs, coef, bytes, submit); + tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ srcs[0] = dp; + src_offs[0] = dp_off; srcs[1] = dq; + src_offs[1] = dq_off; init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, cb_param, scribble); - tx = async_xor(dp, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); return tx; } static struct dma_async_tx_descriptor * __2data_recov_n(int disks, size_t bytes, int faila, int 
failb, - struct page **blocks, struct async_submit_ctl *submit) + struct page **blocks, unsigned int *offs, + struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *dp, *dq; + unsigned int p_off, q_off, dp_off, dq_off; struct page *srcs[2]; + unsigned int src_offs[2]; unsigned char coef[2]; enum async_tx_flags flags = submit->flags; dma_async_tx_callback cb_fn = submit->cb_fn; @@ -269,56 +307,74 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, void *scribble = submit->scribble; p = blocks[disks-2]; + p_off = offs[disks-2]; q = blocks[disks-1]; + q_off = offs[disks-1]; /* Compute syndrome with zero for the missing data pages * Use the dead data pages as temporary storage for * delta p and delta q */ dp = blocks[faila]; + dp_off = offs[faila]; blocks[faila] = NULL; blocks[disks-2] = dp; + offs[disks-2] = dp_off; dq = blocks[failb]; + dq_off = offs[failb]; blocks[failb] = NULL; blocks[disks-1] = dq; + offs[disks-1] = dq_off; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); + tx = async_gen_syndrome(blocks, offs, disks, bytes, submit); /* Restore pointer table */ blocks[faila] = dp; + offs[faila] = dp_off; blocks[failb] = dq; + offs[failb] = dq_off; blocks[disks-2] = p; + offs[disks-2] = p_off; blocks[disks-1] = q; + offs[disks-1] = q_off; /* compute P + Pxy */ srcs[0] = dp; + src_offs[0] = dp_off; srcs[1] = p; + src_offs[1] = p_off; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); - tx = async_xor(dp, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); /* compute Q + Qxy */ srcs[0] = dq; + src_offs[0] = dq_off; srcs[1] = q; + src_offs[1] = q_off; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); - tx = async_xor(dq, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ srcs[0] = dp; + src_offs[0] = dp_off; srcs[1] = dq; + src_offs[1] = dq_off; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_sum_product(dq, srcs, coef, bytes, submit); + tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ srcs[0] = dp; + src_offs[0] = dp_off; srcs[1] = dq; + src_offs[1] = dq_off; init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, cb_param, scribble); - tx = async_xor(dp, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); return tx; } @@ -330,11 +386,13 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, * @faila: first failed drive index * @failb: second failed drive index * @blocks: array of source pointers where the last two entries are p and q + * @offs: array of offset for pages in blocks * @submit: submission/completion modifiers */ struct dma_async_tx_descriptor * async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, - struct page **blocks, struct async_submit_ctl *submit) + struct page **blocks, unsigned int *offs, + struct async_submit_ctl *submit) { void *scribble = submit->scribble; int non_zero_srcs, i; @@ -358,7 +416,7 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, if (blocks[i] == NULL) ptrs[i] = (void *) raid6_empty_zero_page; else - ptrs[i] = page_address(blocks[i]); + 
ptrs[i] = page_address(blocks[i]) + offs[i]; raid6_2data_recov(disks, bytes, faila, failb, ptrs); @@ -383,16 +441,19 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, * explicitly handle the special case of a 4 disk array with * both data disks missing. */ - return __2data_recov_4(disks, bytes, faila, failb, blocks, submit); + return __2data_recov_4(disks, bytes, faila, failb, + blocks, offs, submit); case 3: /* dma devices do not uniformly understand a single * source pq operation (in contrast to the synchronous * case), so explicitly handle the special case of a 5 disk * array with 2 of 3 data disks missing. */ - return __2data_recov_5(disks, bytes, faila, failb, blocks, submit); + return __2data_recov_5(disks, bytes, faila, failb, + blocks, offs, submit); default: - return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); + return __2data_recov_n(disks, bytes, faila, failb, + blocks, offs, submit); } } EXPORT_SYMBOL_GPL(async_raid6_2data_recov); @@ -403,14 +464,17 @@ EXPORT_SYMBOL_GPL(async_raid6_2data_recov); * @bytes: block size * @faila: failed drive index * @blocks: array of source pointers where the last two entries are p and q + * @offs: array of offset for pages in blocks * @submit: submission/completion modifiers */ struct dma_async_tx_descriptor * async_raid6_datap_recov(int disks, size_t bytes, int faila, - struct page **blocks, struct async_submit_ctl *submit) + struct page **blocks, unsigned int *offs, + struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *dq; + unsigned int p_off, q_off, dq_off; u8 coef; enum async_tx_flags flags = submit->flags; dma_async_tx_callback cb_fn = submit->cb_fn; @@ -418,6 +482,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, void *scribble = submit->scribble; int good_srcs, good, i; struct page *srcs[2]; + unsigned int src_offs[2]; pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); @@ -434,7 +499,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, if (blocks[i] == NULL) ptrs[i] = (void*)raid6_empty_zero_page; else - ptrs[i] = page_address(blocks[i]); + ptrs[i] = page_address(blocks[i]) + offs[i]; raid6_datap_recov(disks, bytes, faila, ptrs); @@ -458,55 +523,67 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, BUG_ON(good_srcs == 0); p = blocks[disks-2]; + p_off = offs[disks-2]; q = blocks[disks-1]; + q_off = offs[disks-1]; /* Compute syndrome with zero for the missing data page * Use the dead data page as temporary storage for delta q */ dq = blocks[faila]; + dq_off = offs[faila]; blocks[faila] = NULL; blocks[disks-1] = dq; + offs[disks-1] = dq_off; /* in the 4-disk case we only need to perform a single source * multiplication with the one good data block. 
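Background for the arithmetic in this function: over GF(2^8) the Q syndrome is Q = sum over i of g^i * D_i, so a syndrome Q' recomputed with D_faila treated as zero satisfies Q xor Q' = g^faila * D_faila. The code below therefore xors Q into dq and scales by the inverse power, taken straight from the lib/raid6 lookup tables:

	/* g^{-faila} in GF(2^8): raid6_gfexp[i] = g^i, raid6_gfinv[x] = x^{-1};
	 * the same computation the function performs just below. */
	coef = raid6_gfinv[raid6_gfexp[faila]];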
*/ if (good_srcs == 1) { struct page *g = blocks[good]; + unsigned int g_off = offs[good]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_memcpy(p, g, 0, 0, bytes, submit); + tx = async_memcpy(p, g, p_off, g_off, bytes, submit); init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); + tx = async_mult(dq, dq_off, g, g_off, + raid6_gfexp[good], bytes, submit); } else { init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); + tx = async_gen_syndrome(blocks, offs, disks, bytes, submit); } /* Restore pointer table */ blocks[faila] = dq; + offs[faila] = dq_off; blocks[disks-1] = q; + offs[disks-1] = q_off; /* calculate g^{-faila} */ coef = raid6_gfinv[raid6_gfexp[faila]]; srcs[0] = dq; + src_offs[0] = dq_off; srcs[1] = q; + src_offs[1] = q_off; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); - tx = async_xor(dq, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); - tx = async_mult(dq, dq, coef, bytes, submit); + tx = async_mult(dq, dq_off, dq, dq_off, coef, bytes, submit); srcs[0] = p; + src_offs[0] = p_off; srcs[1] = dq; + src_offs[1] = dq_off; init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, cb_param, scribble); - tx = async_xor(p, srcs, 0, 2, bytes, submit); + tx = async_xor_offs(p, p_off, srcs, src_offs, 2, bytes, submit); return tx; } diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 4e5eebe52e6a..1a3855284091 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -97,7 +97,8 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, } static void -do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, +do_sync_xor_offs(struct page *dest, unsigned int offset, + struct page **src_list, unsigned int *src_offs, int src_cnt, size_t len, struct async_submit_ctl *submit) { int i; @@ -114,7 +115,8 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, /* convert to buffer pointers */ for (i = 0; i < src_cnt; i++) if (src_list[i]) - srcs[xor_src_cnt++] = page_address(src_list[i]) + offset; + srcs[xor_src_cnt++] = page_address(src_list[i]) + + (src_offs ? src_offs[i] : offset); src_cnt = xor_src_cnt; /* set destination address */ dest_buf = page_address(dest) + offset; @@ -135,11 +137,31 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, async_tx_sync_epilog(submit); } +static inline bool +dma_xor_aligned_offsets(struct dma_device *device, unsigned int offset, + unsigned int *src_offs, int src_cnt, int len) +{ + int i; + + if (!is_dma_xor_aligned(device, offset, 0, len)) + return false; + + if (!src_offs) + return true; + + for (i = 0; i < src_cnt; i++) { + if (!is_dma_xor_aligned(device, src_offs[i], 0, len)) + return false; + } + return true; +} + /** - * async_xor - attempt to xor a set of blocks with a dma engine. + * async_xor_offs - attempt to xor a set of blocks with a dma engine. 
* @dest: destination page + * @offset: dst offset to start transaction * @src_list: array of source pages - * @offset: common src/dst offset to start transaction + * @src_offs: array of source pages offset, NULL means common src/dst offset * @src_cnt: number of source pages * @len: length in bytes * @submit: submission / completion modifiers @@ -148,8 +170,8 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, * * xor_blocks always uses the dest as a source so the * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in - * the calculation. The assumption with dma eninges is that they only - * use the destination buffer as a source when it is explicity specified + * the calculation. The assumption with dma engines is that they only + * use the destination buffer as a source when it is explicitly specified * in the source list. * * src_list note: if the dest is also a source it must be at index zero. @@ -157,8 +179,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, * is not specified. */ struct dma_async_tx_descriptor * -async_xor(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, struct async_submit_ctl *submit) +async_xor_offs(struct page *dest, unsigned int offset, + struct page **src_list, unsigned int *src_offs, + int src_cnt, size_t len, struct async_submit_ctl *submit) { struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, &dest, 1, src_list, @@ -171,7 +194,8 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, if (device) unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT); - if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { + if (unmap && dma_xor_aligned_offsets(device, offset, + src_offs, src_cnt, len)) { struct dma_async_tx_descriptor *tx; int i, j; @@ -184,7 +208,8 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, continue; unmap->to_cnt++; unmap->addr[j++] = dma_map_page(device->dev, src_list[i], - offset, len, DMA_TO_DEVICE); + src_offs ? src_offs[i] : offset, + len, DMA_TO_DEVICE); } /* map it bidirectional as it may be re-used as a source */ @@ -208,16 +233,49 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, if (submit->flags & ASYNC_TX_XOR_DROP_DST) { src_cnt--; src_list++; + if (src_offs) + src_offs++; } /* wait for any prerequisite operations */ async_tx_quiesce(&submit->depend_tx); - do_sync_xor(dest, src_list, offset, src_cnt, len, submit); + do_sync_xor_offs(dest, offset, src_list, src_offs, + src_cnt, len, submit); return NULL; } } +EXPORT_SYMBOL_GPL(async_xor_offs); + +/** + * async_xor - attempt to xor a set of blocks with a dma engine. + * @dest: destination page + * @src_list: array of source pages + * @offset: common src/dst offset to start transaction + * @src_cnt: number of source pages + * @len: length in bytes + * @submit: submission / completion modifiers + * + * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST + * + * xor_blocks always uses the dest as a source so the + * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in + * the calculation. The assumption with dma engines is that they only + * use the destination buffer as a source when it is explicitly specified + * in the source list. + * + * src_list note: if the dest is also a source it must be at index zero. + * The contents of this array will be overwritten if a scribble region + * is not specified. 
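A short usage sketch for the new entry point, with placeholder names (pa, pb, dest and scribble are the caller's): xor two 512-byte regions that live at different offsets inside their source pages:

	static struct dma_async_tx_descriptor *
	xor_two_regions(struct page *dest, struct page *pa, struct page *pb,
			addr_conv_t *scribble)
	{
		struct page *srcs[2] = { pa, pb };
		unsigned int src_offs[2] = { 512, 1024 };	/* per-source offsets */
		struct async_submit_ctl submit;

		/* ASYNC_TX_XOR_ZERO_DST: don't mix existing dest bytes into the sum */
		init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
				  scribble);
		return async_xor_offs(dest, 0, srcs, src_offs, 2, 512, &submit);
	}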
+ */
+struct dma_async_tx_descriptor *
+async_xor(struct page *dest, struct page **src_list, unsigned int offset,
+	  int src_cnt, size_t len, struct async_submit_ctl *submit)
+{
+	return async_xor_offs(dest, offset, src_list, NULL,
+			      src_cnt, len, submit);
+}
 EXPORT_SYMBOL_GPL(async_xor);
 
 static int page_is_zero(struct page *p, unsigned int offset, size_t len)
@@ -237,10 +295,11 @@ xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
 }
 
 /**
- * async_xor_val - attempt a xor parity check with a dma engine.
+ * async_xor_val_offs - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
+ * @offset: dest offset in pages to start transaction
  * @src_list: array of source pages
- * @offset: offset in pages to start transaction
+ * @src_offs: array of source page offsets, NULL means common src/dst offset
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @result: 0 if sum == 0 else non-zero
@@ -253,9 +312,10 @@ xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
  * is not specified.
  */
 struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
-	      int src_cnt, size_t len, enum sum_check_flags *result,
-	      struct async_submit_ctl *submit)
+async_xor_val_offs(struct page *dest, unsigned int offset,
+		   struct page **src_list, unsigned int *src_offs,
+		   int src_cnt, size_t len, enum sum_check_flags *result,
+		   struct async_submit_ctl *submit)
 {
 	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
@@ -268,7 +328,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
-	    is_dma_xor_aligned(device, offset, 0, len)) {
+	    dma_xor_aligned_offsets(device, offset, src_offs, src_cnt, len)) {
 		unsigned long dma_prep_flags = 0;
 		int i;
 
@@ -281,7 +341,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 
 		for (i = 0; i < src_cnt; i++) {
 			unmap->addr[i] = dma_map_page(device->dev, src_list[i],
-						      offset, len, DMA_TO_DEVICE);
+						      src_offs ? src_offs[i] : offset,
+						      len, DMA_TO_DEVICE);
 			unmap->to_cnt++;
 		}
 		unmap->len = len;
@@ -312,7 +373,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 		submit->flags |= ASYNC_TX_XOR_DROP_DST;
 		submit->flags &= ~ASYNC_TX_ACK;
 
-		tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
+		tx = async_xor_offs(dest, offset, src_list, src_offs,
+				src_cnt, len, submit);
 
 		async_tx_quiesce(&tx);
 
@@ -325,6 +387,32 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 
 	return tx;
 }
+EXPORT_SYMBOL_GPL(async_xor_val_offs);
+
+/**
+ * async_xor_val - attempt a xor parity check with a dma engine.
+ * @dest: destination page used if the xor is performed synchronously
+ * @src_list: array of source pages
+ * @offset: offset in pages to start transaction
+ * @src_cnt: number of source pages
+ * @len: length in bytes
+ * @result: 0 if sum == 0 else non-zero
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
+ */ +struct dma_async_tx_descriptor * +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, enum sum_check_flags *result, + struct async_submit_ctl *submit) +{ + return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt, + len, result, submit); +} EXPORT_SYMBOL_GPL(async_xor_val); MODULE_AUTHOR("Intel Corporation"); diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c index 14e73dcd7475..d3fbee1e03e5 100644 --- a/crypto/async_tx/raid6test.c +++ b/crypto/async_tx/raid6test.c @@ -18,6 +18,7 @@ #define NDISKS 64 /* Including P and Q */ static struct page *dataptrs[NDISKS]; +unsigned int dataoffs[NDISKS]; static addr_conv_t addr_conv[NDISKS]; static struct page *data[NDISKS+3]; static struct page *spare; @@ -36,8 +37,9 @@ static void makedata(int disks) int i; for (i = 0; i < disks; i++) { - prandom_bytes(page_address(data[i]), PAGE_SIZE); + get_random_bytes(page_address(data[i]), PAGE_SIZE); dataptrs[i] = data[i]; + dataoffs[i] = 0; } } @@ -52,7 +54,8 @@ static char disk_type(int d, int disks) } /* Recover two failed blocks. */ -static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs) +static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, + struct page **ptrs, unsigned int *offs) { struct async_submit_ctl submit; struct completion cmp; @@ -66,7 +69,8 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru if (faila == disks-2) { /* P+Q failure. Just rebuild the syndrome. */ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); - tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); + tx = async_gen_syndrome(ptrs, offs, + disks, bytes, &submit); } else { struct page *blocks[NDISKS]; struct page *dest; @@ -89,22 +93,26 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru tx = async_xor(dest, blocks, 0, count, bytes, &submit); init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); - tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); + tx = async_gen_syndrome(ptrs, offs, + disks, bytes, &submit); } } else { if (failb == disks-2) { /* data+P failure. */ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); - tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit); + tx = async_raid6_datap_recov(disks, bytes, + faila, ptrs, offs, &submit); } else { /* data+data failure. 
*/ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); - tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit); + tx = async_raid6_2data_recov(disks, bytes, + faila, failb, ptrs, offs, &submit); } } init_completion(&cmp); init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv); - tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit); + tx = async_syndrome_val(ptrs, offs, + disks, bytes, &result, spare, 0, &submit); async_tx_issue_pending(tx); if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) @@ -126,7 +134,7 @@ static int test_disks(int i, int j, int disks) dataptrs[i] = recovi; dataptrs[j] = recovj; - raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs); + raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs, dataoffs); erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE); errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE); @@ -162,7 +170,7 @@ static int test(int disks, int *tests) /* Generate assumed good syndrome */ init_completion(&cmp); init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv); - tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit); + tx = async_gen_syndrome(dataptrs, dataoffs, disks, PAGE_SIZE, &submit); async_tx_issue_pending(tx); if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) { @@ -181,7 +189,7 @@ static int test(int disks, int *tests) } -static int raid6_test(void) +static int __init raid6_test(void) { int err = 0; int tests = 0; @@ -209,7 +217,7 @@ static int raid6_test(void) err += test(12, &tests); } - /* the 24 disk case is special for ioatdma as it is the boudary point + /* the 24 disk case is special for ioatdma as it is the boundary point * at which it needs to switch from 8-source ops to 16-source * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set) */ @@ -228,12 +236,12 @@ static int raid6_test(void) return 0; } -static void raid6_test_exit(void) +static void __exit raid6_test_exit(void) { } /* when compiled-in wait for drivers to load first (assumes dma drivers - * are also compliled-in) + * are also compiled-in) */ late_initcall(raid6_test); module_exit(raid6_test_exit); diff --git a/crypto/authenc.c b/crypto/authenc.c index 775e7138fd10..17f674a7cdff 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -253,7 +253,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); skcipher_request_set_tfm(skreq, ctx->enc); - skcipher_request_set_callback(skreq, aead_request_flags(req), + skcipher_request_set_callback(skreq, flags, req->base.complete, req->base.data); skcipher_request_set_crypt(skreq, src, dst, req->cryptlen - authsize, req->iv); @@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst) static int crypto_authenc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; @@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -423,8 +417,6 @@ static 
int crypto_authenc_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 589008146fce..b60e61b1904c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst) static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; @@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; @@ -458,7 +450,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, inst->alg.encrypt = crypto_authenc_esn_encrypt; inst->alg.decrypt = crypto_authenc_esn_decrypt; - inst->free = crypto_authenc_esn_free, + inst->free = crypto_authenc_esn_free; err = aead_register_instance(tmpl, inst); if (err) { diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 1d262374fa4e..6704c0355889 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -1,55 +1,27 @@ // SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0) /* - * BLAKE2b reference source code package - reference C implementations + * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b + * reference implementation, but it has been heavily modified for use in the + * kernel. The reference implementation was: * - * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the - * terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at - * your option. The terms of these licenses can be found at: + * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under + * the terms of the CC0, the OpenSSL Licence, or the Apache Public License + * 2.0, at your option. The terms of these licenses can be found at: * - * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - * - OpenSSL license : https://www.openssl.org/source/license.html - * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 + * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 + * - OpenSSL license : https://www.openssl.org/source/license.html + * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 * - * More information about the BLAKE2 hash function can be found at - * https://blake2.net. 
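[Editor's note] Both authenc and authencesn (and, further down in this diff, ccm and chacha20poly1305) replace the open-coded crypto_get_attr_type()/crypto_requires_sync() sequence with a single crypto_check_attr_type() call that validates the requested algorithm type and computes the inherited mask in one step. A hedged sketch of the resulting template-create shape; my_aead_create() is hypothetical and the instance setup is elided:

```c
#include <crypto/algapi.h>

static int my_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	int err;

	/*
	 * One call now verifies the user asked for an AEAD and derives
	 * the mask that child algorithms must satisfy.
	 */
	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
	if (err)
		return err;

	/* ... allocate the instance and grab spawns with @mask ... */
	return 0;
}
```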
- * - * Note: the original sources have been modified for inclusion in linux kernel - * in terms of coding style, using generic helpers and simplifications of error - * handling. + * More information about BLAKE2 can be found at https://blake2.net. */ #include <asm/unaligned.h> #include <linux/module.h> -#include <linux/string.h> #include <linux/kernel.h> #include <linux/bitops.h> +#include <crypto/internal/blake2b.h> #include <crypto/internal/hash.h> -#define BLAKE2B_160_DIGEST_SIZE (160 / 8) -#define BLAKE2B_256_DIGEST_SIZE (256 / 8) -#define BLAKE2B_384_DIGEST_SIZE (384 / 8) -#define BLAKE2B_512_DIGEST_SIZE (512 / 8) - -enum blake2b_constant { - BLAKE2B_BLOCKBYTES = 128, - BLAKE2B_KEYBYTES = 64, -}; - -struct blake2b_state { - u64 h[8]; - u64 t[2]; - u64 f[2]; - u8 buf[BLAKE2B_BLOCKBYTES]; - size_t buflen; -}; - -static const u64 blake2b_IV[8] = { - 0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, - 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL, - 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, - 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL -}; - static const u8 blake2b_sigma[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, @@ -95,8 +67,8 @@ static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc) G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \ } while (0) -static void blake2b_compress(struct blake2b_state *S, - const u8 block[BLAKE2B_BLOCKBYTES]) +static void blake2b_compress_one_generic(struct blake2b_state *S, + const u8 block[BLAKE2B_BLOCK_SIZE]) { u64 m[16]; u64 v[16]; @@ -108,14 +80,14 @@ static void blake2b_compress(struct blake2b_state *S, for (i = 0; i < 8; ++i) v[i] = S->h[i]; - v[ 8] = blake2b_IV[0]; - v[ 9] = blake2b_IV[1]; - v[10] = blake2b_IV[2]; - v[11] = blake2b_IV[3]; - v[12] = blake2b_IV[4] ^ S->t[0]; - v[13] = blake2b_IV[5] ^ S->t[1]; - v[14] = blake2b_IV[6] ^ S->f[0]; - v[15] = blake2b_IV[7] ^ S->f[1]; + v[ 8] = BLAKE2B_IV0; + v[ 9] = BLAKE2B_IV1; + v[10] = BLAKE2B_IV2; + v[11] = BLAKE2B_IV3; + v[12] = BLAKE2B_IV4 ^ S->t[0]; + v[13] = BLAKE2B_IV5 ^ S->t[1]; + v[14] = BLAKE2B_IV6 ^ S->f[0]; + v[15] = BLAKE2B_IV7 ^ S->f[1]; ROUND(0); ROUND(1); @@ -129,7 +101,9 @@ static void blake2b_compress(struct blake2b_state *S, ROUND(9); ROUND(10); ROUND(11); - +#ifdef CONFIG_CC_IS_CLANG +#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */ +#endif for (i = 0; i < 8; ++i) S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; } @@ -137,159 +111,54 @@ static void blake2b_compress(struct blake2b_state *S, #undef G #undef ROUND -struct blake2b_tfm_ctx { - u8 key[BLAKE2B_KEYBYTES]; - unsigned int keylen; -}; - -static int blake2b_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +void blake2b_compress_generic(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc) { - struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen == 0 || keylen > BLAKE2B_KEYBYTES) - return -EINVAL; - - memcpy(tctx->key, key, keylen); - tctx->keylen = keylen; - - return 0; + do { + blake2b_increment_counter(state, inc); + blake2b_compress_one_generic(state, block); + block += BLAKE2B_BLOCK_SIZE; + } while (--nblocks); } +EXPORT_SYMBOL(blake2b_compress_generic); -static int blake2b_init(struct shash_desc *desc) +static int crypto_blake2b_update_generic(struct shash_desc *desc, + const u8 *in, unsigned int inlen) { - struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2b_state *state = shash_desc_ctx(desc); - const int digestsize = 
crypto_shash_digestsize(desc->tfm); - - memset(state, 0, sizeof(*state)); - memcpy(state->h, blake2b_IV, sizeof(state->h)); - - /* Parameter block is all zeros except index 0, no xor for 1..7 */ - state->h[0] ^= 0x01010000 | tctx->keylen << 8 | digestsize; - - if (tctx->keylen) { - /* - * Prefill the buffer with the key, next call to _update or - * _final will process it - */ - memcpy(state->buf, tctx->key, tctx->keylen); - state->buflen = BLAKE2B_BLOCKBYTES; - } - return 0; + return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic); } -static int blake2b_update(struct shash_desc *desc, const u8 *in, - unsigned int inlen) +static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) { - struct blake2b_state *state = shash_desc_ctx(desc); - const size_t left = state->buflen; - const size_t fill = BLAKE2B_BLOCKBYTES - left; - - if (!inlen) - return 0; - - if (inlen > fill) { - state->buflen = 0; - /* Fill buffer */ - memcpy(state->buf + left, in, fill); - blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES); - /* Compress */ - blake2b_compress(state, state->buf); - in += fill; - inlen -= fill; - while (inlen > BLAKE2B_BLOCKBYTES) { - blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES); - blake2b_compress(state, in); - in += BLAKE2B_BLOCKBYTES; - inlen -= BLAKE2B_BLOCKBYTES; - } - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; - - return 0; + return crypto_blake2b_final(desc, out, blake2b_compress_generic); } -static int blake2b_final(struct shash_desc *desc, u8 *out) -{ - struct blake2b_state *state = shash_desc_ctx(desc); - const int digestsize = crypto_shash_digestsize(desc->tfm); - size_t i; - - blake2b_increment_counter(state, state->buflen); - /* Set last block */ - state->f[0] = (u64)-1; - /* Padding */ - memset(state->buf + state->buflen, 0, BLAKE2B_BLOCKBYTES - state->buflen); - blake2b_compress(state, state->buf); - - /* Avoid temporary buffer and switch the internal output to LE order */ - for (i = 0; i < ARRAY_SIZE(state->h); i++) - __cpu_to_le64s(&state->h[i]); - - memcpy(out, state->h, digestsize); - return 0; -} +#define BLAKE2B_ALG(name, driver_name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = driver_name, \ + .base.cra_priority = 100, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2b_setkey, \ + .init = crypto_blake2b_init, \ + .update = crypto_blake2b_update_generic, \ + .final = crypto_blake2b_final_generic, \ + .descsize = sizeof(struct blake2b_state), \ + } static struct shash_alg blake2b_algs[] = { - { - .base.cra_name = "blake2b-160", - .base.cra_driver_name = "blake2b-160-generic", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = BLAKE2B_BLOCKBYTES, - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), - .base.cra_module = THIS_MODULE, - .digestsize = BLAKE2B_160_DIGEST_SIZE, - .setkey = blake2b_setkey, - .init = blake2b_init, - .update = blake2b_update, - .final = blake2b_final, - .descsize = sizeof(struct blake2b_state), - }, { - .base.cra_name = "blake2b-256", - .base.cra_driver_name = "blake2b-256-generic", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = BLAKE2B_BLOCKBYTES, - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), - .base.cra_module = THIS_MODULE, - .digestsize = 
BLAKE2B_256_DIGEST_SIZE, - .setkey = blake2b_setkey, - .init = blake2b_init, - .update = blake2b_update, - .final = blake2b_final, - .descsize = sizeof(struct blake2b_state), - }, { - .base.cra_name = "blake2b-384", - .base.cra_driver_name = "blake2b-384-generic", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = BLAKE2B_BLOCKBYTES, - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), - .base.cra_module = THIS_MODULE, - .digestsize = BLAKE2B_384_DIGEST_SIZE, - .setkey = blake2b_setkey, - .init = blake2b_init, - .update = blake2b_update, - .final = blake2b_final, - .descsize = sizeof(struct blake2b_state), - }, { - .base.cra_name = "blake2b-512", - .base.cra_driver_name = "blake2b-512-generic", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = BLAKE2B_BLOCKBYTES, - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), - .base.cra_module = THIS_MODULE, - .digestsize = BLAKE2B_512_DIGEST_SIZE, - .setkey = blake2b_setkey, - .init = blake2b_init, - .update = blake2b_update, - .final = blake2b_final, - .descsize = sizeof(struct blake2b_state), - } + BLAKE2B_ALG("blake2b-160", "blake2b-160-generic", + BLAKE2B_160_HASH_SIZE), + BLAKE2B_ALG("blake2b-256", "blake2b-256-generic", + BLAKE2B_256_HASH_SIZE), + BLAKE2B_ALG("blake2b-384", "blake2b-384-generic", + BLAKE2B_384_HASH_SIZE), + BLAKE2B_ALG("blake2b-512", "blake2b-512-generic", + BLAKE2B_512_HASH_SIZE), }; static int __init blake2b_mod_init(void) diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c deleted file mode 100644 index 005783ff45ad..000000000000 --- a/crypto/blake2s_generic.c +++ /dev/null @@ -1,169 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* - * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
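[Editor's note] With the per-size boilerplate collapsed into the BLAKE2B_ALG() macro above, the four generic variants register under the same names as before, so callers are unaffected. A minimal sketch of computing a digest through the shash API; the wrapper name is hypothetical and crypto_shash_tfm_digest() is assumed available:

```c
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/types.h>

static int my_blake2b_256(const u8 *data, unsigned int len, u8 out[32])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("blake2b-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One-shot, unkeyed digest over @data. */
	err = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return err;
}
```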
- */ - -#include <crypto/internal/blake2s.h> -#include <crypto/internal/simd.h> -#include <crypto/internal/hash.h> - -#include <linux/types.h> -#include <linux/jump_label.h> -#include <linux/kernel.h> -#include <linux/module.h> - -static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) - return -EINVAL; - - memcpy(tctx->key, key, keylen); - tctx->keylen = keylen; - - return 0; -} - -static int crypto_blake2s_init(struct shash_desc *desc) -{ - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2s_state *state = shash_desc_ctx(desc); - const int outlen = crypto_shash_digestsize(desc->tfm); - - if (tctx->keylen) - blake2s_init_key(state, outlen, tctx->key, tctx->keylen); - else - blake2s_init(state, outlen); - - return 0; -} - -static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in, - unsigned int inlen) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; - - if (unlikely(!inlen)) - return 0; - if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - blake2s_compress_generic(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); - state->buflen = 0; - in += fill; - inlen -= fill; - } - if (inlen > BLAKE2S_BLOCK_SIZE) { - const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); - /* Hash one less (full) block than strictly possible */ - blake2s_compress_generic(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); - in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); - inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; - - return 0; -} - -static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - - blake2s_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ - blake2s_compress_generic(state, state->buf, 1, state->buflen); - cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); - memcpy(out, state->h, state->outlen); - memzero_explicit(state, sizeof(*state)); - - return 0; -} - -static struct shash_alg blake2s_algs[] = {{ - .base.cra_name = "blake2s-128", - .base.cra_driver_name = "blake2s-128-generic", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_128_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-160", - .base.cra_driver_name = "blake2s-160-generic", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_160_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-224", - .base.cra_driver_name = "blake2s-224-generic", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = 
BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_224_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}, { - .base.cra_name = "blake2s-256", - .base.cra_driver_name = "blake2s-256-generic", - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), - .base.cra_priority = 200, - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - - .digestsize = BLAKE2S_256_HASH_SIZE, - .setkey = crypto_blake2s_setkey, - .init = crypto_blake2s_init, - .update = crypto_blake2s_update, - .final = crypto_blake2s_final, - .descsize = sizeof(struct blake2s_state), -}}; - -static int __init blake2s_mod_init(void) -{ - return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -} - -static void __exit blake2s_mod_exit(void) -{ - crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -} - -subsys_initcall(blake2s_mod_init); -module_exit(blake2s_mod_exit); - -MODULE_ALIAS_CRYPTO("blake2s-128"); -MODULE_ALIAS_CRYPTO("blake2s-128-generic"); -MODULE_ALIAS_CRYPTO("blake2s-160"); -MODULE_ALIAS_CRYPTO("blake2s-160-generic"); -MODULE_ALIAS_CRYPTO("blake2s-224"); -MODULE_ALIAS_CRYPTO("blake2s-224-generic"); -MODULE_ALIAS_CRYPTO("blake2s-256"); -MODULE_ALIAS_CRYPTO("blake2s-256-generic"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c index c3c2041fe0c5..003b52c6880e 100644 --- a/crypto/blowfish_generic.c +++ b/crypto/blowfish_generic.c @@ -14,7 +14,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> +#include <asm/unaligned.h> #include <linux/crypto.h> #include <linux/types.h> #include <crypto/blowfish.h> @@ -36,12 +36,10 @@ static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *in_blk = (const __be32 *)src; - __be32 *const out_blk = (__be32 *)dst; const u32 *P = ctx->p; const u32 *S = ctx->s; - u32 yl = be32_to_cpu(in_blk[0]); - u32 yr = be32_to_cpu(in_blk[1]); + u32 yl = get_unaligned_be32(src); + u32 yr = get_unaligned_be32(src + 4); ROUND(yr, yl, 0); ROUND(yl, yr, 1); @@ -63,19 +61,17 @@ static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) yl ^= P[16]; yr ^= P[17]; - out_blk[0] = cpu_to_be32(yr); - out_blk[1] = cpu_to_be32(yl); + put_unaligned_be32(yr, dst); + put_unaligned_be32(yl, dst + 4); } static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *in_blk = (const __be32 *)src; - __be32 *const out_blk = (__be32 *)dst; const u32 *P = ctx->p; const u32 *S = ctx->s; - u32 yl = be32_to_cpu(in_blk[0]); - u32 yr = be32_to_cpu(in_blk[1]); + u32 yl = get_unaligned_be32(src); + u32 yr = get_unaligned_be32(src + 4); ROUND(yr, yl, 17); ROUND(yl, yr, 16); @@ -97,8 +93,8 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) yl ^= P[1]; yr ^= P[0]; - out_blk[0] = cpu_to_be32(yr); - out_blk[1] = cpu_to_be32(yl); + put_unaligned_be32(yr, dst); + put_unaligned_be32(yl, dst + 4); } static struct crypto_alg alg = { @@ -108,7 +104,6 @@ static struct crypto_alg alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = BF_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bf_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = BF_MIN_KEY_SIZE, diff --git 
a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 9a5783e5196a..fd1a88af9e77 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -6,15 +6,7 @@ /* * Algorithm Specification - * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html - */ - -/* - * - * NOTE --- NOTE --- NOTE --- NOTE - * This implementation assumes that all memory addresses passed - * as parameters are four-byte aligned. - * + * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ #include <linux/crypto.h> @@ -994,16 +986,14 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key, static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; unsigned int max; u32 tmp[4]; - tmp[0] = be32_to_cpu(src[0]); - tmp[1] = be32_to_cpu(src[1]); - tmp[2] = be32_to_cpu(src[2]); - tmp[3] = be32_to_cpu(src[3]); + tmp[0] = get_unaligned_be32(in); + tmp[1] = get_unaligned_be32(in + 4); + tmp[2] = get_unaligned_be32(in + 8); + tmp[3] = get_unaligned_be32(in + 12); if (cctx->key_length == 16) max = 24; @@ -1013,25 +1003,23 @@ static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) camellia_do_encrypt(cctx->key_table, tmp, max); /* do_encrypt returns 0,1 swapped with 2,3 */ - dst[0] = cpu_to_be32(tmp[2]); - dst[1] = cpu_to_be32(tmp[3]); - dst[2] = cpu_to_be32(tmp[0]); - dst[3] = cpu_to_be32(tmp[1]); + put_unaligned_be32(tmp[2], out); + put_unaligned_be32(tmp[3], out + 4); + put_unaligned_be32(tmp[0], out + 8); + put_unaligned_be32(tmp[1], out + 12); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; unsigned int max; u32 tmp[4]; - tmp[0] = be32_to_cpu(src[0]); - tmp[1] = be32_to_cpu(src[1]); - tmp[2] = be32_to_cpu(src[2]); - tmp[3] = be32_to_cpu(src[3]); + tmp[0] = get_unaligned_be32(in); + tmp[1] = get_unaligned_be32(in + 4); + tmp[2] = get_unaligned_be32(in + 8); + tmp[3] = get_unaligned_be32(in + 12); if (cctx->key_length == 16) max = 24; @@ -1041,10 +1029,10 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) camellia_do_decrypt(cctx->key_table, tmp, max); /* do_decrypt returns 0,1 swapped with 2,3 */ - dst[0] = cpu_to_be32(tmp[2]); - dst[1] = cpu_to_be32(tmp[3]); - dst[2] = cpu_to_be32(tmp[0]); - dst[3] = cpu_to_be32(tmp[1]); + put_unaligned_be32(tmp[2], out); + put_unaligned_be32(tmp[3], out + 4); + put_unaligned_be32(tmp[0], out + 8); + put_unaligned_be32(tmp[1], out + 12); } static struct crypto_alg camellia_alg = { @@ -1054,7 +1042,6 @@ static struct crypto_alg camellia_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index 4095085d4e51..0257c14cefc2 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c @@ -13,7 +13,7 @@ */ -#include <asm/byteorder.h> +#include <asm/unaligned.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/module.h> @@ -302,8 +302,6 @@ static const u32 sb8[256] = { void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) { - const __be32 *src = (const __be32 *)inbuf; - __be32 *dst = (__be32 *)outbuf; u32 l, r, t; u32 I; /* used by the Fx macros */ u32 *Km; @@ 
-315,8 +313,8 @@ void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) */ - l = be32_to_cpu(src[0]); - r = be32_to_cpu(src[1]); + l = get_unaligned_be32(inbuf); + r = get_unaligned_be32(inbuf + 4); /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: * Li = Ri-1; @@ -347,8 +345,8 @@ void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and * concatenate to form the ciphertext.) */ - dst[0] = cpu_to_be32(r); - dst[1] = cpu_to_be32(l); + put_unaligned_be32(r, outbuf); + put_unaligned_be32(l, outbuf + 4); } EXPORT_SYMBOL_GPL(__cast5_encrypt); @@ -359,8 +357,6 @@ static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) { - const __be32 *src = (const __be32 *)inbuf; - __be32 *dst = (__be32 *)outbuf; u32 l, r, t; u32 I; u32 *Km; @@ -369,8 +365,8 @@ void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) Km = c->Km; Kr = c->Kr; - l = be32_to_cpu(src[0]); - r = be32_to_cpu(src[1]); + l = get_unaligned_be32(inbuf); + r = get_unaligned_be32(inbuf + 4); if (!(c->rr)) { t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); @@ -391,8 +387,8 @@ void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); - dst[0] = cpu_to_be32(r); - dst[1] = cpu_to_be32(l); + put_unaligned_be32(r, outbuf); + put_unaligned_be32(l, outbuf + 4); } EXPORT_SYMBOL_GPL(__cast5_decrypt); @@ -513,7 +509,6 @@ static struct crypto_alg alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST5_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast5_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index c77ff6c8a2b2..75346380aa0b 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -10,7 +10,7 @@ */ -#include <asm/byteorder.h> +#include <asm/unaligned.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/module.h> @@ -172,16 +172,14 @@ static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km) void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { const struct cast6_ctx *c = ctx; - const __be32 *src = (const __be32 *)inbuf; - __be32 *dst = (__be32 *)outbuf; u32 block[4]; const u32 *Km; const u8 *Kr; - block[0] = be32_to_cpu(src[0]); - block[1] = be32_to_cpu(src[1]); - block[2] = be32_to_cpu(src[2]); - block[3] = be32_to_cpu(src[3]); + block[0] = get_unaligned_be32(inbuf); + block[1] = get_unaligned_be32(inbuf + 4); + block[2] = get_unaligned_be32(inbuf + 8); + block[3] = get_unaligned_be32(inbuf + 12); Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km); Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km); @@ -196,10 +194,10 @@ void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km); Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km); - dst[0] = cpu_to_be32(block[0]); - dst[1] = cpu_to_be32(block[1]); - dst[2] = cpu_to_be32(block[2]); - dst[3] = cpu_to_be32(block[3]); + put_unaligned_be32(block[0], outbuf); + put_unaligned_be32(block[1], outbuf + 4); + put_unaligned_be32(block[2], outbuf + 8); + put_unaligned_be32(block[3], outbuf + 12); } EXPORT_SYMBOL_GPL(__cast6_encrypt); @@ -211,16 +209,14 @@ static void cast6_encrypt(struct 
crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { const struct cast6_ctx *c = ctx; - const __be32 *src = (const __be32 *)inbuf; - __be32 *dst = (__be32 *)outbuf; u32 block[4]; const u32 *Km; const u8 *Kr; - block[0] = be32_to_cpu(src[0]); - block[1] = be32_to_cpu(src[1]); - block[2] = be32_to_cpu(src[2]); - block[3] = be32_to_cpu(src[3]); + block[0] = get_unaligned_be32(inbuf); + block[1] = get_unaligned_be32(inbuf + 4); + block[2] = get_unaligned_be32(inbuf + 8); + block[3] = get_unaligned_be32(inbuf + 12); Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km); Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km); @@ -235,10 +231,10 @@ void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km); Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km); - dst[0] = cpu_to_be32(block[0]); - dst[1] = cpu_to_be32(block[1]); - dst[2] = cpu_to_be32(block[2]); - dst[3] = cpu_to_be32(block[3]); + put_unaligned_be32(block[0], outbuf); + put_unaligned_be32(block[1], outbuf + 4); + put_unaligned_be32(block[2], outbuf + 8); + put_unaligned_be32(block[3], outbuf + 12); } EXPORT_SYMBOL_GPL(__cast6_decrypt); @@ -254,7 +250,6 @@ static struct crypto_alg alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST6_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast6_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { diff --git a/crypto/cbc.c b/crypto/cbc.c index e6f6273a7d39..6c03e96b945f 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -6,7 +6,7 @@ */ #include <crypto/algapi.h> -#include <crypto/cbc.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/init.h> @@ -14,34 +14,157 @@ #include <linux/log2.h> #include <linux/module.h> -static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm, - const u8 *src, u8 *dst) +static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk, + struct crypto_skcipher *skcipher) { - crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src); + unsigned int bsize = crypto_skcipher_blocksize(skcipher); + void (*fn)(struct crypto_tfm *, u8 *, const u8 *); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + struct crypto_cipher *cipher; + struct crypto_tfm *tfm; + u8 *iv = walk->iv; + + cipher = skcipher_cipher_simple(skcipher); + tfm = crypto_cipher_tfm(cipher); + fn = crypto_cipher_alg(cipher)->cia_encrypt; + + do { + crypto_xor(iv, src, bsize); + fn(tfm, dst, iv); + memcpy(iv, dst, bsize); + + src += bsize; + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + + return nbytes; +} + +static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk, + struct crypto_skcipher *skcipher) +{ + unsigned int bsize = crypto_skcipher_blocksize(skcipher); + void (*fn)(struct crypto_tfm *, u8 *, const u8 *); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + struct crypto_cipher *cipher; + struct crypto_tfm *tfm; + u8 *iv = walk->iv; + + cipher = skcipher_cipher_simple(skcipher); + tfm = crypto_cipher_tfm(cipher); + fn = crypto_cipher_alg(cipher)->cia_encrypt; + + do { + crypto_xor(src, iv, bsize); + fn(tfm, src, src); + iv = src; + + src += bsize; + } while ((nbytes -= bsize) >= bsize); + + memcpy(walk->iv, iv, bsize); + + return nbytes; } static int crypto_cbc_encrypt(struct skcipher_request *req) { - return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one); + struct crypto_skcipher 
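[Editor's note] The blowfish, camellia, cast5, and cast6 hunks above all make the same transformation: instead of casting the I/O buffers to __be32 * (which required .cra_alignmask = 3 so the API would align them), they use get_unaligned_be32()/put_unaligned_be32(), which are safe at any address, so the alignmask can be dropped. A hedged illustration of the pattern; my_swap_halves() is a hypothetical stand-in for a cipher's final swap-and-store step:

```c
#include <asm/unaligned.h>
#include <linux/types.h>

/*
 * get_unaligned_be32(p) is the byte-level load
 * ((u32)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]),
 * valid at any address, so no .cra_alignmask is needed.
 */
static void my_swap_halves(u8 *dst, const u8 *src)
{
	u32 l = get_unaligned_be32(src);	/* was be32_to_cpu(src32[0]) */
	u32 r = get_unaligned_be32(src + 4);	/* was be32_to_cpu(src32[1]) */

	put_unaligned_be32(r, dst);		/* store the swapped halves */
	put_unaligned_be32(l, dst + 4);		/* back out big-endian */
}
```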
*skcipher = crypto_skcipher_reqtfm(req); + struct skcipher_walk walk; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes) { + if (walk.src.virt.addr == walk.dst.virt.addr) + err = crypto_cbc_encrypt_inplace(&walk, skcipher); + else + err = crypto_cbc_encrypt_segment(&walk, skcipher); + err = skcipher_walk_done(&walk, err); + } + + return err; +} + +static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk, + struct crypto_skcipher *skcipher) +{ + unsigned int bsize = crypto_skcipher_blocksize(skcipher); + void (*fn)(struct crypto_tfm *, u8 *, const u8 *); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + struct crypto_cipher *cipher; + struct crypto_tfm *tfm; + u8 *iv = walk->iv; + + cipher = skcipher_cipher_simple(skcipher); + tfm = crypto_cipher_tfm(cipher); + fn = crypto_cipher_alg(cipher)->cia_decrypt; + + do { + fn(tfm, dst, src); + crypto_xor(dst, iv, bsize); + iv = src; + + src += bsize; + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + + memcpy(walk->iv, iv, bsize); + + return nbytes; } -static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm, - const u8 *src, u8 *dst) +static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk, + struct crypto_skcipher *skcipher) { - crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src); + unsigned int bsize = crypto_skcipher_blocksize(skcipher); + void (*fn)(struct crypto_tfm *, u8 *, const u8 *); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 last_iv[MAX_CIPHER_BLOCKSIZE]; + struct crypto_cipher *cipher; + struct crypto_tfm *tfm; + + cipher = skcipher_cipher_simple(skcipher); + tfm = crypto_cipher_tfm(cipher); + fn = crypto_cipher_alg(cipher)->cia_decrypt; + + /* Start of the last block. 
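[Editor's note] The rewritten cbc.c open-codes the classic chaining recurrences instead of going through the removed crypto/cbc.h helpers: C[i] = E(P[i] xor C[i-1]) on encrypt, P[i] = D(C[i]) xor C[i-1] on decrypt, with the in-place decrypt variant (continuing below) walking backwards from a saved copy of the last ciphertext block so each predecessor block is still intact when it is needed as the XOR mask. A hedged, userspace-style sketch of the segment encrypt loop; blk_encrypt() is a toy stand-in for the single-block cipher:

```c
#include <stddef.h>
#include <string.h>

#define BSIZE 16	/* hypothetical block size */

/* Toy stand-in for the single-block encrypt primitive (illustration only). */
static void blk_encrypt(unsigned char *dst, const unsigned char *src)
{
	size_t i;

	for (i = 0; i < BSIZE; i++)
		dst[i] = src[i] ^ 0xA5;
}

static void cbc_encrypt_segment(unsigned char *dst, const unsigned char *src,
				size_t nbytes, unsigned char iv[BSIZE])
{
	size_t i;

	while (nbytes >= BSIZE) {
		/* iv holds C[i-1]; fold the plaintext block into it ... */
		for (i = 0; i < BSIZE; i++)
			iv[i] ^= src[i];
		/* ... encrypt to produce C[i] ... */
		blk_encrypt(dst, iv);
		/* ... and C[i] becomes the mask for the next block. */
		memcpy(iv, dst, BSIZE);

		src += BSIZE;
		dst += BSIZE;
		nbytes -= BSIZE;
	}
}
```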
*/ + src += nbytes - (nbytes & (bsize - 1)) - bsize; + memcpy(last_iv, src, bsize); + + for (;;) { + fn(tfm, src, src); + if ((nbytes -= bsize) < bsize) + break; + crypto_xor(src, src - bsize, bsize); + src -= bsize; + } + + crypto_xor(src, walk->iv, bsize); + memcpy(walk->iv, last_iv, bsize); + + return nbytes; } static int crypto_cbc_decrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes) { - err = crypto_cbc_decrypt_blocks(&walk, tfm, - crypto_cbc_decrypt_one); + if (walk.src.virt.addr == walk.dst.virt.addr) + err = crypto_cbc_decrypt_inplace(&walk, skcipher); + else + err = crypto_cbc_decrypt_segment(&walk, skcipher); err = skcipher_walk_done(&walk, err); } diff --git a/crypto/ccm.c b/crypto/ccm.c index 241ecdc5c4e0..6b815ece51c6 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -6,6 +6,7 @@ */ #include <crypto/internal/aead.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> @@ -447,7 +448,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; @@ -455,14 +455,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, struct hash_alg_common *mac; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -470,7 +465,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ictx = aead_instance_ctx(inst); err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst), - mac_name, 0, CRYPTO_ALG_ASYNC); + mac_name, 0, mask | CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; mac = crypto_spawn_ahash_alg(&ictx->mac); @@ -507,7 +502,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -712,26 +706,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst) static int crypto_rfc4309_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; - const char *ccm_name; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); - - ccm_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(ccm_name)) - return PTR_ERR(ccm_name); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -739,9 +722,9 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); err = crypto_grab_aead(spawn, 
aead_crypto_instance(inst), - ccm_name, 0, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) - goto out_free_inst; + goto err_free_inst; alg = crypto_spawn_aead_alg(spawn); @@ -749,11 +732,11 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, /* We only support 16-byte blocks. */ if (crypto_aead_alg_ivsize(alg) != 16) - goto out_drop_alg; + goto err_free_inst; /* Not a stream cipher? */ if (alg->base.cra_blocksize != 1) - goto out_drop_alg; + goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, @@ -762,9 +745,8 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4309(%s)", alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_drop_alg; + goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -786,17 +768,11 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, inst->free = crypto_rfc4309_free; err = aead_register_instance(tmpl, inst); - if (err) - goto out_drop_alg; - -out: + if (err) { +err_free_inst: + crypto_rfc4309_free(inst); + } return err; - -out_drop_alg: - crypto_drop_aead(spawn); -out_free_inst: - kfree(inst); - goto out; } static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent, @@ -889,9 +865,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -901,7 +878,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); @@ -978,3 +955,4 @@ MODULE_ALIAS_CRYPTO("ccm_base"); MODULE_ALIAS_CRYPTO("rfc4309"); MODULE_ALIAS_CRYPTO("ccm"); MODULE_ALIAS_CRYPTO("cbcmac"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/cfb.c b/crypto/cfb.c index 4e5219bbcd19..5c36b7b65e2a 100644 --- a/crypto/cfb.c +++ b/crypto/cfb.c @@ -1,4 +1,4 @@ -//SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0 /* * CFB: Cipher FeedBack mode * @@ -20,6 +20,7 @@ */ #include <crypto/algapi.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/init.h> @@ -250,3 +251,4 @@ module_exit(crypto_cfb_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CFB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("cfb"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index ccaea5cb66d1..97bbb135e9a6 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst) static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, const char *name, unsigned int ivsize) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; @@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (ivsize > 
CHACHAPOLY_IV_SIZE) return -EINVAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (chacha->base.cra_flags | - poly->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; diff --git a/crypto/cipher.c b/crypto/cipher.c index fd78150deb1c..b47141ed4a9f 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -9,6 +9,7 @@ */ #include <crypto/algapi.h> +#include <crypto/internal/cipher.h> #include <linux/kernel.h> #include <linux/crypto.h> #include <linux/errno.h> @@ -53,7 +54,7 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm, return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen); } -EXPORT_SYMBOL_GPL(crypto_cipher_setkey); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL); static inline void cipher_crypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src, bool enc) @@ -81,11 +82,11 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, { cipher_crypt_one(tfm, dst, src, true); } -EXPORT_SYMBOL_GPL(crypto_cipher_encrypt_one); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL); void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { cipher_crypt_one(tfm, dst, src, false); } -EXPORT_SYMBOL_GPL(crypto_cipher_decrypt_one); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL); diff --git a/crypto/cmac.c b/crypto/cmac.c index 143a6544c873..f4a5d3bfb376 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -11,6 +11,7 @@ * Author: Kazunori Miyazawa <miyazawa@linux-ipv6.org> */ +#include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> #include <linux/err.h> #include <linux/kernel.h> @@ -225,9 +226,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -237,7 +239,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); @@ -312,3 +314,4 @@ module_exit(crypto_cmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CMAC keyed hash algorithm"); MODULE_ALIAS_CRYPTO("cmac"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index 0e103fb5dd77..a989cb44fd16 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -1,26 +1,4 @@ -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Xyratex Technology Limited */ diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 7fa9b0788685..768614738541 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -15,7 +15,7 @@ * pages = {}, * month = {June}, *} - * Used by the iSCSI driver, possibly others, and derived from the + * Used by the iSCSI driver, possibly others, and derived from * the iscsi-crc.c module of the linux-iscsi driver at * http://linux-iscsi.sourceforge.net. * @@ -50,7 +50,7 @@ struct chksum_desc_ctx { }; /* - * Steps through buffer one byte at at time, calculates reflected + * Steps through buffer one byte at a time, calculates reflected * crc using table. */ diff --git a/crypto/crc64_rocksoft_generic.c b/crypto/crc64_rocksoft_generic.c new file mode 100644 index 000000000000..9e812bb26dba --- /dev/null +++ b/crypto/crc64_rocksoft_generic.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/crc64.h> +#include <linux/module.h> +#include <crypto/internal/hash.h> +#include <asm/unaligned.h> + +static int chksum_init(struct shash_desc *desc) +{ + u64 *crc = shash_desc_ctx(desc); + + *crc = 0; + + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + u64 *crc = shash_desc_ctx(desc); + + *crc = crc64_rocksoft_generic(*crc, data, length); + + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + u64 *crc = shash_desc_ctx(desc); + + put_unaligned_le64(*crc, out); + return 0; +} + +static int __chksum_finup(u64 crc, const u8 *data, unsigned int len, u8 *out) +{ + crc = crc64_rocksoft_generic(crc, data, len); + put_unaligned_le64(crc, out); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + u64 *crc = shash_desc_ctx(desc); + + return __chksum_finup(*crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + return __chksum_finup(0, data, length, out); +} + +static struct shash_alg alg = { + .digestsize = sizeof(u64), + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(u64), + .base = { + .cra_name = CRC64_ROCKSOFT_STRING, + .cra_driver_name = "crc64-rocksoft-generic", + .cra_priority = 200, + .cra_blocksize = 1, + .cra_module = THIS_MODULE, + } +}; + +static int __init crc64_rocksoft_init(void) +{ + return crypto_register_shash(&alg); +} + +static void __exit crc64_rocksoft_exit(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crc64_rocksoft_init); 
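[Editor's note] The new crc64_rocksoft_generic.c above wraps the library routine crc64_rocksoft_generic() in a shash so callers can request it by name. A hedged sketch of the accumulate-then-finalize pattern the descriptor context implements, using the library function directly; my_crc64() and its buffers are hypothetical:

```c
#include <linux/crc64.h>
#include <linux/types.h>

/*
 * Accumulate a CRC across two buffers, as the shash update/final
 * pair above does internally.
 */
static u64 my_crc64(const void *a, size_t alen, const void *b, size_t blen)
{
	u64 crc = 0;	/* chksum_init() starts from zero */

	crc = crc64_rocksoft_generic(crc, a, alen);	/* update */
	crc = crc64_rocksoft_generic(crc, b, blen);	/* update */
	return crc;	/* chksum_final() stores this little-endian */
}
```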
+module_exit(crc64_rocksoft_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Rocksoft model CRC64 calculation."); +MODULE_ALIAS_CRYPTO("crc64-rocksoft"); +MODULE_ALIAS_CRYPTO("crc64-rocksoft-generic"); diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c index d90c0070710e..e843982073bb 100644 --- a/crypto/crct10dif_generic.c +++ b/crypto/crct10dif_generic.c @@ -35,7 +35,7 @@ struct chksum_desc_ctx { }; /* - * Steps through buffer one byte at at time, calculates reflected + * Steps through buffer one byte at a time, calculates reflected * crc using table. */ diff --git a/crypto/cryptd.c b/crypto/cryptd.c index d94c75c840a5..668095eca0fa 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -39,6 +39,10 @@ struct cryptd_cpu_queue { }; struct cryptd_queue { + /* + * Protected by disabling BH to allow enqueueing from softinterrupt and + * dequeuing from kworker (cryptd_queue_worker()). + */ struct cryptd_cpu_queue __percpu *cpu_queue; }; @@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue) static int cryptd_enqueue_request(struct cryptd_queue *queue, struct crypto_async_request *request) { - int cpu, err; + int err; struct cryptd_cpu_queue *cpu_queue; refcount_t *refcnt; - cpu = get_cpu(); + local_bh_disable(); cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); refcnt = crypto_tfm_ctx(request->tfm); if (err == -ENOSPC) - goto out_put_cpu; + goto out; - queue_work_on(cpu, cryptd_wq, &cpu_queue->work); + queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work); if (!refcount_read(refcnt)) - goto out_put_cpu; + goto out; refcount_inc(refcnt); -out_put_cpu: - put_cpu(); +out: + local_bh_enable(); return err; } @@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct cryptd_cpu_queue, work); /* * Only handle one request at a time to avoid hogging crypto workqueue. - * preempt_disable/enable is used to prevent being preempted by - * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent - * cryptd_enqueue_request() being accessed from software interrupts. */ local_bh_disable(); - preempt_disable(); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); local_bh_enable(); if (!req) @@ -191,17 +190,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) return ictx->queue; } -static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, - u32 *mask) +static void cryptd_type_and_mask(struct crypto_attr_type *algt, + u32 *type, u32 *mask) { - struct crypto_attr_type *algt; + /* + * cryptd is allowed to wrap internal algorithms, but in that case the + * resulting cryptd instance will be marked as internal as well. + */ + *type = algt->type & CRYPTO_ALG_INTERNAL; + *mask = algt->mask & CRYPTO_ALG_INTERNAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return; + /* No point in cryptd wrapping an algorithm that's already async. 
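[Editor's note] The cryptd change below drops the get_cpu()/put_cpu() pair in favour of local_bh_disable(): the enqueue path may run in softirq context, the dequeue side (cryptd_queue_worker()) also disables BH around the queue manipulation, and with softirqs off the task stays put while touching the per-CPU queue, so no separate preemption games are needed. A hedged sketch of the locking pattern in isolation; my_queue and my_enqueue() are hypothetical:

```c
#include <linux/bottom_half.h>
#include <linux/list.h>
#include <linux/percpu.h>

struct my_cpu_queue {
	struct list_head items;
};

static DEFINE_PER_CPU(struct my_cpu_queue, my_queue);

static void my_enqueue(struct list_head *item)
{
	struct my_cpu_queue *q;

	/*
	 * BH off: a softirq on this CPU cannot interrupt the update,
	 * and this_cpu_ptr() stays valid for the whole section without
	 * an explicit get_cpu().
	 */
	local_bh_disable();
	q = this_cpu_ptr(&my_queue);
	list_add_tail(item, &q->items);
	local_bh_enable();
}
```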
*/ + *mask |= CRYPTO_ALG_ASYNC; - *type |= algt->type & CRYPTO_ALG_INTERNAL; - *mask |= algt->mask & CRYPTO_ALG_INTERNAL; + *mask |= crypto_algt_inherited_mask(algt); } static int cryptd_init_instance(struct crypto_instance *inst, @@ -364,24 +366,17 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst) static int cryptd_create_skcipher(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct skcipherd_instance_ctx *ctx; struct skcipher_instance *inst; struct skcipher_alg *alg; - const char *name; u32 type; u32 mask; int err; - type = 0; - mask = CRYPTO_ALG_ASYNC; - - cryptd_check_internal(tb, &type, &mask); - - name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(name)) - return PTR_ERR(name); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -391,18 +386,17 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, ctx->queue = queue; err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), - name, type, mask); + crypto_attr_alg_name(tb[1]), type, mask); if (err) - goto out_free_inst; + goto err_free_inst; alg = crypto_spawn_skcipher_alg(&ctx->spawn); err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); if (err) - goto out_drop_skcipher; - - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); + goto err_free_inst; + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); @@ -421,10 +415,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, err = skcipher_register_instance(tmpl, inst); if (err) { -out_drop_skcipher: - crypto_drop_skcipher(&ctx->spawn); -out_free_inst: - kfree(inst); +err_free_inst: + cryptd_skcipher_free(inst); } return err; } @@ -640,16 +632,17 @@ static void cryptd_hash_free(struct ahash_instance *inst) } static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct hashd_instance_ctx *ctx; struct ahash_instance *inst; struct shash_alg *alg; - u32 type = 0; - u32 mask = 0; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -668,10 +661,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL| CRYPTO_ALG_OPTIONAL_KEY)); - inst->alg.halg.digestsize = alg->digestsize; inst->alg.halg.statesize = alg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); @@ -694,8 +686,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, err = ahash_register_instance(tmpl, inst); if (err) { err_free_inst: - crypto_drop_shash(&ctx->spawn); - kfree(inst); + cryptd_hash_free(inst); } return err; } @@ -828,21 +819,17 @@ static void cryptd_aead_free(struct aead_instance *inst) static int cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct 
aead_instance_ctx *ctx; struct aead_instance *inst; struct aead_alg *alg; - const char *name; - u32 type = 0; - u32 mask = CRYPTO_ALG_ASYNC; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); - - name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(name)) - return PTR_ERR(name); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -852,17 +839,17 @@ static int cryptd_create_aead(struct crypto_template *tmpl, ctx->queue = queue; err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst), - name, type, mask); + crypto_attr_alg_name(tb[1]), type, mask); if (err) - goto out_free_inst; + goto err_free_inst; alg = crypto_spawn_aead_alg(&ctx->aead_spawn); err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base); if (err) - goto out_drop_aead; + goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); inst->alg.ivsize = crypto_aead_alg_ivsize(alg); @@ -879,10 +866,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl, err = aead_register_instance(tmpl, inst); if (err) { -out_drop_aead: - crypto_drop_aead(&ctx->aead_spawn); -out_free_inst: - kfree(inst); +err_free_inst: + cryptd_aead_free(inst); } return err; } @@ -899,11 +884,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: - return cryptd_create_skcipher(tmpl, tb, &queue); + return cryptd_create_skcipher(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_HASH: - return cryptd_create_hash(tmpl, tb, &queue); + return cryptd_create_hash(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_AEAD: - return cryptd_create_aead(tmpl, tb, &queue); + return cryptd_create_aead(tmpl, tb, algt, &queue); } return -EINVAL; diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index eb029ff1e05a..bb8e77077f02 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -9,6 +9,7 @@ #include <linux/err.h> #include <linux/delay.h> +#include <linux/device.h> #include <crypto/engine.h> #include <uapi/linux/sched/types.h> #include "internal.h" @@ -22,32 +23,37 @@ * @err: error number */ static void crypto_finalize_request(struct crypto_engine *engine, - struct crypto_async_request *req, int err) + struct crypto_async_request *req, int err) { unsigned long flags; - bool finalize_cur_req = false; + bool finalize_req = false; int ret; struct crypto_engine_ctx *enginectx; - spin_lock_irqsave(&engine->queue_lock, flags); - if (engine->cur_req == req) - finalize_cur_req = true; - spin_unlock_irqrestore(&engine->queue_lock, flags); + /* + * If hardware cannot enqueue more requests + * and retry mechanism is not supported + * make sure we are completing the current request + */ + if (!engine->retry_support) { + spin_lock_irqsave(&engine->queue_lock, flags); + if (engine->cur_req == req) { + finalize_req = true; + engine->cur_req = NULL; + } + spin_unlock_irqrestore(&engine->queue_lock, flags); + } - if (finalize_cur_req) { + if (finalize_req || engine->retry_support) { enginectx = crypto_tfm_ctx(req->tfm); - if (engine->cur_req_prepared && + if (enginectx->op.prepare_request && enginectx->op.unprepare_request) { ret = enginectx->op.unprepare_request(engine, req); if (ret) dev_err(engine->dev, "failed to unprepare request\n"); } - 
spin_lock_irqsave(&engine->queue_lock, flags); - engine->cur_req = NULL; - engine->cur_req_prepared = false; - spin_unlock_irqrestore(&engine->queue_lock, flags); } - + lockdep_assert_in_softirq(); req->complete(req, err); kthread_queue_work(engine->kworker, &engine->pump_requests); @@ -74,7 +80,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, spin_lock_irqsave(&engine->queue_lock, flags); /* Make sure we are not already running a request */ - if (engine->cur_req) + if (!engine->retry_support && engine->cur_req) goto out; /* If another context is idling then defer */ @@ -108,13 +114,21 @@ static void crypto_pump_requests(struct crypto_engine *engine, goto out; } +start_request: /* Get the fist request from the engine queue to handle */ backlog = crypto_get_backlog(&engine->queue); async_req = crypto_dequeue_request(&engine->queue); if (!async_req) goto out; - engine->cur_req = async_req; + /* + * If hardware doesn't support the retry mechanism, + * keep track of the request we are processing now. + * We'll need it on completion (crypto_finalize_request). + */ + if (!engine->retry_support) + engine->cur_req = async_req; + if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -130,7 +144,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, ret = engine->prepare_crypt_hardware(engine); if (ret) { dev_err(engine->dev, "failed to prepare crypt hardware\n"); - goto req_err; + goto req_err_2; } } @@ -141,28 +155,90 @@ static void crypto_pump_requests(struct crypto_engine *engine, if (ret) { dev_err(engine->dev, "failed to prepare request: %d\n", ret); - goto req_err; + goto req_err_2; } - engine->cur_req_prepared = true; } if (!enginectx->op.do_one_request) { dev_err(engine->dev, "failed to do request\n"); ret = -EINVAL; - goto req_err; + goto req_err_1; } + ret = enginectx->op.do_one_request(engine, async_req); - if (ret) { - dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret); - goto req_err; + + /* Request unsuccessfully executed by hardware */ + if (ret < 0) { + /* + * If hardware queue is full (-ENOSPC), requeue request + * regardless of backlog flag. + * Otherwise, unprepare and complete the request. + */ + if (!engine->retry_support || + (ret != -ENOSPC)) { + dev_err(engine->dev, + "Failed to do one request from queue: %d\n", + ret); + goto req_err_1; + } + /* + * If retry mechanism is supported, + * unprepare current request and + * enqueue it back into crypto-engine queue. + */ + if (enginectx->op.unprepare_request) { + ret = enginectx->op.unprepare_request(engine, + async_req); + if (ret) + dev_err(engine->dev, + "failed to unprepare request\n"); + } + spin_lock_irqsave(&engine->queue_lock, flags); + /* + * If hardware was unable to execute request, enqueue it + * back in front of crypto-engine queue, to keep the order + * of requests. 
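[Editor's note] With retry support enabled, a driver's do_one_request() may return -ENOSPC when the hardware queue is full; crypto_pump_requests() then unprepares the request, puts it back at the head of the software queue (crypto_enqueue_request_head(), just below) to preserve ordering, and kicks the pump again. A hedged sketch of what a retry-aware driver callback might look like; the my_hw_*() helpers are hypothetical:

```c
#include <crypto/engine.h>
#include <linux/errno.h>
#include <linux/types.h>

bool my_hw_ring_full(void);	/* hypothetical hardware query */
int my_hw_submit(void *areq);	/* hypothetical hardware submit */

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	/*
	 * Returning -ENOSPC tells the engine core to requeue this
	 * request at the head of its queue rather than failing it.
	 */
	if (my_hw_ring_full())
		return -ENOSPC;

	return my_hw_submit(areq);
}
```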
+ */ + crypto_enqueue_request_head(&engine->queue, async_req); + + kthread_queue_work(engine->kworker, &engine->pump_requests); + goto out; } - return; -req_err: - crypto_finalize_request(engine, async_req, ret); + goto retry; + +req_err_1: + if (enginectx->op.unprepare_request) { + ret = enginectx->op.unprepare_request(engine, async_req); + if (ret) + dev_err(engine->dev, "failed to unprepare request\n"); + } + +req_err_2: + async_req->complete(async_req, ret); + +retry: + /* If retry mechanism is supported, send new requests to engine */ + if (engine->retry_support) { + spin_lock_irqsave(&engine->queue_lock, flags); + goto start_request; + } return; out: spin_unlock_irqrestore(&engine->queue_lock, flags); + + /* + * Batching requests is possible only if + * the hardware can enqueue multiple requests + */ + if (engine->do_batch_requests) { + ret = engine->do_batch_requests(engine); + if (ret) + dev_err(engine->dev, "failed to do batch requests: %d\n", + ret); + } + + return; } static void crypto_pump_work(struct kthread_work *work) @@ -177,6 +253,7 @@ static void crypto_pump_work(struct kthread_work *work) * crypto_transfer_request - transfer the new request into the engine queue * @engine: the hardware engine * @req: the request that needs to be listed into the engine queue + * @need_pump: indicates whether to queue the pump of requests to kthread_work */ static int crypto_transfer_request(struct crypto_engine *engine, struct crypto_async_request *req, @@ -253,6 +330,19 @@ int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine); /** + * crypto_transfer_kpp_request_to_engine - transfer one kpp_request to list + * into the engine queue + * @engine: the hardware engine + * @req: the request that needs to be listed into the engine queue + */ +int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine, + struct kpp_request *req) +{ + return crypto_transfer_request_to_engine(engine, &req->base); +} +EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine); + +/** * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request * to list into the engine queue * @engine: the hardware engine @@ -308,6 +398,19 @@ void crypto_finalize_hash_request(struct crypto_engine *engine, EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); /** + * crypto_finalize_kpp_request - finalize one kpp_request if the request is done + * @engine: the hardware engine + * @req: the request that needs to be finalized + * @err: error number + */ +void crypto_finalize_kpp_request(struct crypto_engine *engine, + struct kpp_request *req, int err) +{ + return crypto_finalize_request(engine, &req->base, err); +} +EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request); + +/** * crypto_finalize_skcipher_request - finalize one skcipher_request if * the request is done * @engine: the hardware engine @@ -386,17 +489,28 @@ int crypto_engine_stop(struct crypto_engine *engine) EXPORT_SYMBOL_GPL(crypto_engine_stop); /** - * crypto_engine_alloc_init - allocate crypto hardware engine structure and - * initialize it. + * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure + * and initialize it by setting the maximum number of entries in the software + * crypto-engine queue. * @dev: the device attached with one hardware engine + * @retry_support: whether the hardware supports the retry mechanism + * @cbk_do_batch: pointer to a callback function to be invoked when executing + * a batch of requests.
+ * This has the form: + * callback(struct crypto_engine *engine) + * where: + * @engine: the crypto engine structure. * @rt: whether this queue is set to run as a realtime task + * @qlen: maximum size of the crypto-engine queue * * This must be called from context that can sleep. * Return: the crypto engine structure on success, else NULL. */ -struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) +struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, + bool retry_support, + int (*cbk_do_batch)(struct crypto_engine *engine), + bool rt, int qlen) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 }; struct crypto_engine *engine; if (!dev) @@ -411,12 +525,18 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) engine->running = false; engine->busy = false; engine->idling = false; - engine->cur_req_prepared = false; + engine->retry_support = retry_support; engine->priv_data = dev; + /* + * Batch requests is possible only if + * hardware has support for retry mechanism. + */ + engine->do_batch_requests = retry_support ? cbk_do_batch : NULL; + snprintf(engine->name, sizeof(engine->name), "%s-engine", dev_name(dev)); - crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); + crypto_init_queue(&engine->queue, qlen); spin_lock_init(&engine->queue_lock); engine->kworker = kthread_create_worker(0, "%s", engine->name); @@ -428,11 +548,27 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) if (engine->rt) { dev_info(dev, "will run requests pump with realtime priority\n"); - sched_setscheduler(engine->kworker->task, SCHED_FIFO, ¶m); + sched_set_fifo(engine->kworker->task); } return engine; } +EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set); + +/** + * crypto_engine_alloc_init - allocate crypto hardware engine structure and + * initialize it. + * @dev: the device attached with one hardware engine + * @rt: whether this queue is set to run as a realtime task + * + * This must be called from context that can sleep. + * Return: the crypto engine structure on success, else NULL. 
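Read together, a driver advertising retry support would wire the batch callback and the new constructor up roughly as follows at probe time; every mydrv_* name and the doorbell register are hypothetical:

static int mydrv_do_batch(struct crypto_engine *engine)
{
	struct mydrv_priv *priv = dev_get_drvdata(engine->dev);

	/* one doorbell write launches everything enqueued so far;
	 * MYDRV_GO is a made-up register offset */
	writel(1, priv->regs + MYDRV_GO);
	return 0;
}

static int mydrv_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct crypto_engine *engine;

	/* hardware queue of 16 entries, requeue on -ENOSPC, no RT pump */
	engine = crypto_engine_alloc_init_and_set(dev, true, mydrv_do_batch,
						  false, 16);
	if (!engine)
		return -ENOMEM;
	return crypto_engine_start(engine);
}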
+ */ +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) +{ + return crypto_engine_alloc_init_and_set(dev, false, NULL, rt, + CRYPTO_ENGINE_MAX_QLEN); +} EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); /** diff --git a/crypto/ctr.c b/crypto/ctr.c index a8feab621c6c..23c698b22013 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -7,6 +7,7 @@ #include <crypto/algapi.h> #include <crypto/ctr.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/init.h> @@ -256,38 +257,24 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst) static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; struct skcipher_instance *inst; struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; - const char *cipher_name; u32 mask; - int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - cipher_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(cipher_name)) - return PTR_ERR(cipher_name); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; - mask = crypto_requires_sync(algt->type, algt->mask) | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - spawn = skcipher_instance_ctx(inst); err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), - cipher_name, 0, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -296,27 +283,25 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, /* We only support 16-byte blocks. */ err = -EINVAL; if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE) - goto err_drop_spawn; + goto err_free_inst; /* Not a stream cipher? 
*/ if (alg->base.cra_blocksize != 1) - goto err_drop_spawn; + goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_spawn; + goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)", alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_spawn; + goto err_free_inst; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.ivsize = CTR_RFC3686_IV_SIZE; inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + @@ -336,17 +321,11 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->free = crypto_rfc3686_free; err = skcipher_register_instance(tmpl, inst); - if (err) - goto err_drop_spawn; - -out: - return err; - -err_drop_spawn: - crypto_drop_skcipher(spawn); + if (err) { err_free_inst: - kfree(inst); - goto out; + crypto_rfc3686_free(inst); + } + return err; } static struct crypto_template crypto_ctr_tmpls[] = { @@ -380,3 +359,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CTR block cipher mode of operation"); MODULE_ALIAS_CRYPTO("rfc3686"); MODULE_ALIAS_CRYPTO("ctr"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/cts.c b/crypto/cts.c index 48188adc8e91..3766d47ebcc0 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -325,24 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; - const char *cipher_name; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); - - cipher_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(cipher_name)) - return PTR_ERR(cipher_name); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -351,7 +340,7 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), - cipher_name, 0, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -359,17 +348,16 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) err = -EINVAL; if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize) - goto err_drop_spawn; + goto err_free_inst; if (strncmp(alg->base.cra_name, "cbc(", 4)) - goto err_drop_spawn; + goto err_free_inst; err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts", &alg->base); if (err) - goto err_drop_spawn; + goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -391,17 +379,11 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) inst->free = crypto_cts_free; err = skcipher_register_instance(tmpl, inst); - if (err) - goto err_drop_spawn; - -out: - return err; - 
-err_drop_spawn: - crypto_drop_skcipher(spawn); + if (err) { err_free_inst: - kfree(inst); - goto out; + crypto_cts_free(inst); + } + return err; } static struct crypto_template crypto_cts_tmpl = { diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c index bd88fd571393..d055b0784c77 100644 --- a/crypto/curve25519-generic.c +++ b/crypto/curve25519-generic.c @@ -72,12 +72,12 @@ static struct kpp_alg curve25519_alg = { .max_size = curve25519_max_size, }; -static int curve25519_init(void) +static int __init curve25519_init(void) { return crypto_register_kpp(&curve25519_alg); } -static void curve25519_exit(void) +static void __exit curve25519_exit(void) { crypto_unregister_kpp(&curve25519_alg); } diff --git a/crypto/deflate.c b/crypto/deflate.c index 4c0e6c9d942a..b2a46f6dc961 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -163,7 +163,7 @@ static void __deflate_exit(void *ctx) static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) { __deflate_exit(ctx); - kzfree(ctx); + kfree_sensitive(ctx); } static void deflate_exit(struct crypto_tfm *tfm) diff --git a/crypto/dh.c b/crypto/dh.c index 566f624a2de2..99c3b2ef7adc 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -5,15 +5,16 @@ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> */ +#include <linux/fips.h> #include <linux/module.h> #include <crypto/internal/kpp.h> #include <crypto/kpp.h> #include <crypto/dh.h> +#include <crypto/rng.h> #include <linux/mpi.h> struct dh_ctx { MPI p; /* Value is guaranteed to be set. */ - MPI q; /* Value is optional. */ MPI g; /* Value is guaranteed to be set. */ MPI xa; /* Value is guaranteed to be set. */ }; @@ -21,7 +22,6 @@ struct dh_ctx { static void dh_clear_ctx(struct dh_ctx *ctx) { mpi_free(ctx->p); - mpi_free(ctx->q); mpi_free(ctx->g); mpi_free(ctx->xa); memset(ctx, 0, sizeof(*ctx)); @@ -46,6 +46,9 @@ static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm) static int dh_check_params_length(unsigned int p_len) { + if (fips_enabled) + return (p_len < 2048) ? -EINVAL : 0; + return (p_len < 1536) ? -EINVAL : 0; } @@ -58,12 +61,6 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params) if (!ctx->p) return -EINVAL; - if (params->q && params->q_size) { - ctx->q = mpi_read_raw_data(params->q, params->q_size); - if (!ctx->q) - return -EINVAL; - } - ctx->g = mpi_read_raw_data(params->g, params->g_size); if (!ctx->g) return -EINVAL; @@ -100,11 +97,12 @@ err_clear_ctx: /* * SP800-56A public key verification: * - * * If Q is provided as part of the domain paramenters, a full validation - * according to SP800-56A section 5.6.2.3.1 is performed. + * * For the safe-prime groups in FIPS mode, Q can be computed + * trivially from P and a full validation according to SP800-56A + * section 5.6.2.3.1 is performed. * - * * If Q is not provided, a partial validation according to SP800-56A section - * 5.6.2.3.2 is performed. + * * For all other sets of group parameters, only a partial validation + * according to SP800-56A section 5.6.2.3.2 is performed. */ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) { @@ -115,21 +113,40 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) * Step 1: Verify that 2 <= y <= p - 2. * * The upper limit check is actually y < p instead of y < p - 1 - * as the mpi_sub_ui function is yet missing. + * in order to save one mpi_sub_ui() invocation here. Note that + * p - 1 is the non-trivial element of the subgroup of order 2 and + * thus, the check on y^q below would fail if y == p - 1. 
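The subgroup check above can be tried out with toy numbers. For the safe prime p = 23 we get q = 11; a quadratic residue such as y = 4 satisfies y^q = 1 (mod p), while y = p - 1 (the order-2 element the comment warns about) yields -1 and is rejected. A small standalone C illustration:

#include <stdio.h>

/* square-and-multiply; fine for tiny operands like these */
static unsigned long powmod(unsigned long b, unsigned long e, unsigned long m)
{
	unsigned long r = 1;

	for (b %= m; e; e >>= 1) {
		if (e & 1)
			r = r * b % m;
		b = b * b % m;
	}
	return r;
}

int main(void)
{
	const unsigned long p = 23, q = (p - 1) / 2;	/* safe prime, q = 11 */

	printf("%lu\n", powmod(4, q, p));	/* 1  -> valid public key */
	printf("%lu\n", powmod(p - 1, q, p));	/* 22, i.e. -1 -> rejected */
	return 0;
}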
*/ if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0) return -EINVAL; - /* Step 2: Verify that 1 = y^q mod p */ - if (ctx->q) { - MPI val = mpi_alloc(0); + /* + * Step 2: Verify that 1 = y^q mod p + * + * For the safe-prime groups q = (p - 1)/2. + */ + if (fips_enabled) { + MPI val, q; int ret; + val = mpi_alloc(0); if (!val) return -ENOMEM; - ret = mpi_powm(val, y, ctx->q, ctx->p); + q = mpi_alloc(mpi_get_nlimbs(ctx->p)); + if (!q) { + mpi_free(val); + return -ENOMEM; + } + + /* + * ->p is odd, so no need to explicitly subtract one + * from it before shifting to the right. + */ + mpi_rshift(q, ctx->p, 1); + ret = mpi_powm(val, y, q, ctx->p); + mpi_free(q); if (ret) { mpi_free(val); return ret; @@ -179,6 +196,43 @@ static int dh_compute_value(struct kpp_request *req) if (ret) goto err_free_base; + if (fips_enabled) { + /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ + if (req->src) { + MPI pone; + + /* z <= 1 */ + if (mpi_cmp_ui(val, 1) < 1) { + ret = -EBADMSG; + goto err_free_base; + } + + /* z == p - 1 */ + pone = mpi_alloc(0); + + if (!pone) { + ret = -ENOMEM; + goto err_free_base; + } + + ret = mpi_sub_ui(pone, ctx->p, 1); + if (!ret && !mpi_cmp(pone, val)) + ret = -EBADMSG; + + mpi_free(pone); + + if (ret) + goto err_free_base; + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + } else { + if (dh_is_pubkey_valid(ctx, val)) { + ret = -EAGAIN; + goto err_free_val; + } + } + } + ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); if (ret) goto err_free_base; @@ -222,13 +276,645 @@ static struct kpp_alg dh = { }, }; -static int dh_init(void) + +struct dh_safe_prime { + unsigned int max_strength; + unsigned int p_size; + const char *p; +}; + +static const char safe_prime_g[] = { 2 }; + +struct dh_safe_prime_instance_ctx { + struct crypto_kpp_spawn dh_spawn; + const struct dh_safe_prime *safe_prime; +}; + +struct dh_safe_prime_tfm_ctx { + struct crypto_kpp *dh_tfm; +}; + +static void dh_safe_prime_free_instance(struct kpp_instance *inst) +{ + struct dh_safe_prime_instance_ctx *ctx = kpp_instance_ctx(inst); + + crypto_drop_kpp(&ctx->dh_spawn); + kfree(inst); +} + +static inline struct dh_safe_prime_instance_ctx *dh_safe_prime_instance_ctx( + struct crypto_kpp *tfm) +{ + return kpp_instance_ctx(kpp_alg_instance(tfm)); +} + +static int dh_safe_prime_init_tfm(struct crypto_kpp *tfm) +{ + struct dh_safe_prime_instance_ctx *inst_ctx = + dh_safe_prime_instance_ctx(tfm); + struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); + + tfm_ctx->dh_tfm = crypto_spawn_kpp(&inst_ctx->dh_spawn); + if (IS_ERR(tfm_ctx->dh_tfm)) + return PTR_ERR(tfm_ctx->dh_tfm); + + return 0; +} + +static void dh_safe_prime_exit_tfm(struct crypto_kpp *tfm) +{ + struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); + + crypto_free_kpp(tfm_ctx->dh_tfm); +} + +static u64 __add_u64_to_be(__be64 *dst, unsigned int n, u64 val) +{ + unsigned int i; + + for (i = n; val && i > 0; --i) { + u64 tmp = be64_to_cpu(dst[i - 1]); + + tmp += val; + val = tmp >= val ? 0 : 1; + dst[i - 1] = cpu_to_be64(tmp); + } + + return val; +} + +static void *dh_safe_prime_gen_privkey(const struct dh_safe_prime *safe_prime, + unsigned int *key_size) +{ + unsigned int n, oversampling_size; + __be64 *key; + int err; + u64 h, o; + + /* + * Generate a private key following NIST SP800-56Ar3, + * sec. 5.6.1.1.1 and 5.6.1.1.3 resp.. + * + * 5.6.1.1.1: choose key length N such that + * 2 * ->max_strength <= N <= log2(q) + 1 = ->p_size * 8 - 1 + * with q = (p - 1) / 2 for the safe-prime groups. 
+ * Choose the lower bound's next power of two for N in order to + * avoid excessively large private keys while still + * maintaining some extra reserve beyond the bare minimum in + * most cases. Note that for each entry in safe_prime_groups[], + * the following holds for such N: + * - N >= 256, in particular it is a multiple of 2^6 = 64 + * bits and + * - N < log2(q) + 1, i.e. N respects the upper bound. + */ + n = roundup_pow_of_two(2 * safe_prime->max_strength); + WARN_ON_ONCE(n & ((1u << 6) - 1)); + n >>= 6; /* Convert N into units of u64. */ + + /* + * Reserve one extra u64 to hold the extra random bits + * required as per 5.6.1.1.3. + */ + oversampling_size = (n + 1) * sizeof(__be64); + key = kmalloc(oversampling_size, GFP_KERNEL); + if (!key) + return ERR_PTR(-ENOMEM); + + /* + * 5.6.1.1.3, step 3 (and implicitly step 4): obtain N + 64 + * random bits and interpret them as a big endian integer. + */ + err = -EFAULT; + if (crypto_get_default_rng()) + goto out_err; + + err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)key, + oversampling_size); + crypto_put_default_rng(); + if (err) + goto out_err; + + /* + * 5.6.1.1.3, step 5 is implicit: 2^N < q and thus, + * M = min(2^N, q) = 2^N. + * + * For step 6, calculate + * key = (key[] mod (M - 1)) + 1 = (key[] mod (2^N - 1)) + 1. + * + * In order to avoid expensive divisions, note that + * 2^N mod (2^N - 1) = 1 and thus, for any integer h, + * 2^N * h mod (2^N - 1) = h mod (2^N - 1) always holds. + * The big endian integer key[] composed of n + 1 64bit words + * may be written as key[] = h * 2^N + l, with h = key[0] + * representing the 64 most significant bits and l + * corresponding to the remaining 2^N bits. With the remark + * from above, + * h * 2^N + l mod (2^N - 1) = l + h mod (2^N - 1). + * As both, l and h are less than 2^N, their sum after + * this first reduction is guaranteed to be <= 2^(N + 1) - 2. + * Or equivalently, that their sum can again be written as + * h' * 2^N + l' with h' now either zero or one and if one, + * then l' <= 2^N - 2. Thus, all bits at positions >= N will + * be zero after a second reduction: + * h' * 2^N + l' mod (2^N - 1) = l' + h' mod (2^N - 1). + * At this point, it is still possible that + * l' + h' = 2^N - 1, i.e. that l' + h' mod (2^N - 1) + * is zero. This condition will be detected below by means of + * the final increment overflowing in this case. + */ + h = be64_to_cpu(key[0]); + h = __add_u64_to_be(key + 1, n, h); + h = __add_u64_to_be(key + 1, n, h); + WARN_ON_ONCE(h); + + /* Increment to obtain the final result. */ + o = __add_u64_to_be(key + 1, n, 1); + /* + * The overflow bit o from the increment is either zero or + * one. If zero, key[1:n] holds the final result in big-endian + * order. If one, key[1:n] is zero now, but needs to be set to + * one, c.f. above. + */ + if (o) + key[n] = cpu_to_be64(1); + + /* n is in units of u64, convert to bytes. */ + *key_size = n << 3; + /* Strip the leading extra __be64, which is (virtually) zero by now. 
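The fold described in this comment is easy to check with small numbers; with N = 8 the modulus 2^N - 1 is 255, and two high/low folds reduce any 16-bit value (userspace toy, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* x mod 255 via h * 2^8 + l = h + l (mod 2^8 - 1), folded twice */
static unsigned int fold255(uint16_t x)
{
	unsigned int r = (x >> 8) + (x & 0xff);	/* first reduction */

	r = (r >> 8) + (r & 0xff);		/* second reduction */
	return r;	/* may still equal 255, i.e. 0 mod 255; the kernel
			 * code detects that via the final increment's carry */
}

int main(void)
{
	printf("%u %u\n", fold255(0xbeef), 0xbeef % 255);	/* 174 174 */
	printf("%u %u\n", fold255(0xffff), 0xffff % 255);	/* 255 0 */
	return 0;
}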
*/ + memmove(key, &key[1], *key_size); + + return key; + +out_err: + kfree_sensitive(key); + return ERR_PTR(err); +} + +static int dh_safe_prime_set_secret(struct crypto_kpp *tfm, const void *buffer, + unsigned int len) +{ + struct dh_safe_prime_instance_ctx *inst_ctx = + dh_safe_prime_instance_ctx(tfm); + struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); + struct dh params = {}; + void *buf = NULL, *key = NULL; + unsigned int buf_size; + int err; + + if (buffer) { + err = __crypto_dh_decode_key(buffer, len, &params); + if (err) + return err; + if (params.p_size || params.g_size) + return -EINVAL; + } + + params.p = inst_ctx->safe_prime->p; + params.p_size = inst_ctx->safe_prime->p_size; + params.g = safe_prime_g; + params.g_size = sizeof(safe_prime_g); + + if (!params.key_size) { + key = dh_safe_prime_gen_privkey(inst_ctx->safe_prime, + &params.key_size); + if (IS_ERR(key)) + return PTR_ERR(key); + params.key = key; + } + + buf_size = crypto_dh_key_len(&params); + buf = kmalloc(buf_size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto out; + } + + err = crypto_dh_encode_key(buf, buf_size, &params); + if (err) + goto out; + + err = crypto_kpp_set_secret(tfm_ctx->dh_tfm, buf, buf_size); +out: + kfree_sensitive(buf); + kfree_sensitive(key); + return err; +} + +static void dh_safe_prime_complete_req(struct crypto_async_request *dh_req, + int err) +{ + struct kpp_request *req = dh_req->data; + + kpp_request_complete(req, err); +} + +static struct kpp_request *dh_safe_prime_prepare_dh_req(struct kpp_request *req) +{ + struct dh_safe_prime_tfm_ctx *tfm_ctx = + kpp_tfm_ctx(crypto_kpp_reqtfm(req)); + struct kpp_request *dh_req = kpp_request_ctx(req); + + kpp_request_set_tfm(dh_req, tfm_ctx->dh_tfm); + kpp_request_set_callback(dh_req, req->base.flags, + dh_safe_prime_complete_req, req); + + kpp_request_set_input(dh_req, req->src, req->src_len); + kpp_request_set_output(dh_req, req->dst, req->dst_len); + + return dh_req; +} + +static int dh_safe_prime_generate_public_key(struct kpp_request *req) +{ + struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req); + + return crypto_kpp_generate_public_key(dh_req); +} + +static int dh_safe_prime_compute_shared_secret(struct kpp_request *req) +{ + struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req); + + return crypto_kpp_compute_shared_secret(dh_req); +} + +static unsigned int dh_safe_prime_max_size(struct crypto_kpp *tfm) +{ + struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); + + return crypto_kpp_maxsize(tfm_ctx->dh_tfm); +} + +static int __maybe_unused __dh_safe_prime_create( + struct crypto_template *tmpl, struct rtattr **tb, + const struct dh_safe_prime *safe_prime) +{ + struct kpp_instance *inst; + struct dh_safe_prime_instance_ctx *ctx; + const char *dh_name; + struct kpp_alg *dh_alg; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_KPP, &mask); + if (err) + return err; + + dh_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(dh_name)) + return PTR_ERR(dh_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + ctx = kpp_instance_ctx(inst); + + err = crypto_grab_kpp(&ctx->dh_spawn, kpp_crypto_instance(inst), + dh_name, 0, mask); + if (err) + goto err_free_inst; + + err = -EINVAL; + dh_alg = crypto_spawn_kpp_alg(&ctx->dh_spawn); + if (strcmp(dh_alg->base.cra_name, "dh")) + goto err_free_inst; + + ctx->safe_prime = safe_prime; + + err = crypto_inst_setname(kpp_crypto_instance(inst), + tmpl->name, &dh_alg->base); + if (err) + goto err_free_inst; + +
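Once the ffdhe* templates defined further down are registered, the construction above is reachable under composed names such as "ffdhe2048(dh)"; a hypothetical in-kernel user:

	struct crypto_kpp *tfm = crypto_alloc_kpp("ffdhe2048(dh)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* an empty secret (NULL, 0) makes dh_safe_prime_set_secret()
	 * generate the private key itself, as implemented above */
	err = crypto_kpp_set_secret(tfm, NULL, 0);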
inst->alg.set_secret = dh_safe_prime_set_secret; + inst->alg.generate_public_key = dh_safe_prime_generate_public_key; + inst->alg.compute_shared_secret = dh_safe_prime_compute_shared_secret; + inst->alg.max_size = dh_safe_prime_max_size; + inst->alg.init = dh_safe_prime_init_tfm; + inst->alg.exit = dh_safe_prime_exit_tfm; + inst->alg.reqsize = sizeof(struct kpp_request) + dh_alg->reqsize; + inst->alg.base.cra_priority = dh_alg->base.cra_priority; + inst->alg.base.cra_module = THIS_MODULE; + inst->alg.base.cra_ctxsize = sizeof(struct dh_safe_prime_tfm_ctx); + + inst->free = dh_safe_prime_free_instance; + + err = kpp_register_instance(tmpl, inst); + if (err) + goto err_free_inst; + + return 0; + +err_free_inst: + dh_safe_prime_free_instance(inst); + + return err; +} + +#ifdef CONFIG_CRYPTO_DH_RFC7919_GROUPS + +static const struct dh_safe_prime ffdhe2048_prime = { + .max_strength = 112, + .p_size = 256, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x28\x5c\x97\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static const struct dh_safe_prime ffdhe3072_prime = { + .max_strength = 128, + .p_size = 384, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" + "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" + "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" + "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" + "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" + 
"\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" + "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" + "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" + "\x25\xe4\x1d\x2b\x66\xc6\x2e\x37\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static const struct dh_safe_prime ffdhe4096_prime = { + .max_strength = 152, + .p_size = 512, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" + "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" + "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" + "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" + "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" + "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" + "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" + "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" + "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" + "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" + "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" + "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" + "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" + "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" + "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" + "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" + "\xc6\x8a\x00\x7e\x5e\x65\x5f\x6a\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static const struct dh_safe_prime ffdhe6144_prime = { + .max_strength = 176, + .p_size = 768, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + 
"\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" + "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" + "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" + "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" + "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" + "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" + "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" + "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" + "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" + "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" + "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" + "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" + "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" + "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" + "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" + "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" + "\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a" + "\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6" + "\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c" + "\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71" + "\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77" + "\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8" + "\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e" + "\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4" + "\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92" + "\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82" + "\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c" + "\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46" + "\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17" + "\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04" + "\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69" + "\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4" + "\xa4\x0e\x32\x9c\xd0\xe4\x0e\x65\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static const struct dh_safe_prime ffdhe8192_prime = { + .max_strength = 200, + .p_size = 1024, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + 
"\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" + "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" + "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" + "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" + "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" + "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" + "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" + "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" + "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" + "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" + "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" + "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" + "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" + "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" + "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" + "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" + "\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a" + "\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6" + "\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c" + "\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71" + "\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77" + "\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8" + "\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e" + "\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4" + "\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92" + "\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82" + "\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c" + "\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46" + "\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17" + "\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04" + "\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69" + "\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4" + "\xa4\x0e\x32\x9c\xcf\xf4\x6a\xaa\x36\xad\x00\x4c\xf6\x00\xc8\x38" + "\x1e\x42\x5a\x31\xd9\x51\xae\x64\xfd\xb2\x3f\xce\xc9\x50\x9d\x43" + "\x68\x7f\xeb\x69\xed\xd1\xcc\x5e\x0b\x8c\xc3\xbd\xf6\x4b\x10\xef" + "\x86\xb6\x31\x42\xa3\xab\x88\x29\x55\x5b\x2f\x74\x7c\x93\x26\x65" + "\xcb\x2c\x0f\x1c\xc0\x1b\xd7\x02\x29\x38\x88\x39\xd2\xaf\x05\xe4" + "\x54\x50\x4a\xc7\x8b\x75\x82\x82\x28\x46\xc0\xba\x35\xc3\x5f\x5c" + "\x59\x16\x0c\xc0\x46\xfd\x82\x51\x54\x1f\xc6\x8c\x9c\x86\xb0\x22" + "\xbb\x70\x99\x87\x6a\x46\x0e\x74\x51\xa8\xa9\x31\x09\x70\x3f\xee" + "\x1c\x21\x7e\x6c\x38\x26\xe5\x2c\x51\xaa\x69\x1e\x0e\x42\x3c\xfc" + "\x99\xe9\xe3\x16\x50\xc1\x21\x7b\x62\x48\x16\xcd\xad\x9a\x95\xf9" + "\xd5\xb8\x01\x94\x88\xd9\xc0\xa0\xa1\xfe\x30\x75\xa5\x77\xe2\x31" + "\x83\xf8\x1d\x4a\x3f\x2f\xa4\x57\x1e\xfc\x8c\xe0\xba\x8a\x4f\xe8" + "\xb6\x85\x5d\xfe\x72\xb0\xa6\x6e\xde\xd2\xfb\xab\xfb\xe5\x8a\x30" + "\xfa\xfa\xbe\x1c\x5d\x71\xa8\x7e\x2f\x74\x1e\xf8\xc1\xfe\x86\xfe" + "\xa6\xbb\xfd\xe5\x30\x67\x7f\x0d\x97\xd1\x1d\x49\xf7\xa8\x44\x3d" + "\x08\x22\xe5\x06\xa9\xf4\x61\x4e\x01\x1e\x2a\x94\x83\x8f\xf8\x8c" + "\xd6\x8c\x8b\xb7\xc5\xc6\x42\x4c\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static int 
dh_ffdhe2048_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + return __dh_safe_prime_create(tmpl, tb, &ffdhe2048_prime); +} + +static int dh_ffdhe3072_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + return __dh_safe_prime_create(tmpl, tb, &ffdhe3072_prime); +} + +static int dh_ffdhe4096_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + return __dh_safe_prime_create(tmpl, tb, &ffdhe4096_prime); +} + +static int dh_ffdhe6144_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + return __dh_safe_prime_create(tmpl, tb, &ffdhe6144_prime); +} + +static int dh_ffdhe8192_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + return __dh_safe_prime_create(tmpl, tb, &ffdhe8192_prime); +} + +static struct crypto_template crypto_ffdhe_templates[] = { + { + .name = "ffdhe2048", + .create = dh_ffdhe2048_create, + .module = THIS_MODULE, + }, + { + .name = "ffdhe3072", + .create = dh_ffdhe3072_create, + .module = THIS_MODULE, + }, + { + .name = "ffdhe4096", + .create = dh_ffdhe4096_create, + .module = THIS_MODULE, + }, + { + .name = "ffdhe6144", + .create = dh_ffdhe6144_create, + .module = THIS_MODULE, + }, + { + .name = "ffdhe8192", + .create = dh_ffdhe8192_create, + .module = THIS_MODULE, + }, +}; + +#else /* ! CONFIG_CRYPTO_DH_RFC7919_GROUPS */ + +static struct crypto_template crypto_ffdhe_templates[] = {}; + +#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */ + + +static int __init dh_init(void) { - return crypto_register_kpp(&dh); + int err; + + err = crypto_register_kpp(&dh); + if (err) + return err; + + err = crypto_register_templates(crypto_ffdhe_templates, + ARRAY_SIZE(crypto_ffdhe_templates)); + if (err) { + crypto_unregister_kpp(&dh); + return err; + } + + return 0; } -static void dh_exit(void) +static void __exit dh_exit(void) { + crypto_unregister_templates(crypto_ffdhe_templates, + ARRAY_SIZE(crypto_ffdhe_templates)); crypto_unregister_kpp(&dh); } diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c index 9fd5a42eea15..2d499879328b 100644 --- a/crypto/dh_helper.c +++ b/crypto/dh_helper.c @@ -10,7 +10,7 @@ #include <crypto/dh.h> #include <crypto/kpp.h> -#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int)) +#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int)) static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size) { @@ -28,7 +28,7 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size) static inline unsigned int dh_data_size(const struct dh *p) { - return p->key_size + p->p_size + p->q_size + p->g_size; + return p->key_size + p->p_size + p->g_size; } unsigned int crypto_dh_key_len(const struct dh *p) @@ -53,11 +53,9 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params) ptr = dh_pack_data(ptr, end, &params->key_size, sizeof(params->key_size)); ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size)); - ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size)); ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size)); ptr = dh_pack_data(ptr, end, params->key, params->key_size); ptr = dh_pack_data(ptr, end, params->p, params->p_size); - ptr = dh_pack_data(ptr, end, params->q, params->q_size); ptr = dh_pack_data(ptr, end, params->g, params->g_size); if (ptr != end) return -EINVAL; @@ -65,7 +63,7 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params) } EXPORT_SYMBOL_GPL(crypto_dh_encode_key); -int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh
*params) +int __crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) { const u8 *ptr = buf; struct kpp_secret secret; @@ -79,28 +77,36 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size)); ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size)); - ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size)); ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size)); if (secret.len != crypto_dh_key_len(params)) return -EINVAL; + /* Don't allocate memory. Set pointers to data within + * the given buffer + */ + params->key = (void *)ptr; + params->p = (void *)(ptr + params->key_size); + params->g = (void *)(ptr + params->key_size + params->p_size); + + return 0; +} + +int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) +{ + int err; + + err = __crypto_dh_decode_key(buf, len, params); + if (err) + return err; + /* * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since * some drivers assume otherwise. */ if (params->key_size > params->p_size || - params->g_size > params->p_size || params->q_size > params->p_size) + params->g_size > params->p_size) return -EINVAL; - /* Don't allocate memory. Set pointers to data within - * the given buffer - */ - params->key = (void *)ptr; - params->p = (void *)(ptr + params->key_size); - params->q = (void *)(ptr + params->key_size + params->p_size); - params->g = (void *)(ptr + params->key_size + params->p_size + - params->q_size); - /* * Don't permit 'p' to be 0. It's not a prime number, and it's subject * to corner cases such as 'mod 0' being undefined or @@ -109,10 +115,6 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) if (memchr_inv(params->p, 0, params->p_size) == NULL) return -EINVAL; - /* It is permissible to not provide Q.
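For reference, the caller-side pattern for these helpers after the q removal; prime, gen and priv (with their lengths) are hypothetical buffers:

	struct dh params = {
		.key = priv, .key_size = priv_len,
		.p = prime, .p_size = prime_len,	/* no .q/.q_size anymore */
		.g = gen, .g_size = gen_len,
	};
	unsigned int blob_len = crypto_dh_key_len(&params);
	u8 *blob = kmalloc(blob_len, GFP_KERNEL);

	if (!blob)
		return -ENOMEM;
	err = crypto_dh_encode_key(blob, blob_len, &params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, blob, blob_len);
	kfree_sensitive(blob);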
*/ - if (params->q_size == 0) - params->q = NULL; - return 0; } EXPORT_SYMBOL_GPL(crypto_dh_decode_key); diff --git a/crypto/drbg.c b/crypto/drbg.c index b6929eb5f565..982d4ca4526d 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -98,7 +98,9 @@ */ #include <crypto/drbg.h> +#include <crypto/internal/cipher.h> #include <linux/kernel.h> +#include <linux/jiffies.h> /*************************************************************** * Backend cipher definitions available to DRBG @@ -177,16 +179,16 @@ static const struct drbg_core drbg_cores[] = { .backend_cra_name = "hmac(sha384)", }, { .flags = DRBG_HMAC | DRBG_STRENGTH256, - .statelen = 64, /* block length of cipher */ - .blocklen_bytes = 64, - .cra_name = "hmac_sha512", - .backend_cra_name = "hmac(sha512)", - }, { - .flags = DRBG_HMAC | DRBG_STRENGTH256, .statelen = 32, /* block length of cipher */ .blocklen_bytes = 32, .cra_name = "hmac_sha256", .backend_cra_name = "hmac(sha256)", + }, { + .flags = DRBG_HMAC | DRBG_STRENGTH256, + .statelen = 64, /* block length of cipher */ + .blocklen_bytes = 64, + .cra_name = "hmac_sha512", + .backend_cra_name = "hmac(sha512)", }, #endif /* CONFIG_CRYPTO_DRBG_HMAC */ }; @@ -1035,17 +1037,39 @@ static const struct drbg_state_ops drbg_hash_ops = { ******************************************************************/ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed, - int reseed) + int reseed, enum drbg_seed_state new_seed_state) { int ret = drbg->d_ops->update(drbg, seed, reseed); if (ret) return ret; - drbg->seeded = true; + drbg->seeded = new_seed_state; + drbg->last_seed_time = jiffies; /* 10.1.1.2 / 10.1.1.3 step 5 */ drbg->reseed_ctr = 1; + switch (drbg->seeded) { + case DRBG_SEED_STATE_UNSEEDED: + /* Impossible, but handle it to silence compiler warnings. */ + fallthrough; + case DRBG_SEED_STATE_PARTIAL: + /* + * Require frequent reseeds until the seed source is + * fully initialized. + */ + drbg->reseed_threshold = 50; + break; + + case DRBG_SEED_STATE_FULL: + /* + * Seed source has become fully initialized, frequent + * reseeds no longer required. + */ + drbg->reseed_threshold = drbg_max_requests(drbg); + break; + } + return ret; } @@ -1065,12 +1089,10 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg, return 0; } -static void drbg_async_seed(struct work_struct *work) +static int drbg_seed_from_random(struct drbg_state *drbg) { struct drbg_string data; LIST_HEAD(seedlist); - struct drbg_state *drbg = container_of(work, struct drbg_state, - seed_work); unsigned int entropylen = drbg_sec_strength(drbg->core->flags); unsigned char entropy[32]; int ret; @@ -1081,30 +1103,35 @@ static void drbg_async_seed(struct work_struct *work) drbg_string_fill(&data, entropy, entropylen); list_add_tail(&data.list, &seedlist); - mutex_lock(&drbg->drbg_mutex); - ret = drbg_get_random_bytes(drbg, entropy, entropylen); if (ret) - goto unlock; - - /* If nonblocking pool is initialized, deactivate Jitter RNG */ - crypto_free_rng(drbg->jent); - drbg->jent = NULL; + goto out; - /* Set seeded to false so that if __drbg_seed fails the - * next generate call will trigger a reseed. 
- */ - drbg->seeded = false; + ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL); - __drbg_seed(drbg, &seedlist, true); +out: + memzero_explicit(entropy, entropylen); + return ret; +} - if (drbg->seeded) - drbg->reseed_threshold = drbg_max_requests(drbg); +static bool drbg_nopr_reseed_interval_elapsed(struct drbg_state *drbg) +{ + unsigned long next_reseed; -unlock: - mutex_unlock(&drbg->drbg_mutex); + /* Don't ever reseed from get_random_bytes() in test mode. */ + if (list_empty(&drbg->test_data.list)) + return false; - memzero_explicit(entropy, entropylen); + /* + * Obtain fresh entropy for the nopr DRBGs after 300s have + * elapsed in order to still achieve sort of partial + * prediction resistance over the time domain at least. Note + * that the period of 300s has been chosen to match the + * CRNG_RESEED_INTERVAL of the get_random_bytes()' chacha + * rngs. + */ + next_reseed = drbg->last_seed_time + 300 * HZ; + return time_after(jiffies, next_reseed); } /* @@ -1126,6 +1153,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, unsigned int entropylen = drbg_sec_strength(drbg->core->flags); struct drbg_string data1; LIST_HEAD(seedlist); + enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL; /* 9.1 / 9.2 / 9.3.1 step 3 */ if (pers && pers->len > (drbg_max_addtl(drbg))) { @@ -1153,6 +1181,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, BUG_ON((entropylen * 2) > sizeof(entropy)); /* Get seed from in-kernel /dev/urandom */ + if (!rng_is_initialized()) + new_seed_state = DRBG_SEED_STATE_PARTIAL; + ret = drbg_get_random_bytes(drbg, entropy, entropylen); if (ret) goto out; @@ -1162,13 +1193,32 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, pr_devel("DRBG: (re)seeding with %u bytes of entropy\n", entropylen); } else { - /* Get seed from Jitter RNG */ + /* + * Get seed from Jitter RNG, failures are + * fatal only in FIPS mode. + */ ret = crypto_rng_get_bytes(drbg->jent, entropy + entropylen, entropylen); - if (ret) { + if (fips_enabled && ret) { pr_devel("DRBG: jent failed with %d\n", ret); - goto out; + + /* + * Do not treat the transient failure of the + * Jitter RNG as an error that needs to be + * reported. The combined number of the + * maximum reseed threshold times the maximum + * number of Jitter RNG transient errors is + * less than the reseed threshold required by + * SP800-90A allowing us to treat the + * transient errors as such. + * + * However, we mandate that at least the first + * seeding operation must succeed with the + * Jitter RNG. 
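None of this reseeding machinery is visible to consumers, who keep using the generic RNG API; a minimal sketch, assuming the usual drbg_nopr_hmac_sha256 instance registered by this file:

	struct crypto_rng *rng;
	u8 out[32];
	int err;

	rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);
	err = crypto_rng_get_bytes(rng, out, sizeof(out));	/* may reseed first */
	crypto_free_rng(rng);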
+ */ + if (!reseed || ret != -EAGAIN) + goto out; } drbg_string_fill(&data1, entropy, entropylen * 2); @@ -1193,7 +1243,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, memset(drbg->C, 0, drbg_statelen(drbg)); } - ret = __drbg_seed(drbg, &seedlist, reseed); + ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state); out: memzero_explicit(entropy, entropylen * 2); @@ -1206,19 +1256,19 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) { if (!drbg) return; - kzfree(drbg->Vbuf); + kfree_sensitive(drbg->Vbuf); drbg->Vbuf = NULL; drbg->V = NULL; - kzfree(drbg->Cbuf); + kfree_sensitive(drbg->Cbuf); drbg->Cbuf = NULL; drbg->C = NULL; - kzfree(drbg->scratchpadbuf); + kfree_sensitive(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; drbg->reseed_ctr = 0; drbg->d_ops = NULL; drbg->core = NULL; if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { - kzfree(drbg->prev); + kfree_sensitive(drbg->prev); drbg->prev = NULL; drbg->fips_primed = false; } @@ -1294,8 +1344,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), GFP_KERNEL); - if (!drbg->prev) + if (!drbg->prev) { + ret = -ENOMEM; goto fini; + } drbg->fips_primed = false; } @@ -1371,19 +1423,26 @@ static int drbg_generate(struct drbg_state *drbg, * here. The spec is a bit convoluted here, we make it simpler. */ if (drbg->reseed_threshold < drbg->reseed_ctr) - drbg->seeded = false; + drbg->seeded = DRBG_SEED_STATE_UNSEEDED; - if (drbg->pr || !drbg->seeded) { + if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) { pr_devel("DRBG: reseeding before generation (prediction " "resistance: %s, state %s)\n", drbg->pr ? "true" : "false", - drbg->seeded ? "seeded" : "unseeded"); + (drbg->seeded == DRBG_SEED_STATE_FULL ? + "seeded" : "unseeded")); /* 9.3.1 steps 7.1 through 7.3 */ len = drbg_seed(drbg, addtl, true); if (len) goto err; /* 9.3.1 step 7.4 */ addtl = NULL; + } else if (rng_is_initialized() && + (drbg->seeded == DRBG_SEED_STATE_PARTIAL || + drbg_nopr_reseed_interval_elapsed(drbg))) { + len = drbg_seed_from_random(drbg); + if (len) + goto err; } if (addtl && 0 < addtl->len) @@ -1476,51 +1535,23 @@ static int drbg_generate_long(struct drbg_state *drbg, return 0; } -static void drbg_schedule_async_seed(struct random_ready_callback *rdy) -{ - struct drbg_state *drbg = container_of(rdy, struct drbg_state, - random_ready); - - schedule_work(&drbg->seed_work); -} - static int drbg_prepare_hrng(struct drbg_state *drbg) { - int err; - /* We do not need an HRNG in test mode. */ if (list_empty(&drbg->test_data.list)) return 0; - INIT_WORK(&drbg->seed_work, drbg_async_seed); - - drbg->random_ready.owner = THIS_MODULE; - drbg->random_ready.func = drbg_schedule_async_seed; - - err = add_random_ready_callback(&drbg->random_ready); - - switch (err) { - case 0: - break; - - case -EALREADY: - err = 0; - /* fall through */ - - default: - drbg->random_ready.func = NULL; - return err; - } - drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); + if (IS_ERR(drbg->jent)) { + const int err = PTR_ERR(drbg->jent); - /* - * Require frequent reseeds until the seed source is fully - * initialized. 
- */ - drbg->reseed_threshold = 50; + drbg->jent = NULL; + if (fips_enabled || err != -ENOENT) + return err; + pr_info("DRBG: Continuing without Jitter RNG\n"); + } - return err; + return 0; } /* @@ -1563,7 +1594,8 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (!drbg->core) { drbg->core = &drbg_cores[coreref]; drbg->pr = pr; - drbg->seeded = false; + drbg->seeded = DRBG_SEED_STATE_UNSEEDED; + drbg->last_seed_time = 0; drbg->reseed_threshold = drbg_max_requests(drbg); ret = drbg_alloc_state(drbg); @@ -1574,14 +1606,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (ret) goto free_everything; - if (IS_ERR(drbg->jent)) { - ret = PTR_ERR(drbg->jent); - drbg->jent = NULL; - if (fips_enabled || ret != -ENOENT) - goto free_everything; - pr_info("DRBG: Continuing without Jitter RNG\n"); - } - reseed = false; } @@ -1614,12 +1638,9 @@ free_everything: */ static int drbg_uninstantiate(struct drbg_state *drbg) { - if (drbg->random_ready.func) { - del_random_ready_callback(&drbg->random_ready); - cancel_work_sync(&drbg->seed_work); + if (!IS_ERR_OR_NULL(drbg->jent)) crypto_free_rng(drbg->jent); - drbg->jent = NULL; - } + drbg->jent = NULL; if (drbg->d_ops) drbg->d_ops->crypto_fini(drbg); @@ -1682,10 +1703,10 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg) static int drbg_fini_hash_kernel(struct drbg_state *drbg) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; if (sdesc) { crypto_free_shash(sdesc->shash.tfm); - kzfree(sdesc); + kfree_sensitive(sdesc); } drbg->priv_data = NULL; return 0; @@ -1694,7 +1715,7 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg) static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, const unsigned char *key) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg)); } @@ -1702,7 +1723,7 @@ static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, const struct list_head *in) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; struct drbg_string *input = NULL; crypto_shash_init(&sdesc->shash); @@ -1797,8 +1818,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) static void drbg_kcapi_symsetkey(struct drbg_state *drbg, const unsigned char *key) { - struct crypto_cipher *tfm = - (struct crypto_cipher *)drbg->priv_data; + struct crypto_cipher *tfm = drbg->priv_data; crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg))); } @@ -1806,8 +1826,7 @@ static void drbg_kcapi_symsetkey(struct drbg_state *drbg, static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, const struct drbg_string *in) { - struct crypto_cipher *tfm = - (struct crypto_cipher *)drbg->priv_data; + struct crypto_cipher *tfm = drbg->priv_data; /* there is only component in *in */ BUG_ON(in->len < drbg_blocklen(drbg)); @@ -1986,7 +2005,7 @@ static inline int __init drbg_healthcheck_sanity(void) #define OUTBUFLEN 16 unsigned char buf[OUTBUFLEN]; struct drbg_state *drbg = NULL; - int ret = -EFAULT; + int ret; int rc = -EFAULT; bool pr = false; int coreref = 0; @@ -2145,3 +2164,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) " CRYPTO_DRBG_HMAC_STRING CRYPTO_DRBG_CTR_STRING); MODULE_ALIAS_CRYPTO("stdrng"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/ecb.c b/crypto/ecb.c index 
69a687cbdf21..71fbb0543d64 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -6,6 +6,7 @@ */ #include <crypto/algapi.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/init.h> diff --git a/crypto/ecc.c b/crypto/ecc.c index 02d35be7702b..7315217c8f73 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -24,6 +24,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include <crypto/ecc_curve.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> @@ -31,10 +32,10 @@ #include <linux/fips.h> #include <crypto/ecdh.h> #include <crypto/rng.h> +#include <crypto/internal/ecc.h> #include <asm/unaligned.h> #include <linux/ratelimit.h> -#include "ecc.h" #include "ecc_curve_defs.h" typedef struct { @@ -42,7 +43,14 @@ typedef struct { u64 m_high; } uint128_t; -static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id) +/* Returns the curve25519 curve parameters */ +const struct ecc_curve *ecc_get_curve25519(void) +{ + return &ecc_25519; +} +EXPORT_SYMBOL(ecc_get_curve25519); + +const struct ecc_curve *ecc_get_curve(unsigned int curve_id) { switch (curve_id) { /* In FIPS mode only allow P256 and higher */ @@ -50,10 +58,13 @@ static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id) return fips_enabled ? NULL : &nist_p192; case ECC_CURVE_NIST_P256: return &nist_p256; + case ECC_CURVE_NIST_P384: + return &nist_p384; default: return NULL; } } +EXPORT_SYMBOL(ecc_get_curve); static u64 *ecc_alloc_digits_space(unsigned int ndigits) { @@ -67,10 +78,10 @@ static u64 *ecc_alloc_digits_space(unsigned int ndigits) static void ecc_free_digits_space(u64 *space) { - kzfree(space); + kfree_sensitive(space); } -static struct ecc_point *ecc_alloc_point(unsigned int ndigits) +struct ecc_point *ecc_alloc_point(unsigned int ndigits) { struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL); @@ -95,16 +106,18 @@ err_alloc_x: kfree(p); return NULL; } +EXPORT_SYMBOL(ecc_alloc_point); -static void ecc_free_point(struct ecc_point *p) +void ecc_free_point(struct ecc_point *p) { if (!p) return; - kzfree(p->x); - kzfree(p->y); - kzfree(p); + kfree_sensitive(p->x); + kfree_sensitive(p->y); + kfree_sensitive(p); } +EXPORT_SYMBOL(ecc_free_point); static void vli_clear(u64 *vli, unsigned int ndigits) { @@ -128,7 +141,7 @@ bool vli_is_zero(const u64 *vli, unsigned int ndigits) } EXPORT_SYMBOL(vli_is_zero); -/* Returns nonzero if bit bit of vli is set. */ +/* Returns nonzero if the given bit of vli is set. */ static u64 vli_test_bit(const u64 *vli, unsigned int bit) { return (vli[bit / 64] & ((u64)1 << (bit % 64))); } @@ -154,7 +167,7 @@ static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits) } /* Counts the number of bits required for vli. */ -static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits) +unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits) { unsigned int i, num_digits; u64 digit; @@ -169,6 +182,7 @@ static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits) return ((num_digits - 1) * 64 + i); } +EXPORT_SYMBOL(vli_num_bits); /* Set dest from unaligned bit string src.
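With ecc_get_curve() exported above, other modules (for example the ECDSA code this series adds) can look the parameters up directly; a hypothetical caller:

	const struct ecc_curve *curve = ecc_get_curve(ECC_CURVE_NIST_P384);

	if (!curve)	/* e.g. P-192 when fips_enabled */
		return -EINVAL;
	pr_debug("%s: %u 64-bit digits per coordinate\n",
		 curve->name, curve->g.ndigits);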
*/ void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits) @@ -775,18 +789,133 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product, } } +#define SL32OR32(x32, y32) (((u64)x32 << 32) | y32) +#define AND64H(x64) (x64 & 0xffFFffFF00000000ull) +#define AND64L(x64) (x64 & 0x00000000ffFFffFFull) + +/* Computes result = product % curve_prime + * from "Mathematical routines for the NIST prime elliptic curves" + */ +static void vli_mmod_fast_384(u64 *result, const u64 *product, + const u64 *curve_prime, u64 *tmp) +{ + int carry; + const unsigned int ndigits = 6; + + /* t */ + vli_set(result, product, ndigits); + + /* s1 */ + tmp[0] = 0; // 0 || 0 + tmp[1] = 0; // 0 || 0 + tmp[2] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 + tmp[3] = product[11]>>32; // 0 ||a23 + tmp[4] = 0; // 0 || 0 + tmp[5] = 0; // 0 || 0 + carry = vli_lshift(tmp, tmp, 1, ndigits); + carry += vli_add(result, result, tmp, ndigits); + + /* s2 */ + tmp[0] = product[6]; //a13||a12 + tmp[1] = product[7]; //a15||a14 + tmp[2] = product[8]; //a17||a16 + tmp[3] = product[9]; //a19||a18 + tmp[4] = product[10]; //a21||a20 + tmp[5] = product[11]; //a23||a22 + carry += vli_add(result, result, tmp, ndigits); + + /* s3 */ + tmp[0] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 + tmp[1] = SL32OR32(product[6], (product[11]>>32)); //a12||a23 + tmp[2] = SL32OR32(product[7], (product[6])>>32); //a14||a13 + tmp[3] = SL32OR32(product[8], (product[7]>>32)); //a16||a15 + tmp[4] = SL32OR32(product[9], (product[8]>>32)); //a18||a17 + tmp[5] = SL32OR32(product[10], (product[9]>>32)); //a20||a19 + carry += vli_add(result, result, tmp, ndigits); + + /* s4 */ + tmp[0] = AND64H(product[11]); //a23|| 0 + tmp[1] = (product[10]<<32); //a20|| 0 + tmp[2] = product[6]; //a13||a12 + tmp[3] = product[7]; //a15||a14 + tmp[4] = product[8]; //a17||a16 + tmp[5] = product[9]; //a19||a18 + carry += vli_add(result, result, tmp, ndigits); + + /* s5 */ + tmp[0] = 0; // 0|| 0 + tmp[1] = 0; // 0|| 0 + tmp[2] = product[10]; //a21||a20 + tmp[3] = product[11]; //a23||a22 + tmp[4] = 0; // 0|| 0 + tmp[5] = 0; // 0|| 0 + carry += vli_add(result, result, tmp, ndigits); + + /* s6 */ + tmp[0] = AND64L(product[10]); // 0 ||a20 + tmp[1] = AND64H(product[10]); //a21|| 0 + tmp[2] = product[11]; //a23||a22 + tmp[3] = 0; // 0 || 0 + tmp[4] = 0; // 0 || 0 + tmp[5] = 0; // 0 || 0 + carry += vli_add(result, result, tmp, ndigits); + + /* d1 */ + tmp[0] = SL32OR32(product[6], (product[11]>>32)); //a12||a23 + tmp[1] = SL32OR32(product[7], (product[6]>>32)); //a14||a13 + tmp[2] = SL32OR32(product[8], (product[7]>>32)); //a16||a15 + tmp[3] = SL32OR32(product[9], (product[8]>>32)); //a18||a17 + tmp[4] = SL32OR32(product[10], (product[9]>>32)); //a20||a19 + tmp[5] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 + carry -= vli_sub(result, result, tmp, ndigits); + + /* d2 */ + tmp[0] = (product[10]<<32); //a20|| 0 + tmp[1] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 + tmp[2] = (product[11]>>32); // 0 ||a23 + tmp[3] = 0; // 0 || 0 + tmp[4] = 0; // 0 || 0 + tmp[5] = 0; // 0 || 0 + carry -= vli_sub(result, result, tmp, ndigits); + + /* d3 */ + tmp[0] = 0; // 0 || 0 + tmp[1] = AND64H(product[11]); //a23|| 0 + tmp[2] = product[11]>>32; // 0 ||a23 + tmp[3] = 0; // 0 || 0 + tmp[4] = 0; // 0 || 0 + tmp[5] = 0; // 0 || 0 + carry -= vli_sub(result, result, tmp, ndigits); + + if (carry < 0) { + do { + carry += vli_add(result, result, curve_prime, ndigits); + } while (carry < 0); + } else { + while (carry || vli_cmp(curve_prime, result, ndigits) != 1) + 
carry -= vli_sub(result, result, curve_prime, ndigits); + } + +} + +#undef SL32OR32 +#undef AND64H +#undef AND64L + /* Computes result = product % curve_prime for different curve_primes. * * Note that curve_primes are distinguished just by heuristic check and * not by complete conformance check. */ static bool vli_mmod_fast(u64 *result, u64 *product, - const u64 *curve_prime, unsigned int ndigits) + const struct ecc_curve *curve) { u64 tmp[2 * ECC_MAX_DIGITS]; + const u64 *curve_prime = curve->p; + const unsigned int ndigits = curve->g.ndigits; - /* Currently, both NIST primes have -1 in lowest qword. */ - if (curve_prime[0] != -1ull) { + /* All NIST curves have name prefix 'nist_' */ + if (strncmp(curve->name, "nist_", 5) != 0) { /* Try to handle Pseudo-Mersenne primes. */ if (curve_prime[ndigits - 1] == -1ull) { vli_mmod_special(result, product, curve_prime, @@ -809,6 +938,9 @@ static bool vli_mmod_fast(u64 *result, u64 *product, case 4: vli_mmod_fast_256(result, product, curve_prime, tmp); break; + case 6: + vli_mmod_fast_384(result, product, curve_prime, tmp); + break; default: pr_err_ratelimited("ecc: unsupported digits size!\n"); return false; @@ -832,22 +964,22 @@ EXPORT_SYMBOL(vli_mod_mult_slow); /* Computes result = (left * right) % curve_prime. */ static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right, - const u64 *curve_prime, unsigned int ndigits) + const struct ecc_curve *curve) { u64 product[2 * ECC_MAX_DIGITS]; - vli_mult(product, left, right, ndigits); - vli_mmod_fast(result, product, curve_prime, ndigits); + vli_mult(product, left, right, curve->g.ndigits); + vli_mmod_fast(result, product, curve); } /* Computes result = left^2 % curve_prime. */ static void vli_mod_square_fast(u64 *result, const u64 *left, - const u64 *curve_prime, unsigned int ndigits) + const struct ecc_curve *curve) { u64 product[2 * ECC_MAX_DIGITS]; - vli_square(product, left, ndigits); - vli_mmod_fast(result, product, curve_prime, ndigits); + vli_square(product, left, curve->g.ndigits); + vli_mmod_fast(result, product, curve); } #define EVEN(vli) (!(vli[0] & 1)) @@ -933,37 +1065,40 @@ EXPORT_SYMBOL(vli_mod_inv); /* ------ Point operations ------ */ /* Returns true if p_point is the point at infinity, false otherwise. */ -static bool ecc_point_is_zero(const struct ecc_point *point) +bool ecc_point_is_zero(const struct ecc_point *point) { return (vli_is_zero(point->x, point->ndigits) && vli_is_zero(point->y, point->ndigits)); } +EXPORT_SYMBOL(ecc_point_is_zero); /* Point multiplication algorithm using Montgomery's ladder with co-Z - * coordinates. From http://eprint.iacr.org/2011/338.pdf + * coordinates. 
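 * The ladder performs the same xycz_add_c()/xycz_add() pair for every
 * scalar bit, so the sequence of field operations does not depend on the
 * bit's value; only the rx[]/ry[] buffer selection does. Per-bit step as
 * used by ecc_point_mult() below:
 *
 *   nb = !vli_test_bit(scalar, i);
 *   xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
 *   xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
 *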
From https://eprint.iacr.org/2011/338.pdf */ /* Double in place */ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, - u64 *curve_prime, unsigned int ndigits) + const struct ecc_curve *curve) { /* t1 = x, t2 = y, t3 = z */ u64 t4[ECC_MAX_DIGITS]; u64 t5[ECC_MAX_DIGITS]; + const u64 *curve_prime = curve->p; + const unsigned int ndigits = curve->g.ndigits; if (vli_is_zero(z1, ndigits)) return; /* t4 = y1^2 */ - vli_mod_square_fast(t4, y1, curve_prime, ndigits); + vli_mod_square_fast(t4, y1, curve); /* t5 = x1*y1^2 = A */ - vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits); + vli_mod_mult_fast(t5, x1, t4, curve); /* t4 = y1^4 */ - vli_mod_square_fast(t4, t4, curve_prime, ndigits); + vli_mod_square_fast(t4, t4, curve); /* t2 = y1*z1 = z3 */ - vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits); + vli_mod_mult_fast(y1, y1, z1, curve); /* t3 = z1^2 */ - vli_mod_square_fast(z1, z1, curve_prime, ndigits); + vli_mod_square_fast(z1, z1, curve); /* t1 = x1 + z1^2 */ vli_mod_add(x1, x1, z1, curve_prime, ndigits); @@ -972,7 +1107,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, /* t3 = x1 - z1^2 */ vli_mod_sub(z1, x1, z1, curve_prime, ndigits); /* t1 = x1^2 - z1^4 */ - vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits); + vli_mod_mult_fast(x1, x1, z1, curve); /* t3 = 2*(x1^2 - z1^4) */ vli_mod_add(z1, x1, x1, curve_prime, ndigits); @@ -989,7 +1124,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, /* t1 = 3/2*(x1^2 - z1^4) = B */ /* t3 = B^2 */ - vli_mod_square_fast(z1, x1, curve_prime, ndigits); + vli_mod_square_fast(z1, x1, curve); /* t3 = B^2 - A */ vli_mod_sub(z1, z1, t5, curve_prime, ndigits); /* t3 = B^2 - 2A = x3 */ @@ -997,7 +1132,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, /* t5 = A - x3 */ vli_mod_sub(t5, t5, z1, curve_prime, ndigits); /* t1 = B * (A - x3) */ - vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + vli_mod_mult_fast(x1, x1, t5, curve); /* t4 = B * (A - x3) - y1^4 = y3 */ vli_mod_sub(t4, x1, t4, curve_prime, ndigits); @@ -1007,23 +1142,22 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, } /* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */ -static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime, - unsigned int ndigits) +static void apply_z(u64 *x1, u64 *y1, u64 *z, const struct ecc_curve *curve) { u64 t1[ECC_MAX_DIGITS]; - vli_mod_square_fast(t1, z, curve_prime, ndigits); /* z^2 */ - vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */ - vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits); /* z^3 */ - vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */ + vli_mod_square_fast(t1, z, curve); /* z^2 */ + vli_mod_mult_fast(x1, x1, t1, curve); /* x1 * z^2 */ + vli_mod_mult_fast(t1, t1, z, curve); /* z^3 */ + vli_mod_mult_fast(y1, y1, t1, curve); /* y1 * z^3 */ } /* P = (x1, y1) => 2P, (x2, y2) => P' */ static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2, - u64 *p_initial_z, u64 *curve_prime, - unsigned int ndigits) + u64 *p_initial_z, const struct ecc_curve *curve) { u64 z[ECC_MAX_DIGITS]; + const unsigned int ndigits = curve->g.ndigits; vli_set(x2, x1, ndigits); vli_set(y2, y1, ndigits); @@ -1034,35 +1168,37 @@ static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2, if (p_initial_z) vli_set(z, p_initial_z, ndigits); - apply_z(x1, y1, z, curve_prime, ndigits); + apply_z(x1, y1, z, curve); - ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits); + ecc_point_double_jacobian(x1, y1, z, curve); - apply_z(x2, y2, z, 
curve_prime, ndigits); + apply_z(x2, y2, z, curve); } /* Input P = (x1, y1, Z), Q = (x2, y2, Z) * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3) * or P => P', Q => P + Q */ -static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, - unsigned int ndigits) +static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, + const struct ecc_curve *curve) { /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ u64 t5[ECC_MAX_DIGITS]; + const u64 *curve_prime = curve->p; + const unsigned int ndigits = curve->g.ndigits; /* t5 = x2 - x1 */ vli_mod_sub(t5, x2, x1, curve_prime, ndigits); /* t5 = (x2 - x1)^2 = A */ - vli_mod_square_fast(t5, t5, curve_prime, ndigits); + vli_mod_square_fast(t5, t5, curve); /* t1 = x1*A = B */ - vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + vli_mod_mult_fast(x1, x1, t5, curve); /* t3 = x2*A = C */ - vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits); + vli_mod_mult_fast(x2, x2, t5, curve); /* t4 = y2 - y1 */ vli_mod_sub(y2, y2, y1, curve_prime, ndigits); /* t5 = (y2 - y1)^2 = D */ - vli_mod_square_fast(t5, y2, curve_prime, ndigits); + vli_mod_square_fast(t5, y2, curve); /* t5 = D - B */ vli_mod_sub(t5, t5, x1, curve_prime, ndigits); @@ -1071,11 +1207,11 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, /* t3 = C - B */ vli_mod_sub(x2, x2, x1, curve_prime, ndigits); /* t2 = y1*(C - B) */ - vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits); + vli_mod_mult_fast(y1, y1, x2, curve); /* t3 = B - x3 */ vli_mod_sub(x2, x1, t5, curve_prime, ndigits); /* t4 = (y2 - y1)*(B - x3) */ - vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits); + vli_mod_mult_fast(y2, y2, x2, curve); /* t4 = y3 */ vli_mod_sub(y2, y2, y1, curve_prime, ndigits); @@ -1086,22 +1222,24 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3) * or P => P - Q, Q => P + Q */ -static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, - unsigned int ndigits) +static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, + const struct ecc_curve *curve) { /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ u64 t5[ECC_MAX_DIGITS]; u64 t6[ECC_MAX_DIGITS]; u64 t7[ECC_MAX_DIGITS]; + const u64 *curve_prime = curve->p; + const unsigned int ndigits = curve->g.ndigits; /* t5 = x2 - x1 */ vli_mod_sub(t5, x2, x1, curve_prime, ndigits); /* t5 = (x2 - x1)^2 = A */ - vli_mod_square_fast(t5, t5, curve_prime, ndigits); + vli_mod_square_fast(t5, t5, curve); /* t1 = x1*A = B */ - vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + vli_mod_mult_fast(x1, x1, t5, curve); /* t3 = x2*A = C */ - vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits); + vli_mod_mult_fast(x2, x2, t5, curve); /* t4 = y2 + y1 */ vli_mod_add(t5, y2, y1, curve_prime, ndigits); /* t4 = y2 - y1 */ @@ -1110,29 +1248,29 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, /* t6 = C - B */ vli_mod_sub(t6, x2, x1, curve_prime, ndigits); /* t2 = y1 * (C - B) */ - vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits); + vli_mod_mult_fast(y1, y1, t6, curve); /* t6 = B + C */ vli_mod_add(t6, x1, x2, curve_prime, ndigits); /* t3 = (y2 - y1)^2 */ - vli_mod_square_fast(x2, y2, curve_prime, ndigits); + vli_mod_square_fast(x2, y2, curve); /* t3 = x3 */ vli_mod_sub(x2, x2, t6, curve_prime, ndigits); /* t7 = B - x3 */ vli_mod_sub(t7, x1, x2, curve_prime, ndigits); /* t4 = (y2 - y1)*(B - x3) */ - vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits); + vli_mod_mult_fast(y2, y2, t7, curve); /* t4 = y3 */ vli_mod_sub(y2, y2, y1, curve_prime, 
ndigits); /* t7 = (y2 + y1)^2 = F */ - vli_mod_square_fast(t7, t5, curve_prime, ndigits); + vli_mod_square_fast(t7, t5, curve); /* t7 = x3' */ vli_mod_sub(t7, t7, t6, curve_prime, ndigits); /* t6 = x3' - B */ vli_mod_sub(t6, t7, x1, curve_prime, ndigits); /* t6 = (y2 + y1)*(x3' - B) */ - vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits); + vli_mod_mult_fast(t6, t6, t5, curve); /* t2 = y3' */ vli_mod_sub(y1, t6, y1, curve_prime, ndigits); @@ -1162,41 +1300,37 @@ static void ecc_point_mult(struct ecc_point *result, vli_set(rx[1], point->x, ndigits); vli_set(ry[1], point->y, ndigits); - xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime, - ndigits); + xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve); for (i = num_bits - 2; i > 0; i--) { nb = !vli_test_bit(scalar, i); - xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime, - ndigits); - xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, - ndigits); + xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve); + xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve); } nb = !vli_test_bit(scalar, 0); - xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime, - ndigits); + xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve); /* Find final 1/Z value. */ /* X1 - X0 */ vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits); /* Yb * (X1 - X0) */ - vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits); + vli_mod_mult_fast(z, z, ry[1 - nb], curve); /* xP * Yb * (X1 - X0) */ - vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits); + vli_mod_mult_fast(z, z, point->x, curve); /* 1 / (xP * Yb * (X1 - X0)) */ vli_mod_inv(z, z, curve_prime, point->ndigits); /* yP / (xP * Yb * (X1 - X0)) */ - vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits); + vli_mod_mult_fast(z, z, point->y, curve); /* Xb * yP / (xP * Yb * (X1 - X0)) */ - vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits); + vli_mod_mult_fast(z, z, rx[1 - nb], curve); /* End 1/Z calculation */ - xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits); + xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve); - apply_z(rx[0], ry[0], z, curve_prime, ndigits); + apply_z(rx[0], ry[0], z, curve); vli_set(result->x, rx[0], ndigits); vli_set(result->y, ry[0], ndigits); @@ -1217,9 +1351,9 @@ static void ecc_point_add(const struct ecc_point *result, vli_mod_sub(z, result->x, p->x, curve->p, ndigits); vli_set(px, p->x, ndigits); vli_set(py, p->y, ndigits); - xycz_add(px, py, result->x, result->y, curve->p, ndigits); + xycz_add(px, py, result->x, result->y, curve); vli_mod_inv(z, z, curve->p, ndigits); - apply_z(result->x, result->y, z, curve->p, ndigits); + apply_z(result->x, result->y, z, curve); } /* Computes R = u1P + u2Q mod p using Shamir's trick. 
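 * Instead of two full scalar multiplications plus a point addition, one
 * double-and-add pass walks the bits of u1 and u2 together and adds one
 * of the precomputed points P, Q or P+Q per bit position. In outline (a
 * sketch, not the exact code below):
 *
 *   for (i = num_bits - 1; i >= 0; i--) {
 *           R = 2*R;
 *           idx = bit(u1, i) | (bit(u2, i) << 1);
 *           if (idx)
 *                   R = R + points[idx];   // points[] = {-, P, Q, P+Q}
 *   }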
@@ -1248,8 +1382,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result, points[2] = q; points[3] = &sum; - num_bits = max(vli_num_bits(u1, ndigits), - vli_num_bits(u2, ndigits)); + num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits)); i = num_bits - 1; idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1); point = points[idx]; @@ -1260,7 +1393,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result, z[0] = 1; for (--i; i >= 0; i--) { - ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits); + ecc_point_double_jacobian(rx, ry, z, curve); idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1); point = points[idx]; if (point) { @@ -1270,27 +1403,17 @@ void ecc_point_mult_shamir(const struct ecc_point *result, vli_set(tx, point->x, ndigits); vli_set(ty, point->y, ndigits); - apply_z(tx, ty, z, curve->p, ndigits); + apply_z(tx, ty, z, curve); vli_mod_sub(tz, rx, tx, curve->p, ndigits); - xycz_add(tx, ty, rx, ry, curve->p, ndigits); - vli_mod_mult_fast(z, z, tz, curve->p, ndigits); + xycz_add(tx, ty, rx, ry, curve); + vli_mod_mult_fast(z, z, tz, curve); } } vli_mod_inv(z, z, curve->p, ndigits); - apply_z(rx, ry, z, curve->p, ndigits); + apply_z(rx, ry, z, curve); } EXPORT_SYMBOL(ecc_point_mult_shamir); -static inline void ecc_swap_digits(const u64 *in, u64 *out, - unsigned int ndigits) -{ - const __be64 *src = (__force __be64 *)in; - int i; - - for (i = 0; i < ndigits; i++) - out[i] = be64_to_cpu(src[ndigits - 1 - i]); -} - static int __ecc_is_key_valid(const struct ecc_curve *curve, const u64 *private_key, unsigned int ndigits) { @@ -1404,7 +1527,9 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, } ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); - if (ecc_point_is_zero(pk)) { + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + if (ecc_is_pubkey_valid_full(curve, pk)) { ret = -EAGAIN; goto err_free_point; } @@ -1439,10 +1564,10 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, return -EINVAL; /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */ - vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */ - vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */ - vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */ - vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */ + vli_mod_square_fast(yy, pk->y, curve); /* y^2 */ + vli_mod_square_fast(xxx, pk->x, curve); /* x^2 */ + vli_mod_mult_fast(xxx, xxx, pk->x, curve); /* x^3 */ + vli_mod_mult_fast(w, curve->a, pk->x, curve); /* a·x */ vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */ vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */ if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */ @@ -1452,6 +1577,33 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, return 0; } EXPORT_SYMBOL(ecc_is_pubkey_valid_partial); +/* SP800-56A section 5.6.2.3.3 full verification */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk) +{ + struct ecc_point *nQ; + + /* Checks 1 through 3 */ + int ret = ecc_is_pubkey_valid_partial(curve, pk); + + if (ret) + return ret; + + /* Check 4: Verify that nQ is the zero point. 
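+ *
+ * For a public key Q in the subgroup of order n generated by G, scalar
+ * multiplication must satisfy
+ *
+ *   n * Q == point at infinity, i.e. ecc_point_is_zero(nQ)
+ *
+ * Any other result means Q is not a valid element of that subgroup, so
+ * the key is rejected (SP800-56A full public key validation).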
*/ + nQ = ecc_alloc_point(pk->ndigits); + if (!nQ) + return -ENOMEM; + + ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits); + if (!ecc_point_is_zero(nQ)) + ret = -EINVAL; + + ecc_free_point(nQ); + + return ret; +} +EXPORT_SYMBOL(ecc_is_pubkey_valid_full); + int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, const u64 *private_key, const u64 *public_key, u64 *secret) @@ -1495,11 +1647,16 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); - ecc_swap_digits(product->x, secret, ndigits); - - if (ecc_point_is_zero(product)) + if (ecc_point_is_zero(product)) { ret = -EFAULT; + goto err_validity; + } + + ecc_swap_digits(product->x, secret, ndigits); +err_validity: + memzero_explicit(priv, sizeof(priv)); + memzero_explicit(rand_z, sizeof(rand_z)); ecc_free_point(product); err_alloc_product: ecc_free_point(pk); diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h index 69be6c7d228f..9719934c9428 100644 --- a/crypto/ecc_curve_defs.h +++ b/crypto/ecc_curve_defs.h @@ -54,4 +54,53 @@ static struct ecc_curve nist_p256 = { .b = nist_p256_b }; +/* NIST P-384 */ +static u64 nist_p384_g_x[] = { 0x3A545E3872760AB7ull, 0x5502F25DBF55296Cull, + 0x59F741E082542A38ull, 0x6E1D3B628BA79B98ull, + 0x8Eb1C71EF320AD74ull, 0xAA87CA22BE8B0537ull }; +static u64 nist_p384_g_y[] = { 0x7A431D7C90EA0E5Full, 0x0A60B1CE1D7E819Dull, + 0xE9DA3113B5F0B8C0ull, 0xF8F41DBD289A147Cull, + 0x5D9E98BF9292DC29ull, 0x3617DE4A96262C6Full }; +static u64 nist_p384_p[] = { 0x00000000FFFFFFFFull, 0xFFFFFFFF00000000ull, + 0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull, + 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull }; +static u64 nist_p384_n[] = { 0xECEC196ACCC52973ull, 0x581A0DB248B0A77Aull, + 0xC7634D81F4372DDFull, 0xFFFFFFFFFFFFFFFFull, + 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull }; +static u64 nist_p384_a[] = { 0x00000000FFFFFFFCull, 0xFFFFFFFF00000000ull, + 0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull, + 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull }; +static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull, + 0x0314088f5013875aull, 0x181d9c6efe814112ull, + 0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull }; +static struct ecc_curve nist_p384 = { + .name = "nist_384", + .g = { + .x = nist_p384_g_x, + .y = nist_p384_g_y, + .ndigits = 6, + }, + .p = nist_p384_p, + .n = nist_p384_n, + .a = nist_p384_a, + .b = nist_p384_b +}; + +/* curve25519 */ +static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000 }; +static u64 curve25519_p[] = { 0xffffffffffffffed, 0xffffffffffffffff, + 0xffffffffffffffff, 0x7fffffffffffffff }; +static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000 }; +static const struct ecc_curve ecc_25519 = { + .name = "curve25519", + .g = { + .x = curve25519_g_x, + .ndigits = 4, + }, + .p = curve25519_p, + .a = curve25519_a, +}; + #endif diff --git a/crypto/ecdh.c b/crypto/ecdh.c index bd599053a8c4..80afee3234fb 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -6,11 +6,11 @@ */ #include <linux/module.h> +#include <crypto/internal/ecc.h> #include <crypto/internal/kpp.h> #include <crypto/kpp.h> #include <crypto/ecdh.h> #include <linux/scatterlist.h> -#include "ecc.h" struct ecdh_ctx { unsigned int curve_id; @@ -23,42 +23,27 @@ static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm) return kpp_tfm_ctx(tfm); } -static unsigned int ecdh_supported_curve(unsigned int 
curve_id) -{ - switch (curve_id) { - case ECC_CURVE_NIST_P192: return ECC_CURVE_NIST_P192_DIGITS; - case ECC_CURVE_NIST_P256: return ECC_CURVE_NIST_P256_DIGITS; - default: return 0; - } -} - static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); struct ecdh params; - unsigned int ndigits; - if (crypto_ecdh_decode_key(buf, len, &params) < 0 + if (crypto_ecdh_decode_key(buf, len, &params) < 0 || + params.key_size > sizeof(u64) * ctx->ndigits) return -EINVAL; - ndigits = ecdh_supported_curve(params.curve_id); - if (!ndigits) - return -EINVAL; - - ctx->curve_id = params.curve_id; - ctx->ndigits = ndigits; - if (!params.key || !params.key_size) return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, ctx->private_key); - if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, - (const u64 *)params.key, params.key_size) < 0) - return -EINVAL; - memcpy(ctx->private_key, params.key, params.key_size); + if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, + ctx->private_key, params.key_size) < 0) { + memzero_explicit(ctx->private_key, params.key_size); + return -EINVAL; + } return 0; } @@ -124,7 +109,7 @@ static int ecdh_compute_value(struct kpp_request *req) /* fall through */ free_all: - kzfree(shared_secret); + kfree_sensitive(shared_secret); free_pubkey: kfree(public_key); return ret; @@ -138,28 +123,116 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm) return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1); } -static struct kpp_alg ecdh = { +static int ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) +{ + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P192; + ctx->ndigits = ECC_CURVE_NIST_P192_DIGITS; + + return 0; +} + +static struct kpp_alg ecdh_nist_p192 = { + .set_secret = ecdh_set_secret, + .generate_public_key = ecdh_compute_value, + .compute_shared_secret = ecdh_compute_value, + .max_size = ecdh_max_size, + .init = ecdh_nist_p192_init_tfm, + .base = { + .cra_name = "ecdh-nist-p192", + .cra_driver_name = "ecdh-nist-p192-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecdh_ctx), + }, +}; + +static int ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) +{ + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P256; + ctx->ndigits = ECC_CURVE_NIST_P256_DIGITS; + + return 0; +} + +static struct kpp_alg ecdh_nist_p256 = { .set_secret = ecdh_set_secret, .generate_public_key = ecdh_compute_value, .compute_shared_secret = ecdh_compute_value, .max_size = ecdh_max_size, + .init = ecdh_nist_p256_init_tfm, .base = { - .cra_name = "ecdh", - .cra_driver_name = "ecdh-generic", + .cra_name = "ecdh-nist-p256", + .cra_driver_name = "ecdh-nist-p256-generic", .cra_priority = 100, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct ecdh_ctx), }, }; -static int ecdh_init(void) +static int ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) { - return crypto_register_kpp(&ecdh); + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P384; + ctx->ndigits = ECC_CURVE_NIST_P384_DIGITS; + + return 0; +} + +static struct kpp_alg ecdh_nist_p384 = { + .set_secret = ecdh_set_secret, + .generate_public_key = ecdh_compute_value, + .compute_shared_secret = ecdh_compute_value, + .max_size = ecdh_max_size, + .init = ecdh_nist_p384_init_tfm, + .base = { + .cra_name = "ecdh-nist-p384", + .cra_driver_name = "ecdh-nist-p384-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecdh_ctx), + }, +}; + +static 
bool ecdh_nist_p192_registered; + +static int __init ecdh_init(void) +{ + int ret; + + /* NIST p192 will fail to register in FIPS mode */ + ret = crypto_register_kpp(&ecdh_nist_p192); + ecdh_nist_p192_registered = ret == 0; + + ret = crypto_register_kpp(&ecdh_nist_p256); + if (ret) + goto nist_p256_error; + + ret = crypto_register_kpp(&ecdh_nist_p384); + if (ret) + goto nist_p384_error; + + return 0; + +nist_p384_error: + crypto_unregister_kpp(&ecdh_nist_p256); + +nist_p256_error: + if (ecdh_nist_p192_registered) + crypto_unregister_kpp(&ecdh_nist_p192); + return ret; } -static void ecdh_exit(void) +static void __exit ecdh_exit(void) { - crypto_unregister_kpp(&ecdh); + if (ecdh_nist_p192_registered) + crypto_unregister_kpp(&ecdh_nist_p192); + crypto_unregister_kpp(&ecdh_nist_p256); + crypto_unregister_kpp(&ecdh_nist_p384); } subsys_initcall(ecdh_init); diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c index 66fcb2ea8154..f18f9028f912 100644 --- a/crypto/ecdh_helper.c +++ b/crypto/ecdh_helper.c @@ -10,7 +10,7 @@ #include <crypto/ecdh.h> #include <crypto/kpp.h> -#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short)) +#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + sizeof(short)) static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz) { @@ -46,7 +46,6 @@ int crypto_ecdh_encode_key(char *buf, unsigned int len, return -EINVAL; ptr = ecdh_pack_data(ptr, &secret, sizeof(secret)); - ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id)); ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size)); ecdh_pack_data(ptr, params->key, params->key_size); @@ -67,7 +66,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len, if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH) return -EINVAL; - ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id)); + if (unlikely(len < secret.len)) + return -EINVAL; + ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size)); if (secret.len != crypto_ecdh_key_len(params)) return -EINVAL; diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c new file mode 100644 index 000000000000..fbd76498aba8 --- /dev/null +++ b/crypto/ecdsa.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2021 IBM Corporation + */ + +#include <linux/module.h> +#include <crypto/internal/akcipher.h> +#include <crypto/internal/ecc.h> +#include <crypto/akcipher.h> +#include <crypto/ecdh.h> +#include <linux/asn1_decoder.h> +#include <linux/scatterlist.h> + +#include "ecdsasignature.asn1.h" + +struct ecc_ctx { + unsigned int curve_id; + const struct ecc_curve *curve; + + bool pub_key_set; + u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */ + u64 y[ECC_MAX_DIGITS]; + struct ecc_point pub_key; +}; + +struct ecdsa_signature_ctx { + const struct ecc_curve *curve; + u64 r[ECC_MAX_DIGITS]; + u64 s[ECC_MAX_DIGITS]; +}; + +/* + * Get the r and s components of a signature from the X509 certificate. 
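+ *
+ * DER encodes r and s as minimal-length signed INTEGERs: a value whose
+ * top bit is set carries one 0x00 pad byte, and small values come with
+ * their leading zeros stripped. Hypothetical P-256 examples, keylen = 32:
+ *
+ *   02 21 00 d1 .. e5   33-byte INTEGER: skip the single 0x00 pad
+ *   02 1f 0d .. e5      31-byte INTEGER: prepend one zero byte
+ *
+ * before the fixed-width big-endian string is converted to u64 digits.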
+ */ +static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen, unsigned int ndigits) +{ + size_t keylen = ndigits * sizeof(u64); + ssize_t diff = vlen - keylen; + const char *d = value; + u8 rs[ECC_MAX_BYTES]; + + if (!value || !vlen) + return -EINVAL; + + /* diff = 0: 'value' has exactly the right size + * diff > 0: 'value' has too many bytes; one leading zero is allowed that + * makes the value a positive integer; error on more + * diff < 0: 'value' is missing leading zeros, which we add + */ + if (diff > 0) { + /* skip over leading zeros that make 'value' a positive int */ + if (*d == 0) { + vlen -= 1; + diff--; + d++; + } + if (diff) + return -EINVAL; + } + if (-diff >= keylen) + return -EINVAL; + + if (diff) { + /* leading zeros not given in 'value' */ + memset(rs, 0, -diff); + } + + memcpy(&rs[-diff], d, vlen); + + ecc_swap_digits((u64 *)rs, dest, ndigits); + + return 0; +} + +int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct ecdsa_signature_ctx *sig = context; + + return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen, + sig->curve->g.ndigits); +} + +int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct ecdsa_signature_ctx *sig = context; + + return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen, + sig->curve->g.ndigits); +} + +static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s) +{ + const struct ecc_curve *curve = ctx->curve; + unsigned int ndigits = curve->g.ndigits; + u64 s1[ECC_MAX_DIGITS]; + u64 u1[ECC_MAX_DIGITS]; + u64 u2[ECC_MAX_DIGITS]; + u64 x1[ECC_MAX_DIGITS]; + u64 y1[ECC_MAX_DIGITS]; + struct ecc_point res = ECC_POINT_INIT(x1, y1, ndigits); + + /* 0 < r < n and 0 < s < n */ + if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 || + vli_is_zero(s, ndigits) || vli_cmp(s, curve->n, ndigits) >= 0) + return -EBADMSG; + + /* hash is given */ + pr_devel("hash : %016llx %016llx ... %016llx\n", + hash[ndigits - 1], hash[ndigits - 2], hash[0]); + + /* s1 = (s^-1) mod n */ + vli_mod_inv(s1, s, curve->n, ndigits); + /* u1 = (hash * s1) mod n */ + vli_mod_mult_slow(u1, hash, s1, curve->n, ndigits); + /* u2 = (r * s1) mod n */ + vli_mod_mult_slow(u2, r, s1, curve->n, ndigits); + /* res = u1*G + u2 * pub_key */ + ecc_point_mult_shamir(&res, u1, &curve->g, u2, &ctx->pub_key, curve); + + /* res.x = res.x mod n (if res.x > order) */ + if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1)) + /* faster alternative for NIST p384, p256 & p192 */ + vli_sub(res.x, res.x, curve->n, ndigits); + + if (!vli_cmp(res.x, r, ndigits)) + return 0; + + return -EKEYREJECTED; +} + +/* + * Verify an ECDSA signature. 
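+ *
+ * _ecdsa_verify() above implements the textbook check: for message hash
+ * e, signature (r, s) and public key Q,
+ *
+ *   u1 = e * s^-1 mod n
+ *   u2 = r * s^-1 mod n
+ *   (x1, y1) = u1 * G + u2 * Q
+ *
+ * and the signature is valid iff x1 mod n == r.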
+ */ +static int ecdsa_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + size_t keylen = ctx->curve->g.ndigits * sizeof(u64); + struct ecdsa_signature_ctx sig_ctx = { + .curve = ctx->curve, + }; + u8 rawhash[ECC_MAX_BYTES]; + u64 hash[ECC_MAX_DIGITS]; + unsigned char *buffer; + ssize_t diff; + int ret; + + if (unlikely(!ctx->pub_key_set)) + return -EINVAL; + + buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, + sg_nents_for_len(req->src, req->src_len + req->dst_len), + buffer, req->src_len + req->dst_len, 0); + + ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, + buffer, req->src_len); + if (ret < 0) + goto error; + + /* if the hash is shorter, then we will add leading zeros to fit to ndigits */ + diff = keylen - req->dst_len; + if (diff >= 0) { + if (diff) + memset(rawhash, 0, diff); + memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len); + } else if (diff < 0) { + /* given hash is longer, we take the left-most bytes */ + memcpy(&rawhash, buffer + req->src_len, keylen); + } + + ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits); + + ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s); + +error: + kfree(buffer); + + return ret; +} + +static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id) +{ + ctx->curve_id = curve_id; + ctx->curve = ecc_get_curve(curve_id); + if (!ctx->curve) + return -EINVAL; + + return 0; +} + + +static void ecdsa_ecc_ctx_deinit(struct ecc_ctx *ctx) +{ + ctx->pub_key_set = false; +} + +static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx) +{ + unsigned int curve_id = ctx->curve_id; + int ret; + + ecdsa_ecc_ctx_deinit(ctx); + ret = ecdsa_ecc_ctx_init(ctx, curve_id); + if (ret == 0) + ctx->pub_key = ECC_POINT_INIT(ctx->x, ctx->y, + ctx->curve->g.ndigits); + return ret; +} + +/* + * Set the public key given the raw uncompressed key data from an X509 + * certificate. The key data contain the concatenated X and Y coordinates of + * the public key. 
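+ *
+ * This is the SEC1 uncompressed point format:
+ *
+ *   0x04 || X || Y
+ *
+ * e.g. a NIST P-256 key is 1 + 32 + 32 = 65 bytes, with each coordinate
+ * big-endian and exactly ndigits * 8 bytes wide.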
+ */ +static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + const unsigned char *d = key; + const u64 *digits = (const u64 *)&d[1]; + unsigned int ndigits; + int ret; + + ret = ecdsa_ecc_ctx_reset(ctx); + if (ret < 0) + return ret; + + if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0) + return -EINVAL; + /* we only accept uncompressed format indicated by '4' */ + if (d[0] != 4) + return -EINVAL; + + keylen--; + ndigits = (keylen >> 1) / sizeof(u64); + if (ndigits != ctx->curve->g.ndigits) + return -EINVAL; + + ecc_swap_digits(digits, ctx->pub_key.x, ndigits); + ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits); + ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key); + + ctx->pub_key_set = ret == 0; + + return ret; +} + +static void ecdsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + ecdsa_ecc_ctx_deinit(ctx); +} + +static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; +} + +static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384); +} + +static struct akcipher_alg ecdsa_nist_p384 = { + .verify = ecdsa_verify, + .set_pub_key = ecdsa_set_pub_key, + .max_size = ecdsa_max_size, + .init = ecdsa_nist_p384_init_tfm, + .exit = ecdsa_exit_tfm, + .base = { + .cra_name = "ecdsa-nist-p384", + .cra_driver_name = "ecdsa-nist-p384-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecc_ctx), + }, +}; + +static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256); +} + +static struct akcipher_alg ecdsa_nist_p256 = { + .verify = ecdsa_verify, + .set_pub_key = ecdsa_set_pub_key, + .max_size = ecdsa_max_size, + .init = ecdsa_nist_p256_init_tfm, + .exit = ecdsa_exit_tfm, + .base = { + .cra_name = "ecdsa-nist-p256", + .cra_driver_name = "ecdsa-nist-p256-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecc_ctx), + }, +}; + +static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192); +} + +static struct akcipher_alg ecdsa_nist_p192 = { + .verify = ecdsa_verify, + .set_pub_key = ecdsa_set_pub_key, + .max_size = ecdsa_max_size, + .init = ecdsa_nist_p192_init_tfm, + .exit = ecdsa_exit_tfm, + .base = { + .cra_name = "ecdsa-nist-p192", + .cra_driver_name = "ecdsa-nist-p192-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecc_ctx), + }, +}; +static bool ecdsa_nist_p192_registered; + +static int __init ecdsa_init(void) +{ + int ret; + + /* NIST p192 may not be available in FIPS mode */ + ret = crypto_register_akcipher(&ecdsa_nist_p192); + ecdsa_nist_p192_registered = ret == 0; + + ret = crypto_register_akcipher(&ecdsa_nist_p256); + if (ret) + goto nist_p256_error; + + ret = crypto_register_akcipher(&ecdsa_nist_p384); + if (ret) + goto nist_p384_error; + + return 0; + +nist_p384_error: + crypto_unregister_akcipher(&ecdsa_nist_p256); + +nist_p256_error: + if (ecdsa_nist_p192_registered) + crypto_unregister_akcipher(&ecdsa_nist_p192); + return ret; +} + +static void __exit 
ecdsa_exit(void) +{ + if (ecdsa_nist_p192_registered) + crypto_unregister_akcipher(&ecdsa_nist_p192); + crypto_unregister_akcipher(&ecdsa_nist_p256); + crypto_unregister_akcipher(&ecdsa_nist_p384); +} + +subsys_initcall(ecdsa_init); +module_exit(ecdsa_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>"); +MODULE_DESCRIPTION("ECDSA generic algorithm"); +MODULE_ALIAS_CRYPTO("ecdsa-generic"); diff --git a/crypto/ecdsasignature.asn1 b/crypto/ecdsasignature.asn1 new file mode 100644 index 000000000000..621ab754fb9f --- /dev/null +++ b/crypto/ecdsasignature.asn1 @@ -0,0 +1,4 @@ +ECDSASignature ::= SEQUENCE { + r INTEGER ({ ecdsa_get_signature_r }), + s INTEGER ({ ecdsa_get_signature_s }) +} diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 4a2f02baba14..69686668625e 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl, struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c index 887ec21aee49..f3c6b5e15e75 100644 --- a/crypto/ecrdsa.c +++ b/crypto/ecrdsa.c @@ -20,11 +20,12 @@ #include <linux/crypto.h> #include <crypto/streebog.h> #include <crypto/internal/akcipher.h> +#include <crypto/internal/ecc.h> #include <crypto/akcipher.h> #include <linux/oid_registry.h> +#include <linux/scatterlist.h> #include "ecrdsa_params.asn1.h" #include "ecrdsa_pub_key.asn1.h" -#include "ecc.h" #include "ecrdsa_defs.h" #define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8) @@ -112,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req) /* Step 1: verify that 0 < r < q, 0 < s < q */ if (vli_is_zero(r, ndigits) || - vli_cmp(r, ctx->curve->n, ndigits) == 1 || + vli_cmp(r, ctx->curve->n, ndigits) >= 0 || vli_is_zero(s, ndigits) || - vli_cmp(s, ctx->curve->n, ndigits) == 1) + vli_cmp(s, ctx->curve->n, ndigits) >= 0) return -EKEYREJECTED; /* Step 2: calculate hash (h) of the message (passed as input) */ /* Step 3: calculate e = h \mod q */ vli_from_le64(e, digest, ndigits); - if (vli_cmp(e, ctx->curve->n, ndigits) == 1) + if (vli_cmp(e, ctx->curve->n, ndigits) >= 0) vli_sub(e, e, ctx->curve->n, ndigits); if (vli_is_zero(e, ndigits)) e[0] = 1; @@ -136,7 +137,7 @@ static int ecrdsa_verify(struct akcipher_request *req) /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */ ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key, ctx->curve); - if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1) + if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0) vli_sub(cc.x, cc.x, ctx->curve->n, ndigits); /* Step 7: if R == r signature is valid */ diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h index 170baf039007..0056335b9d03 100644 --- a/crypto/ecrdsa_defs.h +++ b/crypto/ecrdsa_defs.h @@ -13,7 +13,7 @@ #ifndef _CRYTO_ECRDSA_DEFS_H #define _CRYTO_ECRDSA_DEFS_H -#include "ecc.h" +#include <crypto/internal/ecc.h> #define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8) #define ECRDSA_MAX_DIGITS (512 / 64) diff --git a/crypto/essiv.c b/crypto/essiv.c index 465a89c9d1ef..e33369df9034 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -30,6 +30,7 @@ #include <crypto/authenc.h> #include <crypto/internal/aead.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> @@ -66,7 +67,6 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { 
struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); - SHASH_DESC_ON_STACK(desc, tctx->hash); u8 salt[HASH_MAX_DIGESTSIZE]; int err; @@ -78,8 +78,7 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm, if (err) return err; - desc->tfm = tctx->hash; - err = crypto_shash_digest(desc, key, keylen, salt); + err = crypto_shash_tfm_digest(tctx->hash, key, keylen, salt); if (err) return err; @@ -468,7 +467,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(shash_name); type = algt->type & algt->mask; - mask = crypto_requires_sync(algt->type, algt->mask); + mask = crypto_algt_inherited_mask(algt); switch (type) { case CRYPTO_ALG_TYPE_SKCIPHER: @@ -527,7 +526,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) /* Synchronous hash, e.g., "sha256" */ _hash_alg = crypto_alg_mod_lookup(shash_name, CRYPTO_ALG_TYPE_SHASH, - CRYPTO_ALG_TYPE_MASK); + CRYPTO_ALG_TYPE_MASK | mask); if (IS_ERR(_hash_alg)) { err = PTR_ERR(_hash_alg); goto out_drop_skcipher; @@ -544,7 +543,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) } /* record the driver name so we can instantiate this exact algo later */ - strlcpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name, + strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME); /* Instance fields */ @@ -559,7 +558,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_free_hash; - base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC; + /* + * hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its + * flags manually. + */ + base->cra_flags |= (hash_alg->base.cra_flags & + CRYPTO_ALG_INHERITED_FLAGS); base->cra_blocksize = block_base->cra_blocksize; base->cra_ctxsize = sizeof(struct essiv_tfm_ctx); base->cra_alignmask = block_base->cra_alignmask; @@ -640,3 +644,4 @@ module_exit(essiv_module_exit); MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("essiv"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index 58f935315cf8..76a04d000c0d 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c @@ -63,10 +63,7 @@ do { \ } while (0) /* Rotate right one 64 bit number as a 56 bit number */ -#define ror56_64(k, n) \ -do { \ - k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \ -} while (0) +#define ror56_64(k, n) (k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n))) /* * Sboxes for Feistel network derived from @@ -396,7 +393,6 @@ static struct crypto_alg fcrypt_alg = { .cra_blocksize = 8, .cra_ctxsize = sizeof(struct fcrypt_ctx), .cra_module = THIS_MODULE, - .cra_alignmask = 3, .cra_u = { .cipher = { .cia_min_keysize = 8, .cia_max_keysize = 8, diff --git a/crypto/fips.c b/crypto/fips.c index 7b1d8caee669..b05d3c7b3ca5 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/sysctl.h> #include <linux/notifier.h> +#include <generated/utsrelease.h> int fips_enabled; EXPORT_SYMBOL_GPL(fips_enabled); @@ -30,13 +31,37 @@ static int fips_enable(char *str) __setup("fips=", fips_enable); +#define FIPS_MODULE_NAME CONFIG_CRYPTO_FIPS_NAME +#ifdef CONFIG_CRYPTO_FIPS_CUSTOM_VERSION +#define FIPS_MODULE_VERSION CONFIG_CRYPTO_FIPS_VERSION +#else +#define FIPS_MODULE_VERSION UTS_RELEASE +#endif + +static char fips_name[] = FIPS_MODULE_NAME; +static char fips_version[] = FIPS_MODULE_VERSION; + static 
struct ctl_table crypto_sysctl_table[] = { { - .procname = "fips_enabled", - .data = &fips_enabled, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = proc_dointvec + .procname = "fips_enabled", + .data = &fips_enabled, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec + }, + { + .procname = "fips_name", + .data = &fips_name, + .maxlen = 64, + .mode = 0444, + .proc_handler = proc_dostring + }, + { + .procname = "fips_version", + .data = &fips_version, + .maxlen = 64, + .mode = 0444, + .proc_handler = proc_dostring }, {} }; diff --git a/crypto/gcm.c b/crypto/gcm.c index 8e5c0ac65661..338ee0769747 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -139,7 +139,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); out: - kzfree(data); + kfree_sensitive(data); return err; } @@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; @@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, struct hash_alg_common *ghash; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (ghash->base.cra_flags | - ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -835,26 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst) static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; - const char *ccm_name; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); - - ccm_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(ccm_name)) - return PTR_ERR(ccm_name); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -862,9 +843,9 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); err = crypto_grab_aead(spawn, aead_crypto_instance(inst), - ccm_name, 0, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) - goto out_free_inst; + goto err_free_inst; alg = crypto_spawn_aead_alg(spawn); @@ -872,11 +853,11 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, /* Underlying IV size must be 12. */ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE) - goto out_drop_alg; + goto err_free_inst; /* Not a stream cipher? 
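 * A cra_blocksize of 1 is how the crypto API marks stream-cipher-like
 * behaviour, and every real GCM instance has it, e.g.:
 *
 *   gcm(aes): cra_blocksize == 1, ivsize == GCM_AES_IV_SIZE (12)
 *
 * A larger block size would mean the spawn is not actually GCM, so the
 * template refuses to wrap it.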
*/ if (alg->base.cra_blocksize != 1) - goto out_drop_alg; + goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, @@ -885,9 +866,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4106(%s)", alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_drop_alg; + goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -909,17 +889,11 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, inst->free = crypto_rfc4106_free; err = aead_register_instance(tmpl, inst); - if (err) - goto out_drop_alg; - -out: + if (err) { +err_free_inst: + crypto_rfc4106_free(inst); + } return err; - -out_drop_alg: - crypto_drop_aead(spawn); -out_free_inst: - kfree(inst); - goto out; } static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, @@ -1068,50 +1042,37 @@ static void crypto_rfc4543_free(struct aead_instance *inst) static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; - struct crypto_aead_spawn *spawn; struct aead_alg *alg; struct crypto_rfc4543_instance_ctx *ctx; - const char *ccm_name; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); - - ccm_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(ccm_name)) - return PTR_ERR(ccm_name); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = aead_instance_ctx(inst); - spawn = &ctx->aead; - err = crypto_grab_aead(spawn, aead_crypto_instance(inst), - ccm_name, 0, mask); + err = crypto_grab_aead(&ctx->aead, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); if (err) - goto out_free_inst; + goto err_free_inst; - alg = crypto_spawn_aead_alg(spawn); + alg = crypto_spawn_aead_alg(&ctx->aead); err = -EINVAL; /* Underlying IV size must be 12. */ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE) - goto out_drop_alg; + goto err_free_inst; /* Not a stream cipher? 
*/ if (alg->base.cra_blocksize != 1) - goto out_drop_alg; + goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, @@ -1120,9 +1081,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4543(%s)", alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_drop_alg; + goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -1141,20 +1101,14 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, inst->alg.encrypt = crypto_rfc4543_encrypt; inst->alg.decrypt = crypto_rfc4543_decrypt; - inst->free = crypto_rfc4543_free, + inst->free = crypto_rfc4543_free; err = aead_register_instance(tmpl, inst); - if (err) - goto out_drop_alg; - -out: + if (err) { +err_free_inst: + crypto_rfc4543_free(inst); + } return err; - -out_drop_alg: - crypto_drop_aead(spawn); -out_free_inst: - kfree(inst); - goto out; } static struct crypto_template crypto_gcm_tmpls[] = { diff --git a/crypto/geniv.c b/crypto/geniv.c index dbcc640274cd..bee4621b4f12 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -39,27 +39,19 @@ static void aead_geniv_free(struct aead_instance *inst) } struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask) + struct rtattr **tb) { - const char *name; struct crypto_aead_spawn *spawn; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; unsigned int ivsize; unsigned int maxauthsize; + u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return ERR_PTR(-EINVAL); - - name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(name)) - return ERR_CAST(name); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -67,11 +59,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); - /* Ignore async algorithms if necessary. 
*/ - mask |= crypto_requires_sync(algt->type, algt->mask); - err = crypto_grab_aead(spawn, aead_crypto_instance(inst), - name, type, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -82,19 +71,18 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, err = -EINVAL; if (ivsize < sizeof(u64)) - goto err_drop_alg; + goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", tmpl->name, alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_alg; + goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", tmpl->name, alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_alg; + goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -111,10 +99,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, out: return inst; -err_drop_alg: - crypto_drop_aead(spawn); err_free_inst: - kfree(inst); + aead_geniv_free(inst); inst = ERR_PTR(err); goto out; } diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index a4b1c026aaee..a69ae3e6c16c 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c @@ -304,8 +304,8 @@ void gf128mul_free_64k(struct gf128mul_64k *t) int i; for (i = 0; i < 16; i++) - kzfree(t->t[i]); - kzfree(t); + kfree_sensitive(t->t[i]); + kfree_sensitive(t); } EXPORT_SYMBOL(gf128mul_free_64k); diff --git a/crypto/hctr2.c b/crypto/hctr2.c new file mode 100644 index 000000000000..7d00a3bcb667 --- /dev/null +++ b/crypto/hctr2.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * HCTR2 length-preserving encryption mode + * + * Copyright 2021 Google LLC + */ + + +/* + * HCTR2 is a length-preserving encryption mode that is efficient on + * processors with instructions to accelerate AES and carryless + * multiplication, e.g. x86 processors with AES-NI and CLMUL, and ARM + * processors with the ARMv8 crypto extensions. + * + * For more details, see the paper: "Length-preserving encryption with HCTR2" + * (https://eprint.iacr.org/2021/1441.pdf) + */ + +#include <crypto/internal/cipher.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/polyval.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> + +#define BLOCKCIPHER_BLOCK_SIZE 16 + +/* + * The specification allows variable-length tweaks, but Linux's crypto API + * currently only allows algorithms to support a single length. The "natural" + * tweak length for HCTR2 is 16, since that fits into one POLYVAL block for + * the best performance. But longer tweaks are useful for fscrypt, to avoid + * needing to derive per-file keys. So instead we use two blocks, or 32 bytes. + */ +#define TWEAK_SIZE 32 + +struct hctr2_instance_ctx { + struct crypto_cipher_spawn blockcipher_spawn; + struct crypto_skcipher_spawn xctr_spawn; + struct crypto_shash_spawn polyval_spawn; +}; + +struct hctr2_tfm_ctx { + struct crypto_cipher *blockcipher; + struct crypto_skcipher *xctr; + struct crypto_shash *polyval; + u8 L[BLOCKCIPHER_BLOCK_SIZE]; + int hashed_tweak_offset; + /* + * This struct is allocated with extra space for two exported hash + * states. Since the hash state size is not known at compile-time, we + * can't add these to the struct directly. 
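+ * The two states live directly behind the struct in the same
+ * allocation, at offsets derived from crypto_shash_statesize(); a
+ * sketch of the layout that hctr2_hashed_tweaklen() computes below:
+ *
+ *   u8 *base = (u8 *)tctx + sizeof(*tctx);
+ *   divisible state: base
+ *   remainder state: base + crypto_shash_statesize(tctx->polyval)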
+ * + * hashed_tweaklen_divisible; + * hashed_tweaklen_remainder; + */ +}; + +struct hctr2_request_ctx { + u8 first_block[BLOCKCIPHER_BLOCK_SIZE]; + u8 xctr_iv[BLOCKCIPHER_BLOCK_SIZE]; + struct scatterlist *bulk_part_dst; + struct scatterlist *bulk_part_src; + struct scatterlist sg_src[2]; + struct scatterlist sg_dst[2]; + /* + * Sub-request sizes are unknown at compile-time, so they need to go + * after the members with known sizes. + */ + union { + struct shash_desc hash_desc; + struct skcipher_request xctr_req; + } u; + /* + * This struct is allocated with extra space for one exported hash + * state. Since the hash state size is not known at compile-time, we + * can't add it to the struct directly. + * + * hashed_tweak; + */ +}; + +static inline u8 *hctr2_hashed_tweaklen(const struct hctr2_tfm_ctx *tctx, + bool has_remainder) +{ + u8 *p = (u8 *)tctx + sizeof(*tctx); + + if (has_remainder) /* For messages not a multiple of block length */ + p += crypto_shash_statesize(tctx->polyval); + return p; +} + +static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx, + struct hctr2_request_ctx *rctx) +{ + return (u8 *)rctx + tctx->hashed_tweak_offset; +} + +/* + * The input data for each HCTR2 hash step begins with a 16-byte block that + * contains the tweak length and a flag that indicates whether the input is evenly + * divisible into blocks. Since this implementation only supports one tweak + * length, we precompute the two hash states resulting from hashing the two + * possible values of this initial block. This reduces by one block the amount of + * data that needs to be hashed for each encryption/decryption + * + * These precomputed hashes are stored in hctr2_tfm_ctx. + */ +static int hctr2_hash_tweaklen(struct hctr2_tfm_ctx *tctx, bool has_remainder) +{ + SHASH_DESC_ON_STACK(shash, tfm->polyval); + __le64 tweak_length_block[2]; + int err; + + shash->tfm = tctx->polyval; + memset(tweak_length_block, 0, sizeof(tweak_length_block)); + + tweak_length_block[0] = cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder); + err = crypto_shash_init(shash); + if (err) + return err; + err = crypto_shash_update(shash, (u8 *)tweak_length_block, + POLYVAL_BLOCK_SIZE); + if (err) + return err; + return crypto_shash_export(shash, hctr2_hashed_tweaklen(tctx, has_remainder)); +} + +static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + u8 hbar[BLOCKCIPHER_BLOCK_SIZE]; + int err; + + crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(tctx->blockcipher, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(tctx->blockcipher, key, keylen); + if (err) + return err; + + crypto_skcipher_clear_flags(tctx->xctr, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(tctx->xctr, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(tctx->xctr, key, keylen); + if (err) + return err; + + memset(hbar, 0, sizeof(hbar)); + crypto_cipher_encrypt_one(tctx->blockcipher, hbar, hbar); + + memset(tctx->L, 0, sizeof(tctx->L)); + tctx->L[0] = 0x01; + crypto_cipher_encrypt_one(tctx->blockcipher, tctx->L, tctx->L); + + crypto_shash_clear_flags(tctx->polyval, CRYPTO_TFM_REQ_MASK); + crypto_shash_set_flags(tctx->polyval, crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_shash_setkey(tctx->polyval, hbar, BLOCKCIPHER_BLOCK_SIZE); + if (err) + return err; + memzero_explicit(hbar, sizeof(hbar)); + + return 
hctr2_hash_tweaklen(tctx, true) ?: hctr2_hash_tweaklen(tctx, false); +} + +static int hctr2_hash_tweak(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + struct shash_desc *hash_desc = &rctx->u.hash_desc; + int err; + bool has_remainder = req->cryptlen % POLYVAL_BLOCK_SIZE; + + hash_desc->tfm = tctx->polyval; + err = crypto_shash_import(hash_desc, hctr2_hashed_tweaklen(tctx, has_remainder)); + if (err) + return err; + err = crypto_shash_update(hash_desc, req->iv, TWEAK_SIZE); + if (err) + return err; + + // Store the hashed tweak, since we need it when computing both + // H(T || N) and H(T || V). + return crypto_shash_export(hash_desc, hctr2_hashed_tweak(tctx, rctx)); +} + +static int hctr2_hash_message(struct skcipher_request *req, + struct scatterlist *sgl, + u8 digest[POLYVAL_DIGEST_SIZE]) +{ + static const u8 padding[BLOCKCIPHER_BLOCK_SIZE] = { 0x1 }; + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + struct shash_desc *hash_desc = &rctx->u.hash_desc; + const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + struct sg_mapping_iter miter; + unsigned int remainder = bulk_len % BLOCKCIPHER_BLOCK_SIZE; + int i; + int err = 0; + int n = 0; + + sg_miter_start(&miter, sgl, sg_nents(sgl), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + for (i = 0; i < bulk_len; i += n) { + sg_miter_next(&miter); + n = min_t(unsigned int, miter.length, bulk_len - i); + err = crypto_shash_update(hash_desc, miter.addr, n); + if (err) + break; + } + sg_miter_stop(&miter); + + if (err) + return err; + + if (remainder) { + err = crypto_shash_update(hash_desc, padding, + BLOCKCIPHER_BLOCK_SIZE - remainder); + if (err) + return err; + } + return crypto_shash_final(hash_desc, digest); +} + +static int hctr2_finish(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + u8 digest[POLYVAL_DIGEST_SIZE]; + struct shash_desc *hash_desc = &rctx->u.hash_desc; + int err; + + // U = UU ^ H(T || V) + // or M = MM ^ H(T || N) + hash_desc->tfm = tctx->polyval; + err = crypto_shash_import(hash_desc, hctr2_hashed_tweak(tctx, rctx)); + if (err) + return err; + err = hctr2_hash_message(req, rctx->bulk_part_dst, digest); + if (err) + return err; + crypto_xor(rctx->first_block, digest, BLOCKCIPHER_BLOCK_SIZE); + + // Copy U (or M) into dst scatterlist + scatterwalk_map_and_copy(rctx->first_block, req->dst, + 0, BLOCKCIPHER_BLOCK_SIZE, 1); + return 0; +} + +static void hctr2_xctr_done(struct crypto_async_request *areq, + int err) +{ + struct skcipher_request *req = areq->data; + + if (!err) + err = hctr2_finish(req); + + skcipher_request_complete(req, err); +} + +static int hctr2_crypt(struct skcipher_request *req, bool enc) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + u8 digest[POLYVAL_DIGEST_SIZE]; + int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + int err; + + // Requests must be at least one block + if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE) + return -EINVAL; + + // Copy M (or U) into a temporary buffer + scatterwalk_map_and_copy(rctx->first_block, req->src, + 0, BLOCKCIPHER_BLOCK_SIZE, 0); + + // Create scatterlists for N and V + 
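+	// (the "bulk part" N or V is everything after the first 16-byte
+	// block; scatterwalk_ffwd() skips ahead without copying any data)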
rctx->bulk_part_src = scatterwalk_ffwd(rctx->sg_src, req->src, + BLOCKCIPHER_BLOCK_SIZE); + rctx->bulk_part_dst = scatterwalk_ffwd(rctx->sg_dst, req->dst, + BLOCKCIPHER_BLOCK_SIZE); + + // MM = M ^ H(T || N) + // or UU = U ^ H(T || V) + err = hctr2_hash_tweak(req); + if (err) + return err; + err = hctr2_hash_message(req, rctx->bulk_part_src, digest); + if (err) + return err; + crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE); + + // UU = E(MM) + // or MM = D(UU) + if (enc) + crypto_cipher_encrypt_one(tctx->blockcipher, rctx->first_block, + digest); + else + crypto_cipher_decrypt_one(tctx->blockcipher, rctx->first_block, + digest); + + // S = MM ^ UU ^ L + crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE); + crypto_xor_cpy(rctx->xctr_iv, digest, tctx->L, BLOCKCIPHER_BLOCK_SIZE); + + // V = XCTR(S, N) + // or N = XCTR(S, V) + skcipher_request_set_tfm(&rctx->u.xctr_req, tctx->xctr); + skcipher_request_set_crypt(&rctx->u.xctr_req, rctx->bulk_part_src, + rctx->bulk_part_dst, bulk_len, + rctx->xctr_iv); + skcipher_request_set_callback(&rctx->u.xctr_req, + req->base.flags, + hctr2_xctr_done, req); + return crypto_skcipher_encrypt(&rctx->u.xctr_req) ?: + hctr2_finish(req); +} + +static int hctr2_encrypt(struct skcipher_request *req) +{ + return hctr2_crypt(req, true); +} + +static int hctr2_decrypt(struct skcipher_request *req) +{ + return hctr2_crypt(req, false); +} + +static int hctr2_init_tfm(struct crypto_skcipher *tfm) +{ + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst); + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *xctr; + struct crypto_cipher *blockcipher; + struct crypto_shash *polyval; + unsigned int subreq_size; + int err; + + xctr = crypto_spawn_skcipher(&ictx->xctr_spawn); + if (IS_ERR(xctr)) + return PTR_ERR(xctr); + + blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn); + if (IS_ERR(blockcipher)) { + err = PTR_ERR(blockcipher); + goto err_free_xctr; + } + + polyval = crypto_spawn_shash(&ictx->polyval_spawn); + if (IS_ERR(polyval)) { + err = PTR_ERR(polyval); + goto err_free_blockcipher; + } + + tctx->xctr = xctr; + tctx->blockcipher = blockcipher; + tctx->polyval = polyval; + + BUILD_BUG_ON(offsetofend(struct hctr2_request_ctx, u) != + sizeof(struct hctr2_request_ctx)); + subreq_size = max(sizeof_field(struct hctr2_request_ctx, u.hash_desc) + + crypto_shash_descsize(polyval), + sizeof_field(struct hctr2_request_ctx, u.xctr_req) + + crypto_skcipher_reqsize(xctr)); + + tctx->hashed_tweak_offset = offsetof(struct hctr2_request_ctx, u) + + subreq_size; + crypto_skcipher_set_reqsize(tfm, tctx->hashed_tweak_offset + + crypto_shash_statesize(polyval)); + return 0; + +err_free_blockcipher: + crypto_free_cipher(blockcipher); +err_free_xctr: + crypto_free_skcipher(xctr); + return err; +} + +static void hctr2_exit_tfm(struct crypto_skcipher *tfm) +{ + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + + crypto_free_cipher(tctx->blockcipher); + crypto_free_skcipher(tctx->xctr); + crypto_free_shash(tctx->polyval); +} + +static void hctr2_free_instance(struct skcipher_instance *inst) +{ + struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst); + + crypto_drop_cipher(&ictx->blockcipher_spawn); + crypto_drop_skcipher(&ictx->xctr_spawn); + crypto_drop_shash(&ictx->polyval_spawn); + kfree(inst); +} + +static int hctr2_create_common(struct crypto_template *tmpl, + struct rtattr **tb, + const char *xctr_name, + const char *polyval_name) +{ + u32 
mask; + struct skcipher_instance *inst; + struct hctr2_instance_ctx *ictx; + struct skcipher_alg *xctr_alg; + struct crypto_alg *blockcipher_alg; + struct shash_alg *polyval_alg; + char blockcipher_name[CRYPTO_MAX_ALG_NAME]; + int len; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + ictx = skcipher_instance_ctx(inst); + + /* Stream cipher, xctr(block_cipher) */ + err = crypto_grab_skcipher(&ictx->xctr_spawn, + skcipher_crypto_instance(inst), + xctr_name, 0, mask); + if (err) + goto err_free_inst; + xctr_alg = crypto_spawn_skcipher_alg(&ictx->xctr_spawn); + + err = -EINVAL; + if (strncmp(xctr_alg->base.cra_name, "xctr(", 5)) + goto err_free_inst; + len = strscpy(blockcipher_name, xctr_alg->base.cra_name + 5, + sizeof(blockcipher_name)); + if (len < 1) + goto err_free_inst; + if (blockcipher_name[len - 1] != ')') + goto err_free_inst; + blockcipher_name[len - 1] = 0; + + /* Block cipher, e.g. "aes" */ + err = crypto_grab_cipher(&ictx->blockcipher_spawn, + skcipher_crypto_instance(inst), + blockcipher_name, 0, mask); + if (err) + goto err_free_inst; + blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn); + + /* Require blocksize of 16 bytes */ + err = -EINVAL; + if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE) + goto err_free_inst; + + /* Polyval ε-∆U hash function */ + err = crypto_grab_shash(&ictx->polyval_spawn, + skcipher_crypto_instance(inst), + polyval_name, 0, mask); + if (err) + goto err_free_inst; + polyval_alg = crypto_spawn_shash_alg(&ictx->polyval_spawn); + + /* Ensure Polyval is being used */ + err = -EINVAL; + if (strcmp(polyval_alg->base.cra_name, "polyval") != 0) + goto err_free_inst; + + /* Instance fields */ + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "hctr2(%s)", + blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "hctr2_base(%s,%s)", + xctr_alg->base.cra_driver_name, + polyval_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; + inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) + + polyval_alg->statesize * 2; + inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask | + polyval_alg->base.cra_alignmask; + /* + * The hash function is called twice, so it is weighted higher than the + * xctr and blockcipher. 
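+	 * E.g. with all three at an (illustrative) priority of 100, the
+	 * instance gets (2*100 + 4*100 + 100) / 7 == 100; a polyval
+	 * accelerated to priority 200 lifts that to (200 + 800 + 100) / 7
+	 * == 157.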
+ */ + inst->alg.base.cra_priority = (2 * xctr_alg->base.cra_priority + + 4 * polyval_alg->base.cra_priority + + blockcipher_alg->cra_priority) / 7; + + inst->alg.setkey = hctr2_setkey; + inst->alg.encrypt = hctr2_encrypt; + inst->alg.decrypt = hctr2_decrypt; + inst->alg.init = hctr2_init_tfm; + inst->alg.exit = hctr2_exit_tfm; + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(xctr_alg); + inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(xctr_alg); + inst->alg.ivsize = TWEAK_SIZE; + + inst->free = hctr2_free_instance; + + err = skcipher_register_instance(tmpl, inst); + if (err) { +err_free_inst: + hctr2_free_instance(inst); + } + return err; +} + +static int hctr2_create_base(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *xctr_name; + const char *polyval_name; + + xctr_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(xctr_name)) + return PTR_ERR(xctr_name); + + polyval_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(polyval_name)) + return PTR_ERR(polyval_name); + + return hctr2_create_common(tmpl, tb, xctr_name, polyval_name); +} + +static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *blockcipher_name; + char xctr_name[CRYPTO_MAX_ALG_NAME]; + + blockcipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(blockcipher_name)) + return PTR_ERR(blockcipher_name); + + if (snprintf(xctr_name, CRYPTO_MAX_ALG_NAME, "xctr(%s)", + blockcipher_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return hctr2_create_common(tmpl, tb, xctr_name, "polyval"); +} + +static struct crypto_template hctr2_tmpls[] = { + { + /* hctr2_base(xctr_name, polyval_name) */ + .name = "hctr2_base", + .create = hctr2_create_base, + .module = THIS_MODULE, + }, { + /* hctr2(blockcipher_name) */ + .name = "hctr2", + .create = hctr2_create, + .module = THIS_MODULE, + } +}; + +static int __init hctr2_module_init(void) +{ + return crypto_register_templates(hctr2_tmpls, ARRAY_SIZE(hctr2_tmpls)); +} + +static void __exit hctr2_module_exit(void) +{ + return crypto_unregister_templates(hctr2_tmpls, + ARRAY_SIZE(hctr2_tmpls)); +} + +subsys_initcall(hctr2_module_init); +module_exit(hctr2_module_exit); + +MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("hctr2"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/hmac.c b/crypto/hmac.c index e38bfb948278..3610ff0b6739 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -15,6 +15,7 @@ #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> #include <linux/err.h> +#include <linux/fips.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> @@ -51,6 +52,9 @@ static int hmac_setkey(struct crypto_shash *parent, SHASH_DESC_ON_STACK(shash, hash); unsigned int i; + if (fips_enabled && (keylen < 112 / 8)) + return -EINVAL; + shash->tfm = hash; if (keylen > bs) { @@ -168,11 +172,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; + u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -182,7 +187,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_shash(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; salg = 
crypto_spawn_shash_alg(spawn); diff --git a/crypto/internal.h b/crypto/internal.h index d5ebc60c5143..c08385571853 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -10,16 +10,15 @@ #include <crypto/algapi.h> #include <linux/completion.h> -#include <linux/mm.h> -#include <linux/highmem.h> -#include <linux/interrupt.h> -#include <linux/init.h> +#include <linux/jump_label.h> #include <linux/list.h> #include <linux/module.h> -#include <linux/kernel.h> #include <linux/notifier.h> +#include <linux/numa.h> +#include <linux/refcount.h> #include <linux/rwsem.h> -#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/types.h> struct crypto_instance; struct crypto_template; @@ -29,12 +28,27 @@ struct crypto_larval { struct crypto_alg *adult; struct completion completion; u32 mask; + bool test_started; }; +enum { + CRYPTOA_UNSPEC, + CRYPTOA_ALG, + CRYPTOA_TYPE, + __CRYPTOA_MAX, +}; + +#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) + +/* Maximum number of (rtattr) parameters for each template. */ +#define CRYPTO_MAX_ATTRS 32 + extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; extern struct blocking_notifier_head crypto_chain; +DECLARE_STATIC_KEY_FALSE(crypto_boot_test_finished); + #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); void __exit crypto_exit_proc(void); @@ -60,20 +74,37 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); void crypto_larval_kill(struct crypto_alg *alg); +void crypto_wait_for_test(struct crypto_larval *larval); void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg); void crypto_remove_final(struct list_head *list); +void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend); +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, int node); + +static inline void *crypto_create_tfm(struct crypto_alg *alg, + const struct crypto_type *frontend) +{ + return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE); +} + struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask); + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node); + +static inline void *crypto_alloc_tfm(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask) +{ + return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE); +} int crypto_probing_notify(unsigned long val, void *v); @@ -124,5 +155,16 @@ static inline void crypto_notify(unsigned long val, void *v) blocking_notifier_call_chain(&crypto_chain, val, v); } +static inline void crypto_yield(u32 flags) +{ + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) + cond_resched(); +} + +static inline int crypto_is_test_larval(struct crypto_larval *larval) +{ + return larval->alg.cra_driver_name[0]; +} + #endif /* _CRYPTO_INTERNAL_H */ diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index a5ce8f96790f..2d115bec15ae 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -37,11 +37,10 @@ * DAMAGE. 
*/ +#include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/fips.h> #include <linux/time.h> -#include <linux/crypto.h> #include <crypto/internal/rng.h> #include "jitterentropy.h" @@ -57,12 +56,7 @@ void *jent_zalloc(unsigned int len) void jent_zfree(void *ptr) { - kzfree(ptr); -} - -int jent_fips_enabled(void) -{ - return fips_enabled; + kfree_sensitive(ptr); } void jent_panic(char *s) @@ -108,6 +102,7 @@ void jent_get_nstime(__u64 *out) struct jitterentropy { spinlock_t jent_lock; struct rand_data *entropy_collector; + unsigned int reset_cnt; }; static int jent_kcapi_init(struct crypto_tfm *tfm) @@ -142,7 +137,33 @@ static int jent_kcapi_random(struct crypto_rng *tfm, int ret = 0; spin_lock(&rng->jent_lock); + + /* Return a permanent error in case we had too many resets in a row. */ + if (rng->reset_cnt > (1<<10)) { + ret = -EFAULT; + goto out; + } + ret = jent_read_entropy(rng->entropy_collector, rdata, dlen); + + /* Reset RNG in case of health failures */ + if (ret < -1) { + pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n", + (ret == -2) ? "Repetition Count Test" : + "Adaptive Proportion Test"); + + rng->reset_cnt++; + + ret = -EAGAIN; + } else { + rng->reset_cnt = 0; + + /* Convert the Jitter RNG error into a usable error code */ + if (ret == -1) + ret = -EINVAL; + } + +out: spin_unlock(&rng->jent_lock); return ret; diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 042157f0d28b..93bff3213823 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -2,12 +2,12 @@ * Non-physical true random number generator based on timing jitter -- * Jitter RNG standalone code. * - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2019 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2020 * * Design * ====== * - * See http://www.chronox.de/jent.html + * See https://www.chronox.de/jent.html * * License * ======= @@ -47,7 +47,7 @@ /* * This Jitterentropy RNG is based on the jitterentropy library - * version 2.1.2 provided at http://www.chronox.de/jent.html + * version 2.2.0 provided at https://www.chronox.de/jent.html */ #ifdef __OPTIMIZE__ @@ -83,6 +83,22 @@ struct rand_data { unsigned int memblocksize; /* Size of one memory block in bytes */ unsigned int memaccessloops; /* Number of memory accesses per random * bit generation */ + + /* Repetition Count Test */ + int rct_count; /* Number of stuck values */ + + /* Adaptive Proportion Test for a significance level of 2^-30 */ +#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */ +#define JENT_APT_WINDOW_SIZE 512 /* Data window size */ + /* LSB of time stamp to process */ +#define JENT_APT_LSB 16 +#define JENT_APT_WORD_MASK (JENT_APT_LSB - 1) + unsigned int apt_observations; /* Number of collected observations */ + unsigned int apt_count; /* APT counter */ + unsigned int apt_base; /* APT base reference */ + unsigned int apt_base_set:1; /* APT base reference set? */ + + unsigned int health_failure:1; /* Permanent health failure */ }; /* Flags that can be used to initialize the RNG */ @@ -98,14 +114,214 @@ struct rand_data { * variations (2nd derivation of time is * zero). */ #define JENT_ESTUCK 8 /* Too many stuck results during init. 
*/ +#define JENT_EHEALTH 9 /* Health test failed during initialization */ +#define JENT_ERCT 10 /* RCT failed during initialization */ + +/* + * The output n bits can receive more than n bits of min entropy, of course, + * but the fixed output of the conditioning function can only asymptotically + * approach the output size bits of min entropy, not attain that bound. Random + * maps will tend to have output collisions, which reduces the creditable + * output entropy (that is what SP 800-90B Section 3.1.5.1.2 attempts to bound). + * + * The value "64" is justified in Appendix A.4 of the current 90C draft, + * and aligns with NIST's in "epsilon" definition in this document, which is + * that a string can be considered "full entropy" if you can bound the min + * entropy in each bit of output to at least 1-epsilon, where epsilon is + * required to be <= 2^(-32). + */ +#define JENT_ENTROPY_SAFETY_FACTOR 64 + +#include <linux/fips.h> +#include "jitterentropy.h" /*************************************************************************** - * Helper functions + * Adaptive Proportion Test + * + * This test complies with SP800-90B section 4.4.2. ***************************************************************************/ -#include "jitterentropy.h" +/* + * Reset the APT counter + * + * @ec [in] Reference to entropy collector + */ +static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked) +{ + /* Reset APT counter */ + ec->apt_count = 0; + ec->apt_base = delta_masked; + ec->apt_observations = 0; +} + +/* + * Insert a new entropy event into APT + * + * @ec [in] Reference to entropy collector + * @delta_masked [in] Masked time delta to process + */ +static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) +{ + /* Initialize the base reference */ + if (!ec->apt_base_set) { + ec->apt_base = delta_masked; + ec->apt_base_set = 1; + return; + } + + if (delta_masked == ec->apt_base) { + ec->apt_count++; + + if (ec->apt_count >= JENT_APT_CUTOFF) + ec->health_failure = 1; + } + + ec->apt_observations++; + + if (ec->apt_observations >= JENT_APT_WINDOW_SIZE) + jent_apt_reset(ec, delta_masked); +} + +/*************************************************************************** + * Stuck Test and its use as Repetition Count Test + * + * The Jitter RNG uses an enhanced version of the Repetition Count Test + * (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical + * back-to-back values, the input to the RCT is the counting of the stuck + * values during the generation of one Jitter RNG output block. + * + * The RCT is applied with an alpha of 2^{-30} compliant to FIPS 140-2 IG 9.8. + * + * During the counting operation, the Jitter RNG always calculates the RCT + * cut-off value of C. If that value exceeds the allowed cut-off value, + * the Jitter RNG output block will be calculated completely but discarded at + * the end. The caller of the Jitter RNG is informed with an error code. + ***************************************************************************/ + +/* + * Repetition Count Test as defined in SP800-90B section 4.4.1 + * + * @ec [in] Reference to entropy collector + * @stuck [in] Indicator whether the value is stuck + */ +static void jent_rct_insert(struct rand_data *ec, int stuck) +{ + /* + * If we have a count less than zero, a previous RCT round identified + * a failure. We will not overwrite it. 
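+	 * The failure is latched: rct_count only returns to zero when
+	 * jent_read_entropy() reinitializes the noise source.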
+ */ + if (ec->rct_count < 0) + return; + + if (stuck) { + ec->rct_count++; + + /* + * The cutoff value is based on the following consideration: + * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8. + * In addition, we require an entropy value H of 1/OSR as this + * is the minimum entropy required to provide full entropy. + * Note, we collect 64 * OSR deltas for inserting them into + * the entropy pool which should then have (close to) 64 bits + * of entropy. + * + * Note, ec->rct_count (which equals to value B in the pseudo + * code of SP800-90B section 4.4.1) starts with zero. Hence + * we need to subtract one from the cutoff value as calculated + * following SP800-90B. + */ + if ((unsigned int)ec->rct_count >= (31 * ec->osr)) { + ec->rct_count = -1; + ec->health_failure = 1; + } + } else { + ec->rct_count = 0; + } +} + +/* + * Is there an RCT health test failure? + * + * @ec [in] Reference to entropy collector + * + * @return + * 0 No health test failure + * 1 Permanent health test failure + */ +static int jent_rct_failure(struct rand_data *ec) +{ + if (ec->rct_count < 0) + return 1; + return 0; +} + +static inline __u64 jent_delta(__u64 prev, __u64 next) +{ +#define JENT_UINT64_MAX (__u64)(~((__u64) 0)) + return (prev < next) ? (next - prev) : + (JENT_UINT64_MAX - prev + 1 + next); +} + +/* + * Stuck test by checking the: + * 1st derivative of the jitter measurement (time delta) + * 2nd derivative of the jitter measurement (delta of time deltas) + * 3rd derivative of the jitter measurement (delta of delta of time deltas) + * + * All values must always be non-zero. + * + * @ec [in] Reference to entropy collector + * @current_delta [in] Jitter time delta + * + * @return + * 0 jitter measurement not stuck (good bit) + * 1 jitter measurement stuck (reject bit) + */ +static int jent_stuck(struct rand_data *ec, __u64 current_delta) +{ + __u64 delta2 = jent_delta(ec->last_delta, current_delta); + __u64 delta3 = jent_delta(ec->last_delta2, delta2); + + ec->last_delta = current_delta; + ec->last_delta2 = delta2; + + /* + * Insert the result of the comparison of two back-to-back time + * deltas. + */ + jent_apt_insert(ec, current_delta); + + if (!current_delta || !delta2 || !delta3) { + /* RCT with a stuck bit */ + jent_rct_insert(ec, 1); + return 1; + } + + /* RCT with a non-stuck bit */ + jent_rct_insert(ec, 0); + + return 0; +} -/** +/* + * Report any health test failures + * + * @ec [in] Reference to entropy collector + * + * @return + * 0 No health test failure + * 1 Permanent health test failure + */ +static int jent_health_failure(struct rand_data *ec) +{ + return ec->health_failure; +} + +/*************************************************************************** + * Noise sources + ***************************************************************************/ + +/* * Update of the loop count used for the next round of * an entropy collection. * @@ -148,11 +364,7 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, return (shuffle + (1<<min)); } -/*************************************************************************** - * Noise sources - ***************************************************************************/ - -/** +/* * CPU Jitter noise source -- this is the noise source based on the CPU * execution time jitter * @@ -166,18 +378,19 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, * the CPU execution time jitter. Any change to the loop in this function * implies that careful retesting must be done. 
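+ * (SP800-90B treats this LFSR as the conditioning component; see the
+ * stuck-handling comment at the end of this function.)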
* - * Input: - * @ec entropy collector struct - * @time time stamp to be injected - * @loop_cnt if a value not equal to 0 is set, use the given value as number of - * loops to perform the folding + * @ec [in] entropy collector struct + * @time [in] time stamp to be injected + * @loop_cnt [in] if a value not equal to 0 is set, use the given value as + * number of loops to perform the folding + * @stuck [in] Is the time stamp identified as stuck? * * Output: * updated ec->data * * @return Number of loops the folding operation is performed */ -static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) +static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt, + int stuck) { unsigned int i; __u64 j = 0; @@ -220,12 +433,20 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) new ^= tmp; } } - ec->data = new; - return fold_loop_cnt; + /* + * If the time stamp is stuck, do not finally insert the value into + * the entropy pool. Although this operation should not do any harm + * even when the time stamp has no entropy, SP800-90B requires that + * any conditioning operation (SP800-90B considers the LFSR to be a + * conditioning operation) to have an identical amount of input + * data according to section 3.1.5. + */ + if (!stuck) + ec->data = new; } -/** +/* * Memory Access noise source -- this is a noise source based on variations in * memory access times * @@ -243,16 +464,13 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) * to reliably access either L3 or memory, the ec->mem memory must be quite * large which is usually not desirable. * - * Input: - * @ec Reference to the entropy collector with the memory access data -- if - * the reference to the memory block to be accessed is NULL, this noise - * source is disabled - * @loop_cnt if a value not equal to 0 is set, use the given value as number of - * loops to perform the folding - * - * @return Number of memory access operations + * @ec [in] Reference to the entropy collector with the memory access data -- if + * the reference to the memory block to be accessed is NULL, this noise + * source is disabled + * @loop_cnt [in] if a value not equal to 0 is set, use the given value + * number of loops to perform the LFSR */ -static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) +static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) { unsigned int wrap = 0; __u64 i = 0; @@ -262,7 +480,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); if (NULL == ec || NULL == ec->mem) - return 0; + return; wrap = ec->memblocksize * ec->memblocks; /* @@ -288,44 +506,12 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) ec->memlocation = ec->memlocation + ec->memblocksize - 1; ec->memlocation = ec->memlocation % wrap; } - return i; } /*************************************************************************** * Start of entropy processing logic ***************************************************************************/ - -/** - * Stuck test by checking the: - * 1st derivation of the jitter measurement (time delta) - * 2nd derivation of the jitter measurement (delta of time deltas) - * 3rd derivation of the jitter measurement (delta of delta of time deltas) - * - * All values must always be non-zero. 
- * - * Input: - * @ec Reference to entropy collector - * @current_delta Jitter time delta - * - * @return - * 0 jitter measurement not stuck (good bit) - * 1 jitter measurement stuck (reject bit) - */ -static int jent_stuck(struct rand_data *ec, __u64 current_delta) -{ - __s64 delta2 = ec->last_delta - current_delta; - __s64 delta3 = delta2 - ec->last_delta2; - - ec->last_delta = current_delta; - ec->last_delta2 = delta2; - - if (!current_delta || !delta2 || !delta3) - return 1; - - return 0; -} - -/** +/* * This is the heart of the entropy generation: calculate time deltas and * use the CPU jitter in the time deltas. The jitter is injected into the * entropy pool. @@ -334,8 +520,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) * of this function! This can be done by calling this function * and not using its result. * - * Input: - * @entropy_collector Reference to entropy collector + * @ec [in] Reference to entropy collector * * @return result of stuck test */ @@ -343,6 +528,7 @@ static int jent_measure_jitter(struct rand_data *ec) { __u64 time = 0; __u64 current_delta = 0; + int stuck; /* Invoke one noise source before time measurement to add variations */ jent_memaccess(ec, 0); @@ -352,31 +538,35 @@ static int jent_measure_jitter(struct rand_data *ec) * invocation to measure the timing variations */ jent_get_nstime(&time); - current_delta = time - ec->prev_time; + current_delta = jent_delta(ec->prev_time, time); ec->prev_time = time; + /* Check whether we have a stuck measurement. */ + stuck = jent_stuck(ec, current_delta); + /* Now call the next noise sources which also injects the data */ - jent_lfsr_time(ec, current_delta, 0); + jent_lfsr_time(ec, current_delta, 0, stuck); - /* Check whether we have a stuck measurement. */ - return jent_stuck(ec, current_delta); + return stuck; } -/** +/* * Generator of one 64 bit random number * Function fills rand_data->data * - * Input: - * @ec Reference to entropy collector + * @ec [in] Reference to entropy collector */ static void jent_gen_entropy(struct rand_data *ec) { - unsigned int k = 0; + unsigned int k = 0, safety_factor = 0; + + if (fips_enabled) + safety_factor = JENT_ENTROPY_SAFETY_FACTOR; /* priming of the ->prev_time value */ jent_measure_jitter(ec); - while (1) { + while (!jent_health_failure(ec)) { /* If a stuck measurement is received, repeat measurement */ if (jent_measure_jitter(ec)) continue; @@ -385,37 +575,12 @@ static void jent_gen_entropy(struct rand_data *ec) * We multiply the loop value with ->osr to obtain the * oversampling rate requested by the caller */ - if (++k >= (DATA_SIZE_BITS * ec->osr)) + if (++k >= ((DATA_SIZE_BITS + safety_factor) * ec->osr)) break; } } -/** - * The continuous test required by FIPS 140-2 -- the function automatically - * primes the test if needed. - * - * Return: - * returns normally if FIPS test passed - * panics the kernel if FIPS test failed - */ -static void jent_fips_test(struct rand_data *ec) -{ - if (!jent_fips_enabled()) - return; - - /* prime the FIPS test */ - if (!ec->old_data) { - ec->old_data = ec->data; - jent_gen_entropy(ec); - } - - if (ec->data == ec->old_data) - jent_panic("jitterentropy: Duplicate output detected\n"); - - ec->old_data = ec->data; -} - -/** +/* * Entry function: Obtain entropy for the caller. 
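+ *
+ * Illustrative call pattern (ec as returned by
+ * jent_entropy_collector_alloc()):
+ *
+ *	unsigned char buf[16];
+ *	int ret = jent_read_entropy(ec, buf, sizeof(buf));
+ *
+ * A return code of -2 or -3 signals a failed health test: the output must
+ * be discarded, and the caller may retry since this function attempts to
+ * reinitialize the noise source before returning.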
* * This function invokes the entropy gathering logic as often to generate @@ -425,17 +590,18 @@ static void jent_fips_test(struct rand_data *ec) * This function truncates the last 64 bit entropy value output to the exact * size specified by the caller. * - * Input: - * @ec Reference to entropy collector - * @data pointer to buffer for storing random data -- buffer must already - * exist - * @len size of the buffer, specifying also the requested number of random - * in bytes + * @ec [in] Reference to entropy collector + * @data [in] pointer to buffer for storing random data -- buffer must already + * exist + * @len [in] size of the buffer, specifying also the requested number of random + * in bytes * * @return 0 when request is fulfilled or an error * * The following error codes can occur: * -1 entropy_collector is NULL + * -2 RCT failed + * -3 APT test failed */ int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len) @@ -445,11 +611,46 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, if (!ec) return -1; - while (0 < len) { + while (len > 0) { unsigned int tocopy; jent_gen_entropy(ec); - jent_fips_test(ec); + + if (jent_health_failure(ec)) { + int ret; + + if (jent_rct_failure(ec)) + ret = -2; + else + ret = -3; + + /* + * Re-initialize the noise source + * + * If the health test fails, the Jitter RNG remains + * in failure state and will return a health failure + * during next invocation. + */ + if (jent_entropy_init()) + return ret; + + /* Set APT to initial state */ + jent_apt_reset(ec, 0); + ec->apt_base_set = 0; + + /* Set RCT to initial state */ + ec->rct_count = 0; + + /* Re-enable Jitter RNG */ + ec->health_failure = 0; + + /* + * Return the health test failure status to the + * caller as the generated value is not appropriate. + */ + return ret; + } + if ((DATA_SIZE_BITS / 8) < len) tocopy = (DATA_SIZE_BITS / 8); else @@ -491,7 +692,7 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, } /* verify and set the oversampling rate */ - if (0 == osr) + if (osr == 0) osr = 1; /* minimum sampling rate is 1 */ entropy_collector->osr = osr; @@ -513,11 +714,15 @@ int jent_entropy_init(void) int i; __u64 delta_sum = 0; __u64 old_delta = 0; + unsigned int nonstuck = 0; int time_backwards = 0; int count_mod = 0; int count_stuck = 0; struct rand_data ec = { 0 }; + /* Required for RCT */ + ec.osr = 1; + /* We could perform statistical tests here, but the problem is * that we only have a few loop counts to do testing. These * loop counts may show some slight skew and we produce @@ -539,8 +744,10 @@ int jent_entropy_init(void) /* * TESTLOOPCOUNT needs some loops to identify edge systems. 100 is * definitely too little. + * + * SP800-90B requires at least 1024 initial test cycles. */ -#define TESTLOOPCOUNT 300 +#define TESTLOOPCOUNT 1024 #define CLEARCACHE 100 for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { __u64 time = 0; @@ -552,13 +759,13 @@ int jent_entropy_init(void) /* Invoke core entropy collection logic */ jent_get_nstime(&time); ec.prev_time = time; - jent_lfsr_time(&ec, time, 0); + jent_lfsr_time(&ec, time, 0, 0); jent_get_nstime(&time2); /* test whether timer works */ if (!time || !time2) return JENT_ENOTIME; - delta = time2 - time; + delta = jent_delta(time, time2); /* * test whether timer is fine grained enough to provide * delta even when called shortly after each other -- this @@ -576,11 +783,33 @@ int jent_entropy_init(void) * etc. with the goal to clear it to get the worst case * measurements. 
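+		 * (This is why the first CLEARCACHE == 100 iterations are
+		 * skipped right below.)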
*/ - if (CLEARCACHE > i) + if (i < CLEARCACHE) continue; if (stuck) count_stuck++; + else { + nonstuck++; + + /* + * Ensure that the APT succeeded. + * + * With the check below that count_stuck must be less + * than 10% of the overall generated raw entropy values + * it is guaranteed that the APT is invoked at + * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times. + */ + if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) { + jent_apt_reset(&ec, + delta & JENT_APT_WORD_MASK); + if (jent_health_failure(&ec)) + return JENT_EHEALTH; + } + } + + /* Validate RCT */ + if (jent_rct_failure(&ec)) + return JENT_ERCT; /* test whether we have an increasing timer */ if (!(time2 > time)) @@ -611,7 +840,7 @@ int jent_entropy_init(void) * should not fail. The value of 3 should cover the NTP case being * performed during our test run. */ - if (3 < time_backwards) + if (time_backwards > 3) return JENT_ENOMONOTONIC; /* diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index c83fff32d130..b7397b617ef0 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -2,7 +2,6 @@ extern void *jent_zalloc(unsigned int len); extern void jent_zfree(void *ptr); -extern int jent_fips_enabled(void); extern void jent_panic(char *s); extern void jent_memcpy(void *dest, const void *src, unsigned int n); extern void jent_get_nstime(__u64 *out); diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c new file mode 100644 index 000000000000..58edf7797abf --- /dev/null +++ b/crypto/kdf_sp800108.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * SP800-108 Key-derivation function + * + * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de> + */ + +#include <linux/fips.h> +#include <linux/module.h> +#include <crypto/kdf_sp800108.h> +#include <crypto/internal/kdf_selftest.h> + +/* + * SP800-108 CTR KDF implementation + */ +int crypto_kdf108_ctr_generate(struct crypto_shash *kmd, + const struct kvec *info, unsigned int info_nvec, + u8 *dst, unsigned int dlen) +{ + SHASH_DESC_ON_STACK(desc, kmd); + __be32 counter = cpu_to_be32(1); + const unsigned int h = crypto_shash_digestsize(kmd), dlen_orig = dlen; + unsigned int i; + int err = 0; + u8 *dst_orig = dst; + + desc->tfm = kmd; + + while (dlen) { + err = crypto_shash_init(desc); + if (err) + goto out; + + err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32)); + if (err) + goto out; + + for (i = 0; i < info_nvec; i++) { + err = crypto_shash_update(desc, info[i].iov_base, + info[i].iov_len); + if (err) + goto out; + } + + if (dlen < h) { + u8 tmpbuffer[HASH_MAX_DIGESTSIZE]; + + err = crypto_shash_final(desc, tmpbuffer); + if (err) + goto out; + memcpy(dst, tmpbuffer, dlen); + memzero_explicit(tmpbuffer, h); + goto out; + } + + err = crypto_shash_final(desc, dst); + if (err) + goto out; + + dlen -= h; + dst += h; + counter = cpu_to_be32(be32_to_cpu(counter) + 1); + } + +out: + if (err) + memzero_explicit(dst_orig, dlen_orig); + shash_desc_zero(desc); + return err; +} +EXPORT_SYMBOL(crypto_kdf108_ctr_generate); + +/* + * The seeding of the KDF + */ +int crypto_kdf108_setkey(struct crypto_shash *kmd, + const u8 *key, size_t keylen, + const u8 *ikm, size_t ikmlen) +{ + unsigned int ds = crypto_shash_digestsize(kmd); + + /* SP800-108 does not support IKM */ + if (ikm || ikmlen) + return -EINVAL; + + /* Check according to SP800-108 section 7.2 */ + if (ds > keylen) + return -EINVAL; + + /* Set the key for the MAC used for the KDF. 
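+	 * For HMAC this also transparently handles keys longer than the
+	 * hash block size, which the HMAC implementation hashes down first.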
*/ + return crypto_shash_setkey(kmd, key, keylen); +} +EXPORT_SYMBOL(crypto_kdf108_setkey); + +/* + * Test vector obtained from + * http://csrc.nist.gov/groups/STM/cavp/documents/KBKDF800-108/CounterMode.zip + */ +static const struct kdf_testvec kdf_ctr_hmac_sha256_tv_template[] = { + { + .key = "\xdd\x1d\x91\xb7\xd9\x0b\x2b\xd3" + "\x13\x85\x33\xce\x92\xb2\x72\xfb" + "\xf8\xa3\x69\x31\x6a\xef\xe2\x42" + "\xe6\x59\xcc\x0a\xe2\x38\xaf\xe0", + .keylen = 32, + .ikm = NULL, + .ikmlen = 0, + .info = { + .iov_base = "\x01\x32\x2b\x96\xb3\x0a\xcd\x19" + "\x79\x79\x44\x4e\x46\x8e\x1c\x5c" + "\x68\x59\xbf\x1b\x1c\xf9\x51\xb7" + "\xe7\x25\x30\x3e\x23\x7e\x46\xb8" + "\x64\xa1\x45\xfa\xb2\x5e\x51\x7b" + "\x08\xf8\x68\x3d\x03\x15\xbb\x29" + "\x11\xd8\x0a\x0e\x8a\xba\x17\xf3" + "\xb4\x13\xfa\xac", + .iov_len = 60 + }, + .expected = "\x10\x62\x13\x42\xbf\xb0\xfd\x40" + "\x04\x6c\x0e\x29\xf2\xcf\xdb\xf0", + .expectedlen = 16 + } +}; + +static int __init crypto_kdf108_init(void) +{ + int ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)", + crypto_kdf108_setkey, crypto_kdf108_ctr_generate); + + if (ret) { + if (fips_enabled) + panic("alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n", + ret); + + WARN(1, + "alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n", + ret); + } else { + pr_info("alg: self-tests for CTR-KDF (hmac(sha256)) passed\n"); + } + + return ret; +} + +static void __exit crypto_kdf108_exit(void) { } + +module_init(crypto_kdf108_init); +module_exit(crypto_kdf108_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); +MODULE_DESCRIPTION("Key Derivation Function conformant to SP800-108"); diff --git a/crypto/keywrap.c b/crypto/keywrap.c index 0355cce21b1e..054d9a216fc9 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c @@ -85,6 +85,7 @@ #include <linux/crypto.h> #include <linux/scatterlist.h> #include <crypto/scatterwalk.h> +#include <crypto/internal/cipher.h> #include <crypto/internal/skcipher.h> struct crypto_kw_block { @@ -113,9 +114,9 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk, scatterwalk_start(walk, sg); scatterwalk_advance(walk, skip); break; - } else - skip -= sg->length; + } + skip -= sg->length; sg = sg_next(sg); } } @@ -316,3 +317,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)"); MODULE_ALIAS_CRYPTO("kw"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/khazad.c b/crypto/khazad.c index 14ca7f1631c7..f19339954c89 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c @@ -819,7 +819,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], T6[(int)(state >> 8) & 0xff] ^ T7[(int)(state ) & 0xff] ^ roundKey[r]; - } + } state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^ (T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^ diff --git a/crypto/kpp.c b/crypto/kpp.c index 313b2c699963..678e871ce418 100644 --- a/crypto/kpp.c +++ b/crypto/kpp.c @@ -68,9 +68,17 @@ static int crypto_kpp_init_tfm(struct crypto_tfm *tfm) return 0; } +static void crypto_kpp_free_instance(struct crypto_instance *inst) +{ + struct kpp_instance *kpp = kpp_instance(inst); + + kpp->free(kpp); +} + static const struct crypto_type crypto_kpp_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_kpp_init_tfm, + .free = crypto_kpp_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_kpp_show, #endif @@ -87,6 +95,21 @@ struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 
mask) } EXPORT_SYMBOL_GPL(crypto_alloc_kpp); +int crypto_grab_kpp(struct crypto_kpp_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_kpp_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_kpp); + +int crypto_has_kpp(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_kpp_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_kpp); + static void kpp_prepare_alg(struct kpp_alg *alg) { struct crypto_alg *base = &alg->base; @@ -111,5 +134,17 @@ void crypto_unregister_kpp(struct kpp_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_kpp); +int kpp_register_instance(struct crypto_template *tmpl, + struct kpp_instance *inst) +{ + if (WARN_ON(!inst->free)) + return -EINVAL; + + kpp_prepare_alg(&inst->alg); + + return crypto_register_instance(tmpl, kpp_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(kpp_register_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Key-agreement Protocol Primitives"); diff --git a/crypto/lrw.c b/crypto/lrw.c index 63c485c0d8a6..8d59a66b6525 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -9,7 +9,7 @@ */ /* This implementation is checked against the test vectors in the above * document and by a test vector provided by Ken Buchanan at - * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html + * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html * * The test vectors are included in the testing module tcrypt.[ch] */ @@ -27,7 +27,7 @@ #define LRW_BLOCK_SIZE 16 -struct priv { +struct lrw_tfm_ctx { struct crypto_skcipher *child; /* @@ -49,12 +49,12 @@ struct priv { be128 mulinc[128]; }; -struct rctx { +struct lrw_request_ctx { be128 t; struct skcipher_request subreq; }; -static inline void setbit128_bbe(void *b, int bit) +static inline void lrw_setbit128_bbe(void *b, int bit) { __set_bit(bit ^ (0x80 - #ifdef __BIG_ENDIAN @@ -65,10 +65,10 @@ static inline void setbit128_bbe(void *b, int bit) ), b); } -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; int err, bsize = LRW_BLOCK_SIZE; const u8 *tweak = key + keylen - bsize; @@ -92,7 +92,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, /* initialize optimization table */ for (i = 0; i < 128; i++) { - setbit128_bbe(&tmp, i); + lrw_setbit128_bbe(&tmp, i); ctx->mulinc[i] = tmp; gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); } @@ -108,10 +108,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * For example: * * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 }; - * int i = next_index(&counter); + * int i = lrw_next_index(&counter); * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } */ -static int next_index(u32 *counter) +static int lrw_next_index(u32 *counter) { int i, res = 0; @@ -135,14 +135,14 @@ static int next_index(u32 *counter) * We compute the tweak masks twice (both before and after the ECB encryption or * decryption) to avoid having to allocate a temporary buffer and/or make * mutliple calls to the 'ecb(..)' instance, which usually would be slower than - * just doing the next_index() calls again. + * just doing the lrw_next_index() calls again. 
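+ * (Hence the 'second_pass' flag on lrw_xor_tweak() below: one routine
+ * applies the tweak masks both before and after the ECB step.)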
*/ -static int xor_tweak(struct skcipher_request *req, bool second_pass) +static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) { const int bs = LRW_BLOCK_SIZE; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct priv *ctx = crypto_skcipher_ctx(tfm); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); be128 t = rctx->t; struct skcipher_walk w; __be32 *iv; @@ -178,7 +178,8 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) /* T <- I*Key2, using the optimization * discussed in the specification */ - be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]); + be128_xor(&t, &t, + &ctx->mulinc[lrw_next_index(counter)]); } while ((avail -= bs) >= bs); if (second_pass && w.nbytes == w.total) { @@ -194,38 +195,40 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) return err; } -static int xor_tweak_pre(struct skcipher_request *req) +static int lrw_xor_tweak_pre(struct skcipher_request *req) { - return xor_tweak(req, false); + return lrw_xor_tweak(req, false); } -static int xor_tweak_post(struct skcipher_request *req) +static int lrw_xor_tweak_post(struct skcipher_request *req) { - return xor_tweak(req, true); + return lrw_xor_tweak(req, true); } -static void crypt_done(struct crypto_async_request *areq, int err) +static void lrw_crypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req); + err = lrw_xor_tweak_post(req); } skcipher_request_complete(req, err); } -static void init_crypt(struct skcipher_request *req) +static void lrw_init_crypt(struct skcipher_request *req) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req); + skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done, + req); /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */ skcipher_request_set_crypt(subreq, req->dst, req->dst, req->cryptlen, req->iv); @@ -237,33 +240,33 @@ static void init_crypt(struct skcipher_request *req) gf128mul_64k_bbe(&rctx->t, ctx->table); } -static int encrypt(struct skcipher_request *req) +static int lrw_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int decrypt(struct skcipher_request *req) +static int lrw_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: 
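+	       /* ECB-decrypt the bulk data between the two tweak passes */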
crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int init_tfm(struct crypto_skcipher *tfm) +static int lrw_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *cipher; cipher = crypto_spawn_skcipher(spawn); @@ -273,45 +276,39 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->child = cipher; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + - sizeof(struct rctx)); + sizeof(struct lrw_request_ctx)); return 0; } -static void exit_tfm(struct crypto_skcipher *tfm) +static void lrw_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->table) gf128mul_free_64k(ctx->table); crypto_free_skcipher(ctx->child); } -static void free(struct skcipher_instance *inst) +static void lrw_free_instance(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -343,15 +340,15 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) err = -EINVAL; if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) - goto err_drop_spawn; + goto err_free_inst; if (crypto_skcipher_alg_ivsize(alg)) - goto err_drop_spawn; + goto err_free_inst; err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", &alg->base); if (err) - goto err_drop_spawn; + goto err_free_inst; err = -EINVAL; cipher_name = alg->base.cra_name; @@ -364,22 +361,21 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); if (len < 2 || len >= sizeof(ecb_name)) - goto err_drop_spawn; + goto err_free_inst; if (ecb_name[len - 1] != ')') - goto err_drop_spawn; + goto err_free_inst; ecb_name[len - 1] = 0; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { err = -ENAMETOOLONG; - goto err_drop_spawn; + goto err_free_inst; } } else - goto err_drop_spawn; + goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | @@ -391,50 +387,45 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + LRW_BLOCK_SIZE; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = lrw_init_tfm; + 
inst->alg.exit = lrw_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = lrw_setkey; + inst->alg.encrypt = lrw_encrypt; + inst->alg.decrypt = lrw_decrypt; - inst->free = free; + inst->free = lrw_free_instance; err = skcipher_register_instance(tmpl, inst); - if (err) - goto err_drop_spawn; - -out: - return err; - -err_drop_spawn: - crypto_drop_skcipher(spawn); + if (err) { err_free_inst: - kfree(inst); - goto out; + lrw_free_instance(inst); + } + return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template lrw_tmpl = { .name = "lrw", - .create = create, + .create = lrw_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init lrw_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return crypto_register_template(&lrw_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit lrw_module_exit(void) { - crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&lrw_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(lrw_module_init); +module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode"); MODULE_ALIAS_CRYPTO("lrw"); +MODULE_SOFTDEP("pre: ecb"); diff --git a/crypto/md5.c b/crypto/md5.c index 22dc60bc0437..72c0c46fb5ee 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -23,9 +23,6 @@ #include <linux/types.h> #include <asm/byteorder.h> -#define MD5_DIGEST_WORDS 4 -#define MD5_MESSAGE_BYTES 64 - const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index 63350c4ad461..f4c31049601c 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -7,7 +7,7 @@ * Copyright (c) 2004 Jouni Malinen <j@w1.fi> */ #include <crypto/internal/hash.h> -#include <asm/byteorder.h> +#include <asm/unaligned.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> @@ -19,7 +19,7 @@ struct michael_mic_ctx { }; struct michael_mic_desc_ctx { - u8 pending[4]; + __le32 pending; size_t pending_len; u32 l, r; @@ -60,13 +60,12 @@ static int michael_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); - const __le32 *src; if (mctx->pending_len) { int flen = 4 - mctx->pending_len; if (flen > len) flen = len; - memcpy(&mctx->pending[mctx->pending_len], data, flen); + memcpy((u8 *)&mctx->pending + mctx->pending_len, data, flen); mctx->pending_len += flen; data += flen; len -= flen; @@ -74,23 +73,21 @@ static int michael_update(struct shash_desc *desc, const u8 *data, if (mctx->pending_len < 4) return 0; - src = (const __le32 *)mctx->pending; - mctx->l ^= le32_to_cpup(src); + mctx->l ^= le32_to_cpu(mctx->pending); michael_block(mctx->l, mctx->r); mctx->pending_len = 0; } - src = (const __le32 *)data; - while (len >= 4) { - mctx->l ^= le32_to_cpup(src++); + mctx->l ^= get_unaligned_le32(data); michael_block(mctx->l, mctx->r); + data += 4; len -= 4; } if (len > 0) { mctx->pending_len = len; - memcpy(mctx->pending, src, len); + memcpy(&mctx->pending, data, len); } return 0; @@ -100,8 +97,7 @@ static int michael_update(struct shash_desc *desc, const u8 *data, static int michael_final(struct shash_desc *desc, u8 *out) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); - u8 *data = mctx->pending; - __le32 *dst = 
diff --git a/crypto/ofb.c b/crypto/ofb.c
index 2ec68e3f2c55..b630fdecceee 100644
--- a/crypto/ofb.c
+++ b/crypto/ofb.c
@@ -8,6 +8,7 @@
  */
 
 #include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -102,3 +103,4 @@ module_exit(crypto_ofb_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("OFB block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("ofb");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index ae921fb74dc9..7030f59e46b6 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -10,6 +10,7 @@
  */
 
 #include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -191,3 +192,4 @@ module_exit(crypto_pcbc_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("PCBC block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("pcbc");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
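Both ofb and pcbc now pair the <crypto/internal/cipher.h> include with MODULE_IMPORT_NS(CRYPTO_INTERNAL), since the bare-cipher helpers they call live in that symbol namespace. A minimal sketch of the contract (the demo function is invented; crypto_cipher_encrypt_one() is the real namespaced helper):

#include <crypto/internal/cipher.h>
#include <linux/module.h>

static void demo_encrypt_block(struct crypto_cipher *cipher,
			       u8 *dst, const u8 *src)
{
	/*
	 * Exported in the CRYPTO_INTERNAL namespace; without the matching
	 * MODULE_IMPORT_NS() below, modpost rejects the module at build time.
	 */
	crypto_cipher_encrypt_one(cipher, dst, src);
}

MODULE_IMPORT_NS(CRYPTO_INTERNAL);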
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 1b632139a8c1..9d10b846ccf7 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -78,12 +78,14 @@ static void pcrypt_aead_enc(struct padata_priv *padata)
 {
 	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 	struct aead_request *req = pcrypt_request_ctx(preq);
+	int ret;
 
-	padata->info = crypto_aead_encrypt(req);
+	ret = crypto_aead_encrypt(req);
 
-	if (padata->info == -EINPROGRESS)
+	if (ret == -EINPROGRESS)
 		return;
 
+	padata->info = ret;
 	padata_do_serial(padata);
 }
 
@@ -123,12 +125,14 @@ static void pcrypt_aead_dec(struct padata_priv *padata)
 {
 	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 	struct aead_request *req = pcrypt_request_ctx(preq);
+	int ret;
 
-	padata->info = crypto_aead_decrypt(req);
+	ret = crypto_aead_decrypt(req);
 
-	if (padata->info == -EINPROGRESS)
+	if (ret == -EINPROGRESS)
 		return;
 
+	padata->info = ret;
 	padata_do_serial(padata);
 }
 
@@ -226,23 +230,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst,
 }
 
 static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
-			      u32 type, u32 mask)
+			      struct crypto_attr_type *algt)
 {
 	struct pcrypt_instance_ctx *ctx;
-	struct crypto_attr_type *algt;
 	struct aead_instance *inst;
 	struct aead_alg *alg;
-	const char *name;
+	u32 mask = crypto_algt_inherited_mask(algt);
 	int err;
 
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return PTR_ERR(algt);
-
-	name = crypto_attr_alg_name(tb[1]);
-	if (IS_ERR(name))
-		return PTR_ERR(name);
-
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 	if (!inst)
 		return -ENOMEM;
@@ -252,23 +247,23 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
 	ctx = aead_instance_ctx(inst);
 	ctx->psenc = padata_alloc_shell(pencrypt);
 	if (!ctx->psenc)
-		goto out_free_inst;
+		goto err_free_inst;
 
 	ctx->psdec = padata_alloc_shell(pdecrypt);
 	if (!ctx->psdec)
-		goto out_free_psenc;
+		goto err_free_inst;
 
 	err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
-			       name, 0, 0);
+			       crypto_attr_alg_name(tb[1]), 0, mask);
 	if (err)
-		goto out_free_psdec;
+		goto err_free_inst;
 
 	alg = crypto_spawn_aead_alg(&ctx->spawn);
 	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
 	if (err)
-		goto out_drop_aead;
+		goto err_free_inst;
 
-	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC;
 
 	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
@@ -286,21 +281,11 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
 	inst->free = pcrypt_free;
 
 	err = aead_register_instance(tmpl, inst);
-	if (err)
-		goto out_drop_aead;
-
-out:
+	if (err) {
+err_free_inst:
+		pcrypt_free(inst);
+	}
 	return err;
-
-out_drop_aead:
-	crypto_drop_aead(&ctx->spawn);
-out_free_psdec:
-	padata_free_shell(ctx->psdec);
-out_free_psenc:
-	padata_free_shell(ctx->psenc);
-out_free_inst:
-	kfree(inst);
-	goto out;
 }
 
 static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -313,7 +298,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AEAD:
-		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
+		return pcrypt_create_aead(tmpl, tb, algt);
 	}
 
 	return -EINVAL;
@@ -335,7 +320,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
 {
 	int ret = -ENOMEM;
 
-	*pinst = padata_alloc_possible(name);
+	*pinst = padata_alloc(name);
 	if (!*pinst)
 		return ret;
 
@@ -346,12 +331,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
 	return ret;
 }
 
-static void pcrypt_fini_padata(struct padata_instance *pinst)
-{
-	padata_stop(pinst);
-	padata_free(pinst);
-}
-
 static struct crypto_template pcrypt_tmpl = {
 	.name = "pcrypt",
 	.create = pcrypt_create,
@@ -374,13 +353,10 @@ static int __init pcrypt_init(void)
 	if (err)
 		goto err_deinit_pencrypt;
 
-	padata_start(pencrypt);
-	padata_start(pdecrypt);
-
 	return crypto_register_template(&pcrypt_tmpl);
 
 err_deinit_pencrypt:
-	pcrypt_fini_padata(pencrypt);
+	padata_free(pencrypt);
 err_unreg_kset:
 	kset_unregister(pcrypt_kset);
 err:
@@ -391,8 +367,8 @@ static void __exit pcrypt_exit(void)
 {
 	crypto_unregister_template(&pcrypt_tmpl);
 
-	pcrypt_fini_padata(pencrypt);
-	pcrypt_fini_padata(pdecrypt);
+	padata_free(pencrypt);
+	padata_free(pdecrypt);
 
 	kset_unregister(pcrypt_kset);
 }
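The pcrypt error handling now follows the same shape as lrw_create() earlier in this diff: the instance's free routine (pcrypt_free(), which tolerates a partially initialized instance) replaces the chain of per-resource unwind labels. A skeleton of the pattern, with invented helper names:

static int demo_alloc_resources(void);	/* hypothetical */
static int demo_register(void);		/* hypothetical */
static void demo_free_instance(void);	/* frees whatever was set up */

static int demo_create(void)
{
	int err;

	err = demo_alloc_resources();
	if (err)
		goto err_free_inst;

	err = demo_register();
	if (err) {
		/* Single unwind point; earlier failures jump into this block. */
err_free_inst:
		demo_free_instance();
	}
	return err;
}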
diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c
new file mode 100644
index 000000000000..16bfa6925b31
--- /dev/null
+++ b/crypto/polyval-generic.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * POLYVAL: hash function for HCTR2.
+ *
+ * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
+ * Copyright (c) 2009 Intel Corp.
+ *   Author: Huang Ying <ying.huang@intel.com>
+ * Copyright 2021 Google LLC
+ */
+
+/*
+ * Code based on crypto/ghash-generic.c
+ *
+ * POLYVAL is a keyed hash function similar to GHASH. POLYVAL uses a different
+ * modulus for finite field multiplication which makes hardware accelerated
+ * implementations on little-endian machines faster. POLYVAL is used in the
+ * kernel to implement HCTR2, but was originally specified for AES-GCM-SIV
+ * (RFC 8452).
+ *
+ * For more information see:
+ * Length-preserving encryption with HCTR2:
+ *   https://eprint.iacr.org/2021/1441.pdf
+ * AES-GCM-SIV: Nonce Misuse-Resistant Authenticated Encryption:
+ *   https://datatracker.ietf.org/doc/html/rfc8452
+ *
+ * Like GHASH, POLYVAL is not a cryptographic hash function and should
+ * not be used outside of crypto modes explicitly designed to use POLYVAL.
+ *
+ * This implementation uses a convenient trick involving the GHASH and POLYVAL
+ * fields. This trick allows multiplication in the POLYVAL field to be
+ * implemented by using multiplication in the GHASH field as a subroutine. An
+ * element of the POLYVAL field can be converted to an element of the GHASH
+ * field by computing x*REVERSE(a), where REVERSE reverses the byte-ordering of
+ * a. Similarly, an element of the GHASH field can be converted back to the
+ * POLYVAL field by computing REVERSE(x^{-1}*a). For more information, see:
+ * https://datatracker.ietf.org/doc/html/rfc8452#appendix-A
+ *
+ * By using this trick, we do not need to implement the POLYVAL field for the
+ * generic implementation.
+ *
+ * Warning: this generic implementation is not intended to be used in practice
+ * and is not constant time. For practical use, a hardware accelerated
+ * implementation of POLYVAL should be used instead.
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/gf128mul.h>
+#include <crypto/polyval.h>
+#include <crypto/internal/hash.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+struct polyval_tfm_ctx {
+	struct gf128mul_4k *gf128;
+};
+
+struct polyval_desc_ctx {
+	union {
+		u8 buffer[POLYVAL_BLOCK_SIZE];
+		be128 buffer128;
+	};
+	u32 bytes;
+};
+
+static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
+			     const u8 src[POLYVAL_BLOCK_SIZE])
+{
+	u64 a = get_unaligned((const u64 *)&src[0]);
+	u64 b = get_unaligned((const u64 *)&src[8]);
+
+	put_unaligned(swab64(a), (u64 *)&dst[8]);
+	put_unaligned(swab64(b), (u64 *)&dst[0]);
+}
+
+/*
+ * Performs multiplication in the POLYVAL field using the GHASH field as a
+ * subroutine. This function is used as a fallback for hardware accelerated
+ * implementations when simd registers are unavailable.
+ *
+ * Note: This function is not used for polyval-generic, instead we use the 4k
+ * lookup table implementation for finite field multiplication.
+ */
+void polyval_mul_non4k(u8 *op1, const u8 *op2)
+{
+	be128 a, b;
+
+	// Assume one argument is in Montgomery form and one is not.
+	copy_and_reverse((u8 *)&a, op1);
+	copy_and_reverse((u8 *)&b, op2);
+	gf128mul_x_lle(&a, &a);
+	gf128mul_lle(&a, &b);
+	copy_and_reverse(op1, (u8 *)&a);
+}
+EXPORT_SYMBOL_GPL(polyval_mul_non4k);
+
+/*
+ * Perform a POLYVAL update using non4k multiplication. This function is used
+ * as a fallback for hardware accelerated implementations when simd registers
+ * are unavailable.
+ *
+ * Note: This function is not used for polyval-generic, instead we use the 4k
+ * lookup table implementation of finite field multiplication.
+ */
+void polyval_update_non4k(const u8 *key, const u8 *in,
+			  size_t nblocks, u8 *accumulator)
+{
+	while (nblocks--) {
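+		/*
+		 * Fold one 16-byte block into the accumulator in the
+		 * POLYVAL field: XOR the block into the accumulator,
+		 * then multiply by the key via the GHASH-based helper
+		 * above.
+		 */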