Diffstat (limited to 'include/crypto')
-rw-r--r--  include/crypto/acompress.h | 80
-rw-r--r--  include/crypto/aead.h | 46
-rw-r--r--  include/crypto/aes.h | 5
-rw-r--r--  include/crypto/akcipher.h | 85
-rw-r--r--  include/crypto/algapi.h | 178
-rw-r--r--  include/crypto/aria.h | 458
-rw-r--r--  include/crypto/asym_tpm_subtype.h | 19
-rw-r--r--  include/crypto/b128ops.h | 14
-rw-r--r--  include/crypto/blake2b.h | 66
-rw-r--r--  include/crypto/blake2s.h | 68
-rw-r--r--  include/crypto/cbc.h | 141
-rw-r--r--  include/crypto/chacha.h | 20
-rw-r--r--  include/crypto/cryptd.h | 3
-rw-r--r--  include/crypto/curve25519.h | 2
-rw-r--r--  include/crypto/dh.h | 26
-rw-r--r--  include/crypto/drbg.h | 11
-rw-r--r--  include/crypto/ecc_curve.h | 62
-rw-r--r--  include/crypto/ecdh.h | 4
-rw-r--r--  include/crypto/engine.h | 121
-rw-r--r--  include/crypto/gcm.h | 22
-rw-r--r--  include/crypto/hash.h | 162
-rw-r--r--  include/crypto/hash_info.h | 4
-rw-r--r--  include/crypto/if_alg.h | 22
-rw-r--r--  include/crypto/internal/acompress.h | 51
-rw-r--r--  include/crypto/internal/aead.h | 49
-rw-r--r--  include/crypto/internal/akcipher.h | 28
-rw-r--r--  include/crypto/internal/blake2b.h | 115
-rw-r--r--  include/crypto/internal/blake2s.h | 27
-rw-r--r--  include/crypto/internal/cipher.h | 220
-rw-r--r--  include/crypto/internal/cryptouser.h | 16
-rw-r--r--  include/crypto/internal/ecc.h | 293
-rw-r--r--  include/crypto/internal/engine.h | 74
-rw-r--r--  include/crypto/internal/hash.h | 68
-rw-r--r--  include/crypto/internal/kdf_selftest.h | 71
-rw-r--r--  include/crypto/internal/kpp.h | 190
-rw-r--r--  include/crypto/internal/poly1305.h | 3
-rw-r--r--  include/crypto/internal/scompress.h | 17
-rw-r--r--  include/crypto/internal/sig.h | 17
-rw-r--r--  include/crypto/internal/skcipher.h | 152
-rw-r--r--  include/crypto/kdf_sp800108.h | 61
-rw-r--r--  include/crypto/kpp.h | 42
-rw-r--r--  include/crypto/pcrypt.h | 2
-rw-r--r--  include/crypto/poly1305.h | 6
-rw-r--r--  include/crypto/polyval.h | 22
-rw-r--r--  include/crypto/public_key.h | 47
-rw-r--r--  include/crypto/rng.h | 20
-rw-r--r--  include/crypto/scatterwalk.h | 21
-rw-r--r--  include/crypto/sha1.h | 46
-rw-r--r--  include/crypto/sha1_base.h | 5
-rw-r--r--  include/crypto/sha2.h (renamed from include/crypto/sha.h) | 43
-rw-r--r--  include/crypto/sha256_base.h | 55
-rw-r--r--  include/crypto/sha512_base.h | 5
-rw-r--r--  include/crypto/sig.h | 140
-rw-r--r--  include/crypto/skcipher.h | 399
-rw-r--r--  include/crypto/sm2.h | 28
-rw-r--r--  include/crypto/sm3.h | 34
-rw-r--r--  include/crypto/sm3_base.h | 3
-rw-r--r--  include/crypto/sm4.h | 29
-rw-r--r--  include/crypto/utils.h | 73
-rw-r--r--  include/crypto/xts.h | 25
60 files changed, 3208 insertions, 908 deletions
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index fcde59c65a81..54937b615239 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -8,9 +8,13 @@
*/
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
+#define CRYPTO_ACOMP_DST_MAX 131072
/**
* struct acomp_req - asynchronous (de)compression request
@@ -52,36 +56,10 @@ struct crypto_acomp {
struct crypto_tfm base;
};
-/**
- * struct acomp_alg - asynchronous compression algorithm
- *
- * @compress: Function performs a compress operation
- * @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the algorithm
- * @init: Initialize the cryptographic transformation object.
- * This function is used to initialize the cryptographic
- * transformation object. This function is called only once at
- * the instantiation time, right after the transformation context
- * was allocated. In case the cryptographic hardware has some
- * special requirements which need to be handled by software, this
- * function shall check for the precise requirement of the
- * transformation and put any software fallbacks in place.
- * @exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @init, used to remove various changes set in
- * @init.
- *
- * @reqsize: Context size for (de)compression requests
- * @base: Common crypto API algorithm data structure
- */
-struct acomp_alg {
- int (*compress)(struct acomp_req *req);
- int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
- int (*init)(struct crypto_acomp *tfm);
- void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
- struct crypto_alg base;
-};
+#define COMP_ALG_COMMON { \
+ struct crypto_alg base; \
+}
+struct comp_alg_common COMP_ALG_COMMON;
/**
* DOC: Asynchronous Compression API
@@ -130,9 +108,10 @@ static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
return &tfm->base;
}
-static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+static inline struct comp_alg_common *__crypto_comp_alg_common(
+ struct crypto_alg *alg)
{
- return container_of(alg, struct acomp_alg, base);
+ return container_of(alg, struct comp_alg_common, base);
}
static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
@@ -140,9 +119,10 @@ static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_acomp, base);
}
-static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+static inline struct comp_alg_common *crypto_comp_alg_common(
+ struct crypto_acomp *tfm)
{
- return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+ return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}
static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
@@ -156,6 +136,12 @@ static inline void acomp_request_set_tfm(struct acomp_req *req,
req->base.tfm = crypto_acomp_tfm(tfm);
}
+static inline bool acomp_is_async(struct crypto_acomp *tfm)
+{
+ return crypto_comp_alg_common(tfm)->base.cra_flags &
+ CRYPTO_ALG_ASYNC;
+}
+
static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
return __crypto_acomp_tfm(req->base.tfm);
@@ -165,6 +151,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
* crypto_free_acomp() -- free ACOMPRESS tfm handle
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
@@ -216,7 +204,8 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
{
req->base.complete = cmpl;
req->base.data = data;
- req->base.flags = flgs;
+ req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
+ req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
}
/**
@@ -243,6 +232,7 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;
+ req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
if (!req->dst)
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
@@ -258,15 +248,7 @@ static inline void acomp_request_set_params(struct acomp_req *req,
*/
static inline int crypto_acomp_compress(struct acomp_req *req)
{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int slen = req->slen;
- int ret;
-
- crypto_stats_get(alg);
- ret = tfm->compress(req);
- crypto_stats_compress(slen, ret, alg);
- return ret;
+ return crypto_acomp_reqtfm(req)->compress(req);
}
/**
@@ -280,15 +262,7 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
*/
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int slen = req->slen;
- int ret;
-
- crypto_stats_get(alg);
- ret = tfm->decompress(req);
- crypto_stats_decompress(slen, ret, alg);
- return ret;
+ return crypto_acomp_reqtfm(req)->decompress(req);
}
#endif
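
Usage sketch (illustrative, not part of this diff): with the stats plumbing removed, crypto_acomp_compress() is a thin inline around the tfm's ->compress(); a caller with caller-provided src_sg/dst_sg scatterlists and lengths typically drives it through the standard request helpers:

	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	/* passing dst == NULL would set CRYPTO_ACOMP_ALLOC_OUTPUT, as shown above */
	acomp_request_set_params(req, src_sg, dst_sg, src_len, dst_len);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	err = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);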
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index c32a6f5664e9..0e8a41638678 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -8,9 +8,11 @@
#ifndef _CRYPTO_AEAD_H
#define _CRYPTO_AEAD_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/types.h>
/**
* DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
@@ -26,15 +28,12 @@
*
* For example: authenc(hmac(sha256), cbc(aes))
*
- * The example code provided for the symmetric key cipher operation
- * applies here as well. Naturally all *skcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addition, for the AEAD
- * operation, the aead_request_set_ad function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
+ * The example code provided for the symmetric key cipher operation applies
+ * here as well. Naturally all *skcipher* symbols must be exchanged with the *aead*
+ * pendants discussed in the following. In addition, for the AEAD operation,
+ * the aead_request_set_ad function must be used to set the pointer to the
+ * associated data memory location before performing the encryption or
+ * decryption operation. Another deviation from the asynchronous block cipher
* operation is that the caller should explicitly check for -EBADMSG of the
* crypto_aead_decrypt. That error indicates an authentication error, i.e.
* a breach in the integrity of the message. In essence, that -EBADMSG error
@@ -48,7 +47,10 @@
*
* The destination scatterlist has the same layout, except that the plaintext
* (resp. ciphertext) will grow (resp. shrink) by the authentication tag size
- * during encryption (resp. decryption).
+ * during encryption (resp. decryption). The authentication tag is generated
+ * during the encryption operation and appended to the ciphertext. During
+ * decryption, the authentication tag is consumed along with the ciphertext and
+ * used to verify the integrity of the plaintext and the associated data.
*
* In-place encryption/decryption is enabled by using the same scatterlist
* pointer for both the source and destination.
@@ -73,6 +75,7 @@
*/
struct crypto_aead;
+struct scatterlist;
/**
* struct aead_request - AEAD request
@@ -185,12 +188,31 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+/**
+ * crypto_has_aead() - Search for the availability of an aead.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * aead
+ * @type: specifies the type of the aead
+ * @mask: specifies the mask for the aead
+ *
+ * Return: true when the aead is known to the kernel crypto API; false
+ * otherwise
+ */
+int crypto_has_aead(const char *alg_name, u32 type, u32 mask);
+
+static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
+}
+
static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -483,7 +505,7 @@ static inline void aead_request_set_callback(struct aead_request *req,
* The memory structure for cipher operation has the following structure:
*
* - AEAD encryption input: assoc data || plaintext
- * - AEAD encryption output: assoc data || cipherntext || auth tag
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
* - AEAD decryption input: assoc data || ciphertext || auth tag
* - AEAD decryption output: assoc data || plaintext
*
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 2090729701ab..9339da7c20a8 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -87,4 +87,9 @@ void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
+void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE]);
+void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE]);
+
#endif
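
Usage sketch (illustrative, not part of this diff): the new library-style CFB helpers pair with aes_expandkey() from the same header; key, key_len, src, dst, len and iv are caller-provided:

	struct crypto_aes_ctx ctx;

	if (aes_expandkey(&ctx, key, key_len))
		return -EINVAL;

	aescfb_encrypt(&ctx, dst, src, len, iv);
	memzero_explicit(&ctx, sizeof(ctx));	/* wipe the round keys */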
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 1d3aa252caba..18a10cad07aa 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -7,6 +7,8 @@
*/
#ifndef _CRYPTO_AKCIPHER_H
#define _CRYPTO_AKCIPHER_H
+
+#include <linux/atomic.h>
#include <linux/crypto.h>
/**
@@ -43,9 +45,12 @@ struct akcipher_request {
* struct crypto_akcipher - user-instantiated objects which encapsulate
* algorithms and core processing logic
*
+ * @reqsize: Request context size required by algorithm implementation
* @base: Common crypto API algorithm data structure
*/
struct crypto_akcipher {
+ unsigned int reqsize;
+
struct crypto_tfm base;
};
@@ -86,7 +91,6 @@ struct crypto_akcipher {
* counterpart to @init, used to remove various changes set in
* @init.
*
- * @reqsize: Request context size required by algorithm implementation
* @base: Common crypto API algorithm data structure
*/
struct akcipher_alg {
@@ -102,7 +106,6 @@ struct akcipher_alg {
int (*init)(struct crypto_akcipher *tfm);
void (*exit)(struct crypto_akcipher *tfm);
- unsigned int reqsize;
struct crypto_alg base;
};
@@ -155,7 +158,7 @@ static inline struct akcipher_alg *crypto_akcipher_alg(
static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm)
{
- return crypto_akcipher_alg(tfm)->reqsize;
+ return tfm->reqsize;
}
static inline void akcipher_request_set_tfm(struct akcipher_request *req,
@@ -174,6 +177,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
* crypto_free_akcipher() - free AKCIPHER tfm handle
*
* @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
{
@@ -285,15 +290,8 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- unsigned int src_len = req->src_len;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->encrypt(req);
- crypto_stats_akcipher_encrypt(src_len, ret, calg);
- return ret;
+
+ return crypto_akcipher_alg(tfm)->encrypt(req);
}
/**
@@ -309,18 +307,47 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- unsigned int src_len = req->src_len;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->decrypt(req);
- crypto_stats_akcipher_decrypt(src_len, ret, calg);
- return ret;
+
+ return crypto_akcipher_alg(tfm)->decrypt(req);
}
/**
+ * crypto_akcipher_sync_encrypt() - Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
+ * crypto_akcipher_sync_decrypt() - Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: Output length on success; error code in case of error
+ */
+int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
* crypto_akcipher_sign() - Invoke public key sign operation
*
* Function invokes the specific public key sign operation for a given
@@ -333,14 +360,8 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->sign(req);
- crypto_stats_akcipher_sign(ret, calg);
- return ret;
+ return crypto_akcipher_alg(tfm)->sign(req);
}
/**
@@ -360,14 +381,8 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->verify(req);
- crypto_stats_akcipher_verify(ret, calg);
- return ret;
+ return crypto_akcipher_alg(tfm)->verify(req);
}
/**
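
Usage sketch (illustrative, not part of this diff): the new synchronous helpers handle request allocation and waiting internally, so a caller with flat buffers (der_key, msg, out and their lengths are placeholders) can simply do:

	struct crypto_akcipher *tfm;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
	if (!ret)
		ret = crypto_akcipher_sync_encrypt(tfm, msg, msg_len,
						   out, out_len);
	crypto_free_akcipher(tfm);
	return ret;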
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 143d884d65c7..156de41ca760 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -7,10 +7,12 @@
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H
+#include <crypto/utils.h>
+#include <linux/align.h>
+#include <linux/cache.h>
#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
/*
* Maximum values for blocksize and alignmask, used to allocate
@@ -18,20 +20,43 @@
* algs and architectures. Ciphers have a lower maximum size.
*/
#define MAX_ALGAPI_BLOCKSIZE 160
-#define MAX_ALGAPI_ALIGNMASK 63
+#define MAX_ALGAPI_ALIGNMASK 127
#define MAX_CIPHER_BLOCKSIZE 16
#define MAX_CIPHER_ALIGNMASK 15
+#ifdef ARCH_DMA_MINALIGN
+#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
+#else
+#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
+#endif
+
+#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so retains those aliases as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
struct crypto_aead;
struct crypto_instance;
struct module;
+struct notifier_block;
struct rtattr;
+struct scatterlist;
struct seq_file;
+struct sk_buff;
struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
unsigned int (*extsize)(struct crypto_alg *alg);
- int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
@@ -55,6 +80,8 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
+ struct work_struct free_work;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -96,6 +123,23 @@ struct scatter_walk {
unsigned int offset;
};
+struct crypto_attr_alg {
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_attr_type {
+ u32 type;
+ u32 mask;
+};
+
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
+
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
@@ -118,7 +162,6 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
@@ -134,98 +177,45 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
}
void crypto_inc(u8 *a, unsigned int size);
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
-static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
- __builtin_constant_p(size) &&
- (size % sizeof(unsigned long)) == 0) {
- unsigned long *d = (unsigned long *)dst;
- unsigned long *s = (unsigned long *)src;
-
- while (size > 0) {
- *d++ ^= *s++;
- size -= sizeof(unsigned long);
- }
- } else {
- __crypto_xor(dst, dst, src, size);
- }
+ return tfm->__crt_ctx;
}
-static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
- unsigned int size)
+static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
+ unsigned int align)
{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
- __builtin_constant_p(size) &&
- (size % sizeof(unsigned long)) == 0) {
- unsigned long *d = (unsigned long *)dst;
- unsigned long *s1 = (unsigned long *)src1;
- unsigned long *s2 = (unsigned long *)src2;
-
- while (size > 0) {
- *d++ = *s1++ ^ *s2++;
- size -= sizeof(unsigned long);
- }
- } else {
- __crypto_xor(dst, src1, src2, size);
- }
-}
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
-static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
-{
- return PTR_ALIGN(crypto_tfm_ctx(tfm),
- crypto_tfm_alg_alignmask(tfm) + 1);
-}
-
-static inline struct crypto_instance *crypto_tfm_alg_instance(
- struct crypto_tfm *tfm)
-{
- return container_of(tfm->__crt_alg, struct crypto_instance, alg);
-}
-
-static inline void *crypto_instance_ctx(struct crypto_instance *inst)
-{
- return inst->__ctx;
+ return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}
-struct crypto_cipher_spawn {
- struct crypto_spawn base;
-};
-
-static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
- struct crypto_instance *inst,
- const char *name, u32 type, u32 mask)
+static inline unsigned int crypto_dma_align(void)
{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
- return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+ return CRYPTO_DMA_ALIGN;
}
-static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
+static inline unsigned int crypto_dma_padding(void)
{
- crypto_drop_spawn(&spawn->base);
+ return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}
-static inline struct crypto_alg *crypto_spawn_cipher_alg(
- struct crypto_cipher_spawn *spawn)
+static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
{
- return spawn->base.alg;
+ return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}
-static inline struct crypto_cipher *crypto_spawn_cipher(
- struct crypto_cipher_spawn *spawn)
+static inline struct crypto_instance *crypto_tfm_alg_instance(
+ struct crypto_tfm *tfm)
{
- u32 type = CRYPTO_ALG_TYPE_CIPHER;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
+ return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}
-static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
+static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
- return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+ return inst->__ctx;
}
static inline struct crypto_async_request *crypto_get_backlog(
@@ -260,29 +250,6 @@ static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
-noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
-
-/**
- * crypto_memneq - Compare two areas of memory without leaking
- * timing information.
- *
- * @a: One area of memory
- * @b: Another area of memory
- * @size: The size of the area.
- *
- * Returns 0 when data is equal, 1 otherwise.
- */
-static inline int crypto_memneq(const void *a, const void *b, size_t size)
-{
- return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
-}
-
-static inline void crypto_yield(u32 flags)
-{
- if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- cond_resched();
-}
-
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
@@ -293,4 +260,15 @@ enum {
CRYPTO_MSG_ALG_LOADED,
};
+static inline void crypto_request_complete(struct crypto_async_request *req,
+ int err)
+{
+ req->complete(req->data, err);
+}
+
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
#endif /* _CRYPTO_ALGAPI_H */
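
Usage sketch (illustrative, not part of this diff): a driver that DMAs into its tfm context reserves the realignment slack in cra_ctxsize and fetches a DMA-aligned pointer with the new helper (struct my_ctx and tfm are placeholder names):

	/* in the alg definition:
	 *	.cra_ctxsize = sizeof(struct my_ctx) + crypto_dma_padding(),
	 */
	struct my_ctx *ctx = crypto_tfm_ctx_dma(tfm);	/* aligned to crypto_dma_align() */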
diff --git a/include/crypto/aria.h b/include/crypto/aria.h
new file mode 100644
index 000000000000..73295146be11
--- /dev/null
+++ b/include/crypto/aria.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * ARIA Cipher Algorithm.
+ *
+ * Documentation of ARIA can be found in RFC 5794.
+ * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
+ *
+ * Information for ARIA
+ * http://210.104.33.10/ARIA/index-e.html (English)
+ * http://seed.kisa.or.kr/ (Korean)
+ *
+ * Public domain version is distributed above.
+ */
+
+#ifndef _CRYPTO_ARIA_H
+#define _CRYPTO_ARIA_H
+
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
+#define ARIA_MIN_KEY_SIZE 16
+#define ARIA_MAX_KEY_SIZE 32
+#define ARIA_BLOCK_SIZE 16
+#define ARIA_MAX_RD_KEYS 17
+#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32))
+
+struct aria_ctx {
+ u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
+ u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
+ int rounds;
+ int key_length;
+};
+
+static const u32 s1[256] = {
+ 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b,
+ 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5,
+ 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b,
+ 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676,
+ 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d,
+ 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0,
+ 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf,
+ 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0,
+ 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626,
+ 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc,
+ 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1,
+ 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515,
+ 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3,
+ 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a,
+ 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2,
+ 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575,
+ 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a,
+ 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0,
+ 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3,
+ 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484,
+ 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed,
+ 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b,
+ 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939,
+ 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf,
+ 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb,
+ 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585,
+ 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f,
+ 0x00505050, 0x003c3c3c, 0x009f9f9f, 0x00a8a8a8,
+ 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f,
+ 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5,
+ 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121,
+ 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2,
+ 0x00cdcdcd, 0x000c0c0c, 0x00131313, 0x00ececec,
+ 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717,
+ 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d,
+ 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373,
+ 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc,
+ 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888,
+ 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414,
+ 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb,
+ 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a,
+ 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c,
+ 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262,
+ 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979,
+ 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d,
+ 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9,
+ 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea,
+ 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808,
+ 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e,
+ 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6,
+ 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f,
+ 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a,
+ 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666,
+ 0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e,
+ 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9,
+ 0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e,
+ 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111,
+ 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494,
+ 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9,
+ 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf,
+ 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d,
+ 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868,
+ 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f,
+ 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616
+};
+
+static const u32 s2[256] = {
+ 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc,
+ 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc,
+ 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646,
+ 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1,
+ 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb,
+ 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b,
+ 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303,
+ 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1,
+ 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b,
+ 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969,
+ 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae,
+ 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb,
+ 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb,
+ 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4,
+ 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa,
+ 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb,
+ 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929,
+ 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191,
+ 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595,
+ 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd,
+ 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838,
+ 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828,
+ 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7,
+ 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353,
+ 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131,
+ 0x36003636, 0x21002121, 0x58005858, 0x48004848,
+ 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474,
+ 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1,
+ 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7,
+ 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626,
+ 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9,
+ 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040,
+ 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd,
+ 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404,
+ 0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f,
+ 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc,
+ 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757,
+ 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565,
+ 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e,
+ 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5,
+ 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717,
+ 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a,
+ 0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767,
+ 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343,
+ 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5,
+ 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434,
+ 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a,
+ 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8,
+ 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b,
+ 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6,
+ 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424,
+ 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada,
+ 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef,
+ 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f,
+ 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a,
+ 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c,
+ 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333,
+ 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3,
+ 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0,
+ 0xd600d6d6, 0x3f003f3f, 0x7c007c7c, 0x6d006d6d,
+ 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5,
+ 0x3d003d3d, 0x22002222, 0xb300b3b3, 0xf800f8f8,
+ 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a,
+ 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181
+};
+
+static const u32 x1[256] = {
+ 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5,
+ 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038,
+ 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e,
+ 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb,
+ 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082,
+ 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087,
+ 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044,
+ 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb,
+ 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032,
+ 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d,
+ 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b,
+ 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e,
+ 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066,
+ 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2,
+ 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049,
+ 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025,
+ 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064,
+ 0x86860086, 0x68680068, 0x98980098, 0x16160016,
+ 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc,
+ 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092,
+ 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050,
+ 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da,
+ 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057,
+ 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084,
+ 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000,
+ 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a,
+ 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005,
+ 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006,
+ 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f,
+ 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002,
+ 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003,
+ 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b,
+ 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041,
+ 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea,
+ 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce,
+ 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073,
+ 0x96960096, 0xacac00ac, 0x74740074, 0x22220022,
+ 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085,
+ 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8,
+ 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e,
+ 0x47470047, 0xf1f100f1, 0x1a1a001a, 0x71710071,
+ 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089,
+ 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e,
+ 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b,
+ 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b,
+ 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020,
+ 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe,
+ 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4,
+ 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033,
+ 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031,
+ 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059,
+ 0x27270027, 0x80800080, 0xecec00ec, 0x5f5f005f,
+ 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9,
+ 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d,
+ 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f,
+ 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef,
+ 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d,
+ 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0,
+ 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c,
+ 0x83830083, 0x53530053, 0x99990099, 0x61610061,
+ 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e,
+ 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026,
+ 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063,
+ 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d
+};
+
+static const u32 x2[256] = {
+ 0x30303000, 0x68686800, 0x99999900, 0x1b1b1b00,
+ 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800,
+ 0x50505000, 0x39393900, 0xdbdbdb00, 0xe1e1e100,
+ 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00,
+ 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00,
+ 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300,
+ 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600,
+ 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00,
+ 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900,
+ 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600,
+ 0x57575700, 0x43434300, 0x56565600, 0x17171700,
+ 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00,
+ 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300,
+ 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00,
+ 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800,
+ 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00,
+ 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00,
+ 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800,
+ 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300,
+ 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00,
+ 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00,
+ 0x02020200, 0x24242400, 0x75757500, 0x93939300,
+ 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200,
+ 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00,
+ 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00,
+ 0x12121200, 0x97979700, 0x32323200, 0xababab00,
+ 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300,
+ 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900,
+ 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100,
+ 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900,
+ 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600,
+ 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100,
+ 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500,
+ 0x86868600, 0x36363600, 0xbebebe00, 0x61616100,
+ 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00,
+ 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00,
+ 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00,
+ 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500,
+ 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00,
+ 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700,
+ 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00,
+ 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000,
+ 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100,
+ 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00,
+ 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600,
+ 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000,
+ 0x5a5a5a00, 0x7d7d7d00, 0xfdfdfd00, 0x2f2f2f00,
+ 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500,
+ 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500,
+ 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00,
+ 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300,
+ 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500,
+ 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00,
+ 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300,
+ 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900,
+ 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00,
+ 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400,
+ 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00,
+ 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00,
+ 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300,
+ 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700,
+ 0x42424200, 0xb3b3b300, 0xc7c7c700, 0xeaeaea00,
+ 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300,
+ 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000
+};
+
+static inline u32 rotl32(u32 v, u32 r)
+{
+ return ((v << r) | (v >> (32 - r)));
+}
+
+static inline u32 rotr32(u32 v, u32 r)
+{
+ return ((v >> r) | (v << (32 - r)));
+}
+
+static inline u32 bswap32(u32 v)
+{
+ return ((v << 24) ^
+ (v >> 24) ^
+ ((v & 0x0000ff00) << 8) ^
+ ((v & 0x00ff0000) >> 8));
+}
+
+static inline u8 get_u8(u32 x, u32 y)
+{
+ return (x >> ((3 - y) * 8));
+}
+
+static inline u32 make_u32(u8 v0, u8 v1, u8 v2, u8 v3)
+{
+ return ((u32)v0 << 24) | ((u32)v1 << 16) | ((u32)v2 << 8) | ((u32)v3);
+}
+
+static inline u32 aria_m(u32 t0)
+{
+ return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16);
+}
+
+/* S-Box Layer 1 + M */
+static inline void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = s1[get_u8(*t0, 0)] ^
+ s2[get_u8(*t0, 1)] ^
+ x1[get_u8(*t0, 2)] ^
+ x2[get_u8(*t0, 3)];
+ *t1 = s1[get_u8(*t1, 0)] ^
+ s2[get_u8(*t1, 1)] ^
+ x1[get_u8(*t1, 2)] ^
+ x2[get_u8(*t1, 3)];
+ *t2 = s1[get_u8(*t2, 0)] ^
+ s2[get_u8(*t2, 1)] ^
+ x1[get_u8(*t2, 2)] ^
+ x2[get_u8(*t2, 3)];
+ *t3 = s1[get_u8(*t3, 0)] ^
+ s2[get_u8(*t3, 1)] ^
+ x1[get_u8(*t3, 2)] ^
+ x2[get_u8(*t3, 3)];
+}
+
+/* S-Box Layer 2 + M */
+static inline void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = x1[get_u8(*t0, 0)] ^
+ x2[get_u8(*t0, 1)] ^
+ s1[get_u8(*t0, 2)] ^
+ s2[get_u8(*t0, 3)];
+ *t1 = x1[get_u8(*t1, 0)] ^
+ x2[get_u8(*t1, 1)] ^
+ s1[get_u8(*t1, 2)] ^
+ s2[get_u8(*t1, 3)];
+ *t2 = x1[get_u8(*t2, 0)] ^
+ x2[get_u8(*t2, 1)] ^
+ s1[get_u8(*t2, 2)] ^
+ s2[get_u8(*t2, 3)];
+ *t3 = x1[get_u8(*t3, 0)] ^
+ x2[get_u8(*t3, 1)] ^
+ s1[get_u8(*t3, 2)] ^
+ s2[get_u8(*t3, 3)];
+}
+
+/* Word-level diffusion */
+static inline void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 ^= *t2;
+ *t2 ^= *t3;
+ *t0 ^= *t1;
+
+ *t3 ^= *t1;
+ *t2 ^= *t0;
+ *t1 ^= *t2;
+}
+
+/* Byte-level diffusion */
+static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff);
+ *t2 = rotr32(*t2, 16);
+ *t3 = bswap32(*t3);
+}
+
+/* Key XOR Layer */
+static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 ^= rk[0];
+ *t1 ^= rk[1];
+ *t2 ^= rk[2];
+ *t3 ^= rk[3];
+}
+/* Odd round Substitution & Diffusion */
+static inline void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Even round Substitution & Diffusion */
+static inline void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t3, t0, t1);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Q, R Macro expanded ARIA GSRK */
+static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)
+{
+ int q = 4 - (n / 32);
+ int r = n % 32;
+
+ rk[0] = (x[0]) ^
+ ((y[q % 4]) >> r) ^
+ ((y[(q + 3) % 4]) << (32 - r));
+ rk[1] = (x[1]) ^
+ ((y[(q + 1) % 4]) >> r) ^
+ ((y[q % 4]) << (32 - r));
+ rk[2] = (x[2]) ^
+ ((y[(q + 2) % 4]) >> r) ^
+ ((y[(q + 1) % 4]) << (32 - r));
+ rk[3] = (x[3]) ^
+ ((y[(q + 3) % 4]) >> r) ^
+ ((y[(q + 2) % 4]) << (32 - r));
+}
+
+void aria_encrypt(void *ctx, u8 *out, const u8 *in);
+void aria_decrypt(void *ctx, u8 *out, const u8 *in);
+int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+
+#endif
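
Usage sketch (illustrative, not part of this diff): the exported helpers are intended to be shared with per-arch glue code; a one-block wrapper over the expanded round keys could look like this (my_aria_encrypt_one is a placeholder name):

	static void my_aria_encrypt_one(struct crypto_tfm *tfm, u8 *dst,
					const u8 *src)
	{
		struct aria_ctx *ctx = crypto_tfm_ctx(tfm);

		aria_encrypt(ctx, dst, src);	/* keys expanded via aria_set_key() */
	}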
diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h
deleted file mode 100644
index 48198c36d6b9..000000000000
--- a/include/crypto/asym_tpm_subtype.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef _LINUX_ASYM_TPM_SUBTYPE_H
-#define _LINUX_ASYM_TPM_SUBTYPE_H
-
-#include <linux/keyctl.h>
-
-struct tpm_key {
- void *blob;
- u32 blob_len;
- uint16_t key_len; /* Size in bits of the key */
- const void *pub_key; /* pointer inside blob to the public key bytes */
- uint16_t pub_key_len; /* length of the public key */
-};
-
-struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len);
-
-extern struct asymmetric_key_subtype asym_tpm_subtype;
-
-#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
index 0b8e6bc55301..f3b37cbb3131 100644
--- a/include/crypto/b128ops.h
+++ b/include/crypto/b128ops.h
@@ -50,10 +50,6 @@
#include <linux/types.h>
typedef struct {
- u64 a, b;
-} u128;
-
-typedef struct {
__be64 a, b;
} be128;
@@ -61,20 +57,16 @@ typedef struct {
__le64 b, a;
} le128;
-static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
{
r->a = p->a ^ q->a;
r->b = p->b ^ q->b;
}
-static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
-{
- u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
-}
-
static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
{
- u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+ r->a = p->a ^ q->a;
+ r->b = p->b ^ q->b;
}
#endif /* _CRYPTO_B128OPS_H */
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h
new file mode 100644
index 000000000000..0c0176285349
--- /dev/null
+++ b/include/crypto/blake2b.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _CRYPTO_BLAKE2B_H
+#define _CRYPTO_BLAKE2B_H
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+enum blake2b_lengths {
+ BLAKE2B_BLOCK_SIZE = 128,
+ BLAKE2B_HASH_SIZE = 64,
+ BLAKE2B_KEY_SIZE = 64,
+
+ BLAKE2B_160_HASH_SIZE = 20,
+ BLAKE2B_256_HASH_SIZE = 32,
+ BLAKE2B_384_HASH_SIZE = 48,
+ BLAKE2B_512_HASH_SIZE = 64,
+};
+
+struct blake2b_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
+ u64 h[8];
+ u64 t[2];
+ u64 f[2];
+ u8 buf[BLAKE2B_BLOCK_SIZE];
+ unsigned int buflen;
+ unsigned int outlen;
+};
+
+enum blake2b_iv {
+ BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL,
+ BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL,
+ BLAKE2B_IV2 = 0x3C6EF372FE94F82BULL,
+ BLAKE2B_IV3 = 0xA54FF53A5F1D36F1ULL,
+ BLAKE2B_IV4 = 0x510E527FADE682D1ULL,
+ BLAKE2B_IV5 = 0x9B05688C2B3E6C1FULL,
+ BLAKE2B_IV6 = 0x1F83D9ABFB41BD6BULL,
+ BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL,
+};
+
+static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
+ const void *key, size_t keylen)
+{
+ state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ state->h[1] = BLAKE2B_IV1;
+ state->h[2] = BLAKE2B_IV2;
+ state->h[3] = BLAKE2B_IV3;
+ state->h[4] = BLAKE2B_IV4;
+ state->h[5] = BLAKE2B_IV5;
+ state->h[6] = BLAKE2B_IV6;
+ state->h[7] = BLAKE2B_IV7;
+ state->t[0] = 0;
+ state->t[1] = 0;
+ state->f[0] = 0;
+ state->f[1] = 0;
+ state->buflen = 0;
+ state->outlen = outlen;
+ if (keylen) {
+ memcpy(state->buf, key, keylen);
+ memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
+ state->buflen = BLAKE2B_BLOCK_SIZE;
+ }
+}
+
+#endif /* _CRYPTO_BLAKE2B_H */
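
Usage sketch (illustrative, not part of this diff): __blake2b_init() covers both keyed and unkeyed use; a keyed BLAKE2b-256 state is started like so (key/keylen are caller-provided, keylen == 0 gives the plain variant):

	struct blake2b_state state;

	__blake2b_init(&state, BLAKE2B_256_HASH_SIZE, key, keylen);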
diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
index b471deac28ff..f9ffd39194eb 100644
--- a/include/crypto/blake2s.h
+++ b/include/crypto/blake2s.h
@@ -3,15 +3,14 @@
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
-#ifndef BLAKE2S_H
-#define BLAKE2S_H
+#ifndef _CRYPTO_BLAKE2S_H
+#define _CRYPTO_BLAKE2S_H
+#include <linux/bug.h>
+#include <linux/kconfig.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/string.h>
-#include <asm/bug.h>
-
enum blake2s_lengths {
BLAKE2S_BLOCK_SIZE = 64,
BLAKE2S_HASH_SIZE = 32,
@@ -24,6 +23,7 @@ enum blake2s_lengths {
};
struct blake2s_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
u32 h[8];
u32 t[2];
u32 f[2];
@@ -43,29 +43,34 @@ enum blake2s_iv {
BLAKE2S_IV7 = 0x5BE0CD19UL,
};
-void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
-void blake2s_final(struct blake2s_state *state, u8 *out);
-
-static inline void blake2s_init_param(struct blake2s_state *state,
- const u32 param)
+static inline void __blake2s_init(struct blake2s_state *state, size_t outlen,
+ const void *key, size_t keylen)
{
- *state = (struct blake2s_state){{
- BLAKE2S_IV0 ^ param,
- BLAKE2S_IV1,
- BLAKE2S_IV2,
- BLAKE2S_IV3,
- BLAKE2S_IV4,
- BLAKE2S_IV5,
- BLAKE2S_IV6,
- BLAKE2S_IV7,
- }};
+ state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ state->h[1] = BLAKE2S_IV1;
+ state->h[2] = BLAKE2S_IV2;
+ state->h[3] = BLAKE2S_IV3;
+ state->h[4] = BLAKE2S_IV4;
+ state->h[5] = BLAKE2S_IV5;
+ state->h[6] = BLAKE2S_IV6;
+ state->h[7] = BLAKE2S_IV7;
+ state->t[0] = 0;
+ state->t[1] = 0;
+ state->f[0] = 0;
+ state->f[1] = 0;
+ state->buflen = 0;
+ state->outlen = outlen;
+ if (keylen) {
+ memcpy(state->buf, key, keylen);
+ memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen);
+ state->buflen = BLAKE2S_BLOCK_SIZE;
+ }
}
static inline void blake2s_init(struct blake2s_state *state,
const size_t outlen)
{
- blake2s_init_param(state, 0x01010000 | outlen);
- state->outlen = outlen;
+ __blake2s_init(state, outlen, NULL, 0);
}
static inline void blake2s_init_key(struct blake2s_state *state,
@@ -75,12 +80,12 @@ static inline void blake2s_init_key(struct blake2s_state *state,
WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE ||
!key || !keylen || keylen > BLAKE2S_KEY_SIZE));
- blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen);
- memcpy(state->buf, key, keylen);
- state->buflen = BLAKE2S_BLOCK_SIZE;
- state->outlen = outlen;
+ __blake2s_init(state, outlen, key, keylen);
}
+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
+void blake2s_final(struct blake2s_state *state, u8 *out);
+
static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
const size_t outlen, const size_t inlen,
const size_t keylen)
@@ -91,16 +96,9 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE ||
(!key && keylen)));
- if (keylen)
- blake2s_init_key(&state, outlen, key, keylen);
- else
- blake2s_init(&state, outlen);
-
+ __blake2s_init(&state, outlen, key, keylen);
blake2s_update(&state, in, inlen);
blake2s_final(&state, out);
}
-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
- const size_t keylen);
-
-#endif /* BLAKE2S_H */
+#endif /* _CRYPTO_BLAKE2S_H */
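
Usage sketch (illustrative, not part of this diff): with blake2s_init_key() now a thin wrapper around __blake2s_init(), the one-shot helper covers both cases (data/key and their lengths are caller-provided):

	u8 digest[BLAKE2S_HASH_SIZE];

	blake2s(digest, data, key, BLAKE2S_HASH_SIZE, data_len, key_len);
	/* key == NULL && key_len == 0 hashes unkeyed */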
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
deleted file mode 100644
index 2b6422db42e2..000000000000
--- a/include/crypto/cbc.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CBC: Cipher Block Chaining mode
- *
- * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#ifndef _CRYPTO_CBC_H
-#define _CRYPTO_CBC_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-static inline int crypto_cbc_encrypt_segment(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(iv, src, bsize);
- fn(tfm, iv, dst);
- memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_inplace(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(src, iv, bsize);
- fn(tfm, src, src);
- iv = src;
-
- src += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
- else
- err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
- err = skcipher_walk_done(&walk, err);
- }
-
- return err;
-}
-
-static inline int crypto_cbc_decrypt_segment(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- fn(tfm, src, dst);
- crypto_xor(dst, iv, bsize);
- iv = src;
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_inplace(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 last_iv[MAX_CIPHER_BLOCKSIZE];
-
- /* Start of the last block. */
- src += nbytes - (nbytes & (bsize - 1)) - bsize;
- memcpy(last_iv, src, bsize);
-
- for (;;) {
- fn(tfm, src, src);
- if ((nbytes -= bsize) < bsize)
- break;
- crypto_xor(src, src - bsize, bsize);
- src -= bsize;
- }
-
- crypto_xor(src, walk->iv, bsize);
- memcpy(walk->iv, last_iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_blocks(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- if (walk->src.virt.addr == walk->dst.virt.addr)
- return crypto_cbc_decrypt_inplace(walk, tfm, fn);
- else
- return crypto_cbc_decrypt_segment(walk, tfm, fn);
-}
-
-#endif /* _CRYPTO_CBC_H */
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 3a1c72fdb7cf..b3ea73b81944 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -47,13 +47,25 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
hchacha_block_generic(state, out, nrounds);
}
+enum chacha_constants { /* expand 32-byte k */
+ CHACHA_CONSTANT_EXPA = 0x61707865U,
+ CHACHA_CONSTANT_ND_3 = 0x3320646eU,
+ CHACHA_CONSTANT_2_BY = 0x79622d32U,
+ CHACHA_CONSTANT_TE_K = 0x6b206574U
+};
+
+static inline void chacha_init_consts(u32 *state)
+{
+ state[0] = CHACHA_CONSTANT_EXPA;
+ state[1] = CHACHA_CONSTANT_ND_3;
+ state[2] = CHACHA_CONSTANT_2_BY;
+ state[3] = CHACHA_CONSTANT_TE_K;
+}
+
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
{
- state[0] = 0x61707865; /* "expa" */
- state[1] = 0x3320646e; /* "nd 3" */
- state[2] = 0x79622d32; /* "2-by" */
- state[3] = 0x6b206574; /* "te k" */
+ chacha_init_consts(state);
state[4] = key[0];
state[5] = key[1];
state[6] = key[2];
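
Usage sketch (illustrative, not part of this diff): chacha_init_consts() feeds the shared init path; a library user typically combines the generic init and crypt helpers (key_words is the 8-word key, iv the 16-byte counter+nonce):

	u32 state[CHACHA_STATE_WORDS];

	chacha_init_generic(state, key_words, iv);
	chacha_crypt_generic(state, dst, src, len, 20);	/* 20 rounds */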
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 23169f4d87e6..796d986e58e1 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -13,7 +13,8 @@
#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h
index 4e6dc840b159..ece6a9b5fafc 100644
--- a/include/crypto/curve25519.h
+++ b/include/crypto/curve25519.h
@@ -28,6 +28,8 @@ void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE]);
+bool curve25519_selftest(void);
+
static inline
bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index d71e9858ab86..7b863e911cb4 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -24,21 +24,17 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
- * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
- * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
- void *key;
- void *p;
- void *q;
- void *g;
+ const void *key;
+ const void *p;
+ const void *g;
unsigned int key_size;
unsigned int p_size;
- unsigned int q_size;
unsigned int g_size;
};
@@ -83,4 +79,20 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
*/
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
+/**
+ * __crypto_dh_decode_key() - decode a private key without parameter checks
+ * @buf: Buffer holding a packet key that should be decoded
+ * @len: Length of the packet private key buffer
+ * @params: Buffer allocated by the caller that is filled with the
+ * unpacked DH private key.
+ *
+ * Internal function providing the same services as the exported
+ * crypto_dh_decode_key(), but without any of those basic parameter
+ * checks conducted by the latter.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
+int __crypto_dh_decode_key(const char *buf, unsigned int len,
+ struct dh *params);
+
#endif
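
With @q gone and the buffers now const, a private key is packed for crypto_kpp_set_secret() roughly as follows; prime, generator, priv and their lengths are caller-provided buffers assumed only for this sketch.

    struct dh params = {
            .key = priv,      .key_size = priv_len,
            .p   = prime,     .p_size   = prime_len,
            .g   = generator, .g_size   = generator_len,
    };
    unsigned int len = crypto_dh_key_len(&params);  /* size of the packed blob */
    char *buf = kmalloc(len, GFP_KERNEL);
    int err = buf ? crypto_dh_encode_key(buf, len, &params) : -ENOMEM;
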
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index c4165126937e..af5ad51d3eef 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -105,6 +105,12 @@ struct drbg_test_data {
struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
};
+enum drbg_seed_state {
+ DRBG_SEED_STATE_UNSEEDED,
+ DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
+ DRBG_SEED_STATE_FULL,
+};
+
struct drbg_state {
struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
@@ -127,16 +133,15 @@ struct drbg_state {
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
- bool seeded; /* DRBG fully seeded? */
+ enum drbg_seed_state seeded; /* DRBG fully seeded? */
+ unsigned long last_seed_time;
bool pr; /* Prediction resistance enabled? */
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
- struct work_struct seed_work; /* asynchronous seeding support */
struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
struct drbg_string test_data;
- struct random_ready_callback random_ready;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
new file mode 100644
index 000000000000..7d90c5e82266
--- /dev/null
+++ b/include/crypto/ecc_curve.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon */
+
+#ifndef _CRYTO_ECC_CURVE_H
+#define _CRYTO_ECC_CURVE_H
+
+#include <linux/types.h>
+
+/**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x: X coordinate in vli form.
+ * @y: Y coordinate in vli form.
+ * @ndigits: Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+ u64 *x;
+ u64 *y;
+ u8 ndigits;
+};
+
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name: Short name of the curve.
+ * @nbits: The number of bits of a curve.
+ * @g: Generator point of the curve.
+ * @p: Prime number; if Barrett's reduction is used for this curve,
+ * the pre-calculated value 'mu' is appended to @p after ndigits.
+ * Use of Barrett's reduction is heuristically determined in
+ * vli_mmod_fast().
+ * @n: Order of the curve group.
+ * @a: Curve parameter a.
+ * @b: Curve parameter b.
+ */
+struct ecc_curve {
+ char *name;
+ u32 nbits;
+ struct ecc_point g;
+ u64 *p;
+ u64 *n;
+ u64 *a;
+ u64 *b;
+};
+
+/**
+ * ecc_get_curve() - get an elliptic curve by ID
+ * @curve_id: curve ID as defined in 'include/crypto/ecdh.h'
+ *
+ * Returns the curve if the lookup was successful, NULL otherwise
+ */
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id);
+
+/**
+ * ecc_get_curve25519() - get the curve25519 curve
+ *
+ * Returns the curve25519 curve
+ */
+const struct ecc_curve *ecc_get_curve25519(void);
+
+#endif
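
A short sketch of how a caller might look up curve parameters with this new header; ECC_CURVE_NIST_P256 comes from ecdh.h and the pr_debug() line is illustrative.

    const struct ecc_curve *curve = ecc_get_curve(ECC_CURVE_NIST_P256);

    if (!curve)
            return -EINVAL;         /* unknown or unsupported curve ID */
    pr_debug("curve %s: %u bits, %u digits\n",
             curve->name, curve->nbits, curve->g.ndigits);
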
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index a5b805b5526d..9784ecdd2fb4 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -25,16 +25,16 @@
/* Curves IDs */
#define ECC_CURVE_NIST_P192 0x0001
#define ECC_CURVE_NIST_P256 0x0002
+#define ECC_CURVE_NIST_P384 0x0003
+#define ECC_CURVE_NIST_P521 0x0004
/**
* struct ecdh - define an ECDH private key
*
- * @curve_id: ECC curve the key is based on.
* @key: Private ECDH key
* @key_size: Size of the private ECDH key
*/
struct ecdh {
- unsigned short curve_id;
char *key;
unsigned short key_size;
};
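
Since @curve_id is gone, the curve is now chosen by the algorithm name (for example "ecdh-nist-p256") when allocating the kpp tfm; the key itself is packed roughly like this, with raw_priv and raw_priv_len assumed caller data.

    struct ecdh p = {
            .key      = raw_priv,
            .key_size = raw_priv_len,
    };
    unsigned int len = crypto_ecdh_key_len(&p);
    char *buf = kmalloc(len, GFP_KERNEL);
    int err = buf ? crypto_ecdh_encode_key(buf, len, &p) : -ENOMEM;
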
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 3f06e40d063a..545dbefe3e13 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -7,86 +7,47 @@
#ifndef _CRYPTO_ENGINE_H
#define _CRYPTO_ENGINE_H
-#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
+#include <crypto/kpp.h>
#include <crypto/skcipher.h>
+#include <linux/types.h>
-#define ENGINE_NAME_LEN 30
-/*
- * struct crypto_engine - crypto hardware engine
- * @name: the engine name
- * @idling: the engine is entering idle state
- * @busy: request pump is busy
- * @running: the engine is on working
- * @retry_support: indication that the hardware allows re-execution
- * of a failed backlog request
- * crypto-engine, in head position to keep order
- * @list: link with the global crypto engine list
- * @queue_lock: spinlock to syncronise access to request queue
- * @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
- * @kworker: kthread worker struct for request pump
- * @pump_requests: work struct for scheduling work to the request pump
- * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
- */
-struct crypto_engine {
- char name[ENGINE_NAME_LEN];
- bool idling;
- bool busy;
- bool running;
-
- bool retry_support;
-
- struct list_head list;
- spinlock_t queue_lock;
- struct crypto_queue queue;
- struct device *dev;
-
- bool rt;
-
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
- struct kthread_worker *kworker;
- struct kthread_work pump_requests;
-
- void *priv_data;
- struct crypto_async_request *cur_req;
-};
+struct crypto_engine;
+struct device;
/*
* struct crypto_engine_op - crypto hardware engine operations
- * @prepare__request: do some prepare if need before handle the current request
- * @unprepare_request: undo any work done by prepare_request()
* @do_one_request: do encryption for current request
*/
struct crypto_engine_op {
- int (*prepare_request)(struct crypto_engine *engine,
- void *areq);
- int (*unprepare_request)(struct crypto_engine *engine,
- void *areq);
int (*do_one_request)(struct crypto_engine *engine,
void *areq);
};
-struct crypto_engine_ctx {
+struct aead_engine_alg {
+ struct aead_alg base;
+ struct crypto_engine_op op;
+};
+
+struct ahash_engine_alg {
+ struct ahash_alg base;
+ struct crypto_engine_op op;
+};
+
+struct akcipher_engine_alg {
+ struct akcipher_alg base;
+ struct crypto_engine_op op;
+};
+
+struct kpp_engine_alg {
+ struct kpp_alg base;
+ struct crypto_engine_op op;
+};
+
+struct skcipher_engine_alg {
+ struct skcipher_alg base;
struct crypto_engine_op op;
};
@@ -96,6 +57,8 @@ int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req);
+int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
+ struct kpp_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req);
void crypto_finalize_aead_request(struct crypto_engine *engine,
@@ -104,6 +67,8 @@ void crypto_finalize_akcipher_request(struct crypto_engine *engine,
struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err);
+void crypto_finalize_kpp_request(struct crypto_engine *engine,
+ struct kpp_request *req, int err);
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
struct skcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
@@ -113,6 +78,30 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
-int crypto_engine_exit(struct crypto_engine *engine);
+void crypto_engine_exit(struct crypto_engine *engine);
+
+int crypto_engine_register_aead(struct aead_engine_alg *alg);
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg);
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count);
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg);
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg);
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count);
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+ int count);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg);
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg);
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg);
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg);
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+ int count);
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+ int count);
#endif /* _CRYPTO_ENGINE_H */
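
A rough sketch of the new registration style: a driver wraps its skcipher_alg and the engine callback in one skcipher_engine_alg and registers both together. All names below are illustrative, not a real driver.

    static int my_drv_do_one(struct crypto_engine *engine, void *areq)
    {
            struct skcipher_request *req =
                    container_of(areq, struct skcipher_request, base);

            /* program the hardware, then later call
             * crypto_finalize_skcipher_request(engine, req, err) */
            return 0;
    }

    static struct skcipher_engine_alg my_drv_alg = {
            .base = { /* usual skcipher_alg callbacks: setkey, encrypt, ... */ },
            .op   = { .do_one_request = my_drv_do_one },
    };

    /* in probe(): */
    err = crypto_engine_register_skcipher(&my_drv_alg);
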
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
index 9d7eff04f224..fd9df607a836 100644
--- a/include/crypto/gcm.h
+++ b/include/crypto/gcm.h
@@ -3,6 +3,9 @@
#include <linux/errno.h>
+#include <crypto/aes.h>
+#include <crypto/gf128mul.h>
+
#define GCM_AES_IV_SIZE 12
#define GCM_RFC4106_IV_SIZE 8
#define GCM_RFC4543_IV_SIZE 8
@@ -60,4 +63,23 @@ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
return 0;
}
+
+struct aesgcm_ctx {
+ be128 ghash_key;
+ struct crypto_aes_ctx aes_ctx;
+ unsigned int authsize;
+};
+
+int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
+ unsigned int keysize, unsigned int authsize);
+
+void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int crypt_len, const u8 *assoc, int assoc_len,
+ const u8 iv[GCM_AES_IV_SIZE], u8 *authtag);
+
+bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
+ const u8 *src, int crypt_len, const u8 *assoc,
+ int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
+ const u8 *authtag);
+
#endif
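
A minimal sketch of the synchronous AES-GCM library interface declared above; key, pt, ct, assoc and their lengths are assumed caller buffers, and the IV must never be reused with the same key.

    struct aesgcm_ctx ctx;
    u8 iv[GCM_AES_IV_SIZE] = {};    /* illustrative only; use a unique IV per message */
    u8 tag[16];

    if (aesgcm_expandkey(&ctx, key, 16, sizeof(tag)))
            return -EINVAL;
    aesgcm_encrypt(&ctx, ct, pt, pt_len, assoc, assoc_len, iv, tag);
    if (!aesgcm_decrypt(&ctx, pt, ct, pt_len, assoc, assoc_len, iv, tag))
            return -EBADMSG;        /* authentication failed */
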
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0d1b403888c9..2d5ea9f9ff43 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -8,6 +8,7 @@
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
+#include <linux/atomic.h>
#include <linux/crypto.h>
#include <linux/string.h>
@@ -22,7 +23,7 @@ struct crypto_ahash;
* crypto_unregister_shash().
*/
-/**
+/*
* struct hash_alg_common - define properties of message digest
* @digestsize: Size of the result of the transformation. A buffer of this size
* must be available to the @final and @finup calls, so they can
@@ -39,12 +40,13 @@ struct crypto_ahash;
* The hash_alg_common data structure now adds the hash-specific
* information.
*/
-struct hash_alg_common {
- unsigned int digestsize;
- unsigned int statesize;
-
- struct crypto_alg base;
-};
+#define HASH_ALG_COMMON { \
+ unsigned int digestsize; \
+ unsigned int statesize; \
+ \
+ struct crypto_alg base; \
+}
+struct hash_alg_common HASH_ALG_COMMON;
struct ahash_request {
struct crypto_async_request base;
@@ -59,11 +61,6 @@ struct ahash_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-#define AHASH_REQUEST_ON_STACK(name, ahash) \
- char __##name##_desc[sizeof(struct ahash_request) + \
- crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
- struct ahash_request *name = (void *)__##name##_desc
-
/**
* struct ahash_alg - asynchronous message digest definition
* @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
@@ -123,6 +120,18 @@ struct ahash_request {
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
+ * @init_tfm: Initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation
+ * time, right after the transformation context was
+ * allocated. In case the cryptographic hardware has
+ * some special requirements which need to be handled
+ * by software, this function shall check for the precise
+ * requirement of the transformation and put any software
+ * fallbacks in place.
+ * @exit_tfm: Deinitialize the cryptographic transformation object.
+ * This is a counterpart to @init_tfm, used to remove
+ * various changes set in @init_tfm.
+ * @clone_tfm: Copy transform into new object, may allocate memory.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
@@ -135,13 +144,16 @@ struct ahash_alg {
int (*import)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+ int (*init_tfm)(struct crypto_ahash *tfm);
+ void (*exit_tfm)(struct crypto_ahash *tfm);
+ int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
struct hash_alg_common halg;
};
struct shash_desc {
struct crypto_shash *tfm;
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
+ void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
};
#define HASH_MAX_DIGESTSIZE 64
@@ -152,11 +164,9 @@ struct shash_desc {
*/
#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
-#define HASH_MAX_STATESIZE 512
-
-#define SHASH_DESC_ON_STACK(shash, ctx) \
- char __##shash##_desc[sizeof(struct shash_desc) + \
- HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
+#define SHASH_DESC_ON_STACK(shash, ctx) \
+ char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
+ __aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
/**
@@ -180,12 +190,12 @@ struct shash_desc {
* @exit_tfm: Deinitialize the cryptographic transformation object.
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
- * @digestsize: see struct ahash_alg
- * @statesize: see struct ahash_alg
+ * @clone_tfm: Copy transform into new object, may allocate memory.
* @descsize: Size of the operational state for the message digest. This state
* size is the memory size that needs to be allocated for
* shash_desc.__ctx
- * @base: internally used
+ * @halg: see struct hash_alg_common
+ * @HASH_ALG_COMMON: see struct hash_alg_common
*/
struct shash_alg {
int (*init)(struct shash_desc *desc);
@@ -202,28 +212,20 @@ struct shash_alg {
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
void (*exit_tfm)(struct crypto_shash *tfm);
+ int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);
unsigned int descsize;
- /* These fields must match hash_alg_common. */
- unsigned int digestsize
- __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
- unsigned int statesize;
-
- struct crypto_alg base;
+ union {
+ struct HASH_ALG_COMMON;
+ struct hash_alg_common halg;
+ };
};
+#undef HASH_ALG_COMMON
struct crypto_ahash {
- int (*init)(struct ahash_request *req);
- int (*update)(struct ahash_request *req);
- int (*final)(struct ahash_request *req);
- int (*finup)(struct ahash_request *req);
- int (*digest)(struct ahash_request *req);
- int (*export)(struct ahash_request *req, void *out);
- int (*import)(struct ahash_request *req, const void *in);
- int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen);
-
+ bool using_shash; /* Underlying algorithm is shash, not ahash */
+ unsigned int statesize;
unsigned int reqsize;
struct crypto_tfm base;
};
@@ -265,6 +267,8 @@ static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask);
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);
+
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
return &tfm->base;
@@ -273,6 +277,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
/**
* crypto_free_ahash() - zeroize and free the ahash handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
@@ -301,12 +307,6 @@ static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
}
-static inline unsigned int crypto_ahash_alignmask(
- struct crypto_ahash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
-}
-
/**
* crypto_ahash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -360,7 +360,7 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
*/
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
- return crypto_hash_alg_common(tfm)->statesize;
+ return tfm->statesize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
@@ -448,7 +448,7 @@ int crypto_ahash_finup(struct ahash_request *req);
*
* Return:
* 0 if the message digest was successfully calculated;
- * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later;
+ * -EINPROGRESS if data is fed into hardware (DMA) or queued for later;
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
@@ -478,10 +478,7 @@ int crypto_ahash_digest(struct ahash_request *req);
*
* Return: 0 if the export was successful; < 0 if an error occurred
*/
-static inline int crypto_ahash_export(struct ahash_request *req, void *out)
-{
- return crypto_ahash_reqtfm(req)->export(req, out);
-}
+int crypto_ahash_export(struct ahash_request *req, void *out);
/**
* crypto_ahash_import() - import message digest state
@@ -494,15 +491,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
*
* Return: 0 if the import was successful; < 0 if an error occurred
*/
-static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->import(req, in);
-}
+int crypto_ahash_import(struct ahash_request *req, const void *in);
/**
* crypto_ahash_init() - (re)initialize message digest handle
@@ -515,15 +504,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
*
* Return: see crypto_ahash_final()
*/
-static inline int crypto_ahash_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->init(req);
-}
+int crypto_ahash_init(struct ahash_request *req);
/**
* crypto_ahash_update() - add data to message digest for processing
@@ -536,18 +517,7 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*
* Return: see crypto_ahash_final()
*/
-static inline int crypto_ahash_update(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
-
- crypto_stats_get(alg);
- ret = crypto_ahash_reqtfm(req)->update(req);
- crypto_stats_ahash_update(nbytes, ret, alg);
- return ret;
-}
+int crypto_ahash_update(struct ahash_request *req);
/**
* DOC: Asynchronous Hash Request Handle
@@ -586,19 +556,20 @@ static inline void ahash_request_set_tfm(struct ahash_request *req,
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
-static inline struct ahash_request *ahash_request_alloc(
+static inline struct ahash_request *ahash_request_alloc_noprof(
struct crypto_ahash *tfm, gfp_t gfp)
{
struct ahash_request *req;
- req = kmalloc(sizeof(struct ahash_request) +
- crypto_ahash_reqsize(tfm), gfp);
+ req = kmalloc_noprof(sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(tfm), gfp);
if (likely(req))
ahash_request_set_tfm(req, tfm);
return req;
}
+#define ahash_request_alloc(...) alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__))
/**
* ahash_request_free() - zeroize and free the request data structure
@@ -708,6 +679,10 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);
+
+int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
return &tfm->base;
@@ -716,6 +691,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
/**
* crypto_free_shash() - zeroize and free the message digest handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
@@ -732,12 +709,6 @@ static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
}
-static inline unsigned int crypto_shash_alignmask(
- struct crypto_shash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
-}
-
/**
* crypto_shash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -886,10 +857,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
* Context: Any context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
-static inline int crypto_shash_export(struct shash_desc *desc, void *out)
-{
- return crypto_shash_alg(desc->tfm)->export(desc, out);
-}
+int crypto_shash_export(struct shash_desc *desc, void *out);
/**
* crypto_shash_import() - import operational state
@@ -903,15 +871,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
* Context: Any context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
-static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
-{
- struct crypto_shash *tfm = desc->tfm;
-
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_shash_alg(tfm)->import(desc, in);
-}
+int crypto_shash_import(struct shash_desc *desc, const void *in);
/**
* crypto_shash_init() - (re)initialize message digest
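
A small sketch of the stack descriptor pattern after this change, with SHASH_DESC_ON_STACK() now aligned to struct shash_desc rather than CRYPTO_MINALIGN; "sha256", data, data_len and out are assumptions for the sketch.

    struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
    int err;

    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
    {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            err = crypto_shash_digest(desc, data, data_len, out);
    }
    crypto_free_shash(tfm);
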
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index eb9d2e368969..d6927739f8b2 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -8,7 +8,9 @@
#ifndef _CRYPTO_HASH_INFO_H
#define _CRYPTO_HASH_INFO_H
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
#include <crypto/md5.h>
#include <crypto/streebog.h>
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index ee6412314f8f..f7b3b93f3a49 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -21,8 +21,6 @@
#define ALG_MAX_PAGES 16
-struct crypto_async_request;
-
struct alg_sock {
/* struct sock must be the first member of struct alg_sock */
struct sock sk;
@@ -46,6 +44,7 @@ struct af_alg_type {
void *(*bind)(const char *name, u32 type, u32 mask);
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ int (*setentropy)(void *private, sockptr_t entropy, unsigned int len);
int (*accept)(void *private, struct sock *sk);
int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
@@ -57,9 +56,9 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES + 1];
- struct page *pages[ALG_MAX_PAGES];
- unsigned int npages;
+ struct sg_table sgt;
+ struct scatterlist sgl[ALG_MAX_PAGES + 1];
+ bool need_unpin;
};
/* TX SGL entry */
@@ -122,6 +121,7 @@ struct af_alg_async_req {
*
* @tsgl_list: Link to TX SGL
* @iv: IV for cipher operation
+ * @state: Existing state for continuing operation
* @aead_assoclen: Length of AAD for AEAD cipher operations
* @completion: Work queue for synchronous operation
* @used: TX bytes sent to kernel. This variable is used to
@@ -137,11 +137,13 @@ struct af_alg_async_req {
* recvmsg is invoked.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
*/
struct af_alg_ctx {
struct list_head tsgl_list;
void *iv;
+ void *state;
size_t aead_assoclen;
struct crypto_wait wait;
@@ -155,6 +157,8 @@ struct af_alg_ctx {
bool init;
unsigned int len;
+
+ unsigned int inflight;
};
int af_alg_register_type(const struct af_alg_type *type);
@@ -162,9 +166,9 @@ int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
-int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
+int af_alg_accept(struct sock *sk, struct socket *newsock,
+ struct proto_accept_arg *arg);
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
static inline struct alg_sock *alg_sk(struct sock *sk)
@@ -231,10 +235,8 @@ void af_alg_wmem_wakeup(struct sock *sk);
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
-void af_alg_async_cb(struct crypto_async_request *_req, int err);
+void af_alg_async_cb(void *data, int err);
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index cfc47e18820f..8831edaafc05 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -8,7 +8,46 @@
*/
#ifndef _CRYPTO_ACOMP_INT_H
#define _CRYPTO_ACOMP_INT_H
+
#include <crypto/acompress.h>
+#include <crypto/algapi.h>
+
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @dst_free: Frees destination buffer if allocated inside the algorithm
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Context size for (de)compression requests
+ * @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with scomp
+ */
+struct acomp_alg {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ void (*dst_free)(struct scatterlist *dst);
+ int (*init)(struct crypto_acomp *tfm);
+ void (*exit)(struct crypto_acomp *tfm);
+
+ unsigned int reqsize;
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
+};
/*
* Transform internal helpers.
@@ -26,23 +65,19 @@ static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
static inline void acomp_request_complete(struct acomp_req *req,
int err)
{
- req->base.complete(&req->base, err);
-}
-
-static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
-{
- return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
+ crypto_request_complete(&req->base, err);
}
-static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
+static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm)
{
struct acomp_req *req;
- req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
+ req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
if (likely(req))
acomp_request_set_tfm(req, tfm);
return req;
}
+#define __acomp_request_alloc(...) alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__))
static inline void __acomp_request_free(struct acomp_req *req)
{
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 27b7b0224ea6..28a95eb3182d 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -39,6 +39,11 @@ static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
return crypto_tfm_ctx(&tfm->base);
}
+static inline void *crypto_aead_ctx_dma(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
+}
+
static inline struct crypto_instance *aead_crypto_instance(
struct aead_instance *inst)
{
@@ -65,9 +70,19 @@ static inline void *aead_request_ctx(struct aead_request *req)
return req->__ctx;
}
+static inline void *aead_request_ctx_dma(struct aead_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(aead_request_ctx(req), align);
+}
+
static inline void aead_request_complete(struct aead_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline u32 aead_request_flags(struct aead_request *req)
@@ -108,35 +123,17 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
aead->reqsize = reqsize;
}
-static inline void aead_init_queue(struct aead_queue *queue,
- unsigned int max_qlen)
+static inline void crypto_aead_set_reqsize_dma(struct crypto_aead *aead,
+ unsigned int reqsize)
{
- crypto_init_queue(&queue->base, max_qlen);
-}
-
-static inline int aead_enqueue_request(struct aead_queue *queue,
- struct aead_request *request)
-{
- return crypto_enqueue_request(&queue->base, &request->base);
-}
-
-static inline struct aead_request *aead_dequeue_request(
- struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_dequeue_request(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ aead->reqsize = reqsize;
}
-static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
+static inline void aead_init_queue(struct aead_queue *queue,
+ unsigned int max_qlen)
{
- struct crypto_async_request *req;
-
- req = crypto_get_backlog(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
+ crypto_init_queue(&queue->base, max_qlen);
}
static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index 8d3220c9ab77..a0fba4b2eccf 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -33,21 +33,43 @@ static inline void *akcipher_request_ctx(struct akcipher_request *req)
return req->__ctx;
}
+static inline void *akcipher_request_ctx_dma(struct akcipher_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(akcipher_request_ctx(req), align);
+}
+
static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher,
unsigned int reqsize)
{
- crypto_akcipher_alg(akcipher)->reqsize = reqsize;
+ akcipher->reqsize = reqsize;
+}
+
+static inline void akcipher_set_reqsize_dma(struct crypto_akcipher *akcipher,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ akcipher->reqsize = reqsize;
}
static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
{
- return tfm->base.__crt_ctx;
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *akcipher_tfm_ctx_dma(struct crypto_akcipher *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
}
static inline void akcipher_request_complete(struct akcipher_request *req,
int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
new file mode 100644
index 000000000000..982fe5e8471c
--- /dev/null
+++ b/include/crypto/internal/blake2b.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2b implementations.
+ * Keep this in sync with the corresponding BLAKE2s header.
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
+#define _CRYPTO_INTERNAL_BLAKE2B_H
+
+#include <crypto/blake2b.h>
+#include <crypto/internal/hash.h>
+#include <linux/string.h>
+
+void blake2b_compress_generic(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static inline void blake2b_set_lastblock(struct blake2b_state *state)
+{
+ state->f[0] = -1;
+}
+
+typedef void (*blake2b_compress_t)(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static inline void __blake2b_update(struct blake2b_state *state,
+ const u8 *in, size_t inlen,
+ blake2b_compress_t compress)
+{
+ const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2B_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
+ /* Hash one less (full) block than strictly possible */
+ (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
+ in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+}
+
+static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
+ blake2b_compress_t compress)
+{
+ int i;
+
+ blake2b_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
+ (*compress)(state, state->buf, 1, state->buflen);
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+ memcpy(out, state->h, state->outlen);
+}
+
+/* Helper functions for shash implementations of BLAKE2b */
+
+struct blake2b_tfm_ctx {
+ u8 key[BLAKE2B_KEY_SIZE];
+ unsigned int keylen;
+};
+
+static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+
+ return 0;
+}
+
+static inline int crypto_blake2b_init(struct shash_desc *desc)
+{
+ const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct blake2b_state *state = shash_desc_ctx(desc);
+ unsigned int outlen = crypto_shash_digestsize(desc->tfm);
+
+ __blake2b_init(state, outlen, tctx->key, tctx->keylen);
+ return 0;
+}
+
+static inline int crypto_blake2b_update(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen,
+ blake2b_compress_t compress)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+
+ __blake2b_update(state, in, inlen, compress);
+ return 0;
+}
+
+static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
+ blake2b_compress_t compress)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+
+ __blake2b_final(state, out, compress);
+ return 0;
+}
+
+#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */
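
The helpers above are meant to let each implementation supply only its compress function; a generic shash wrapper would look roughly like this (function names are illustrative).

    static int blake2b_update_generic(struct shash_desc *desc,
                                      const u8 *in, unsigned int inlen)
    {
            return crypto_blake2b_update(desc, in, inlen,
                                         blake2b_compress_generic);
    }

    static int blake2b_final_generic(struct shash_desc *desc, u8 *out)
    {
            return crypto_blake2b_final(desc, out, blake2b_compress_generic);
    }
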
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
index 74ff77032e52..506d56530ca9 100644
--- a/include/crypto/internal/blake2s.h
+++ b/include/crypto/internal/blake2s.h
@@ -1,24 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2s implementations.
+ * Keep this in sync with the corresponding BLAKE2b header.
+ */
-#ifndef BLAKE2S_INTERNAL_H
-#define BLAKE2S_INTERNAL_H
+#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
+#define _CRYPTO_INTERNAL_BLAKE2S_H
#include <crypto/blake2s.h>
+#include <linux/string.h>
-struct blake2s_tfm_ctx {
- u8 key[BLAKE2S_KEY_SIZE];
- unsigned int keylen;
-};
-
-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc);
-void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
- size_t nblocks, const u32 inc);
+void blake2s_compress(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
-static inline void blake2s_set_lastblock(struct blake2s_state *state)
-{
- state->f[0] = -1;
-}
+bool blake2s_selftest(void);
-#endif /* BLAKE2S_INTERNAL_H */
+#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h
new file mode 100644
index 000000000000..5030f6d2df31
--- /dev/null
+++ b/include/crypto/internal/cipher.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
+ * and Nettle, by Niels Möller.
+ */
+
+#ifndef _CRYPTO_INTERNAL_CIPHER_H
+#define _CRYPTO_INTERNAL_CIPHER_H
+
+#include <crypto/algapi.h>
+
+struct crypto_cipher {
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * Using the single block cipher API calls, operations with the basic cipher
+ * primitive can be implemented. These cipher primitives exclude any block
+ * chaining operations including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
+
+static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_cipher *)tfm;
+}
+
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_cipher(struct crypto_cipher *tfm)
+{
+ crypto_free_tfm(crypto_cipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ * false otherwise
+ */
+static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
+}
+
+static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
+}
+
+static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen);
+
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
+
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
+
+struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher);
+
+struct crypto_cipher_spawn {
+ struct crypto_spawn base;
+};
+
+static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+
+static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct crypto_alg *crypto_spawn_cipher_alg(
+ struct crypto_cipher_spawn *spawn)
+{
+ return spawn->base.alg;
+}
+
+static inline struct crypto_cipher *crypto_spawn_cipher(
+ struct crypto_cipher_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_CIPHER;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
+}
+
+static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
+{
+ return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+}
+
+#endif
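
A minimal sketch of template-style use of the single block cipher API now that it lives in this internal header; "aes", key, block_in and block_out are assumptions.

    struct crypto_cipher *cipher = crypto_alloc_cipher("aes", 0, 0);
    int err;

    if (IS_ERR(cipher))
            return PTR_ERR(cipher);
    err = crypto_cipher_setkey(cipher, key, 16);                    /* AES-128 */
    if (!err)
            crypto_cipher_encrypt_one(cipher, block_out, block_in); /* exactly one block */
    crypto_free_cipher(cipher);
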
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
deleted file mode 100644
index fd54074332f5..000000000000
--- a/include/crypto/internal/cryptouser.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/cryptouser.h>
-#include <net/netlink.h>
-
-struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
-
-#ifdef CONFIG_CRYPTO_STATS
-int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
-#else
-static inline int crypto_reportstat(struct sk_buff *in_skb,
- struct nlmsghdr *in_nlh,
- struct nlattr **attrs)
-{
- return -ENOTSUPP;
-}
-#endif
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
new file mode 100644
index 000000000000..f7e75e1e71f3
--- /dev/null
+++ b/include/crypto/internal/ecc.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _CRYPTO_ECC_H
+#define _CRYPTO_ECC_H
+
+#include <crypto/ecc_curve.h>
+#include <asm/unaligned.h>
+
+/* One digit is u64 qword. */
+#define ECC_CURVE_NIST_P192_DIGITS 3
+#define ECC_CURVE_NIST_P256_DIGITS 4
+#define ECC_CURVE_NIST_P384_DIGITS 6
+#define ECC_CURVE_NIST_P521_DIGITS 9
+#define ECC_MAX_DIGITS DIV_ROUND_UP(521, 64) /* NIST P521 */
+
+#define ECC_DIGITS_TO_BYTES_SHIFT 3
+
+#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
+
+#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+
+/**
+ * ecc_swap_digits() - Copy ndigits from big endian array to native array
+ * @in: Input array
+ * @out: Output array
+ * @ndigits: Number of digits to copy
+ */
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
+{
+ const __be64 *src = (__force __be64 *)in;
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
+}
+
+/**
+ * ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array
+ * @in: Input byte array
+ * @nbytes: Size of input byte array
+ * @out: Output digits array
+ * @ndigits: Number of digits to create from byte array
+ */
+void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
+ u64 *out, unsigned int ndigits);
+
+/**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key to be used for the given curve
+ * @private_key_len: private key length
+ *
+ * Returns 0 if the key is acceptable, a negative value otherwise
+ */
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, unsigned int private_key_len);
+
+/**
+ * ecc_gen_privkey() - Generates an ECC private key.
+ * The private key is a random integer in the range 0 < random < n, where n is a
+ * prime that is the order of the cyclic subgroup generated by the distinguished
+ * point G.
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve number of digits
+ * @private_key: buffer for storing the generated private key
+ *
+ * Returns 0 if the private key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
+ u64 *private_key);
+
+/**
+ * ecc_make_pub_key() - Compute an ECC public key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: pregenerated private key for the given curve
+ * @public_key: buffer for storing the generated public key
+ *
+ * Returns 0 if the public key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, u64 *public_key);
+
+/**
+ * crypto_ecdh_shared_secret() - Compute a shared secret
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key of part A
+ * @public_key: public key of counterpart B
+ * @secret: buffer for storing the calculated shared secret
+ *
+ * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Returns 0 if the shared secret was generated successfully, a negative value
+ * if an error occurred.
+ */
+int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, const u64 *public_key,
+ u64 *secret);
+
+/**
+ * ecc_is_pubkey_valid_partial() - Partial public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial
+ * Public-Key Validation Routine.
+ *
+ * Note: There is no check that the public key is in the correct elliptic curve
+ * subgroup.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * ecc_is_pubkey_valid_full() - Full public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full
+ * Public-Key Validation Routine.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * vli_is_zero() - Determine if vli is zero
+ *
+ * @vli: vli to check.
+ * @ndigits: length of the @vli
+ */
+bool vli_is_zero(const u64 *vli, unsigned int ndigits);
+
+/**
+ * vli_cmp() - compare left and right vlis
+ *
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of both vlis
+ *
+ * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * 0 if @left == @right, 1 if @left > @right.
+ */
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
+
+/**
+ * vli_sub() - Subtracts right from left
+ *
+ * @result: where to write result
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of all vlis
+ *
+ * Note: can modify in-place.
+ *
+ * Return: carry bit.
+ */
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits);
+
+/**
+ * vli_from_be64() - Load vli from big-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 BE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_from_le64() - Load vli from little-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 LE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_mod_inv() - Modular inversion
+ *
+ * @result: where to write vli number
+ * @input: vli value to operate on
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ */
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ unsigned int ndigits);
+
+/**
+ * vli_mod_mult_slow() - Modular multiplication
+ *
+ * @result: where to write result value
+ * @left: vli number to multiply with @right
+ * @right: vli number to multiply with @left
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ *
+ * Note: Assumes that @mod is sufficiently large, e.g. a curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits);
+
+/**
+ * vli_num_bits() - Counts the number of bits required for vli.
+ *
+ * @vli: vli to check.
+ * @ndigits: Length of the @vli
+ *
+ * Return: The number of bits required to represent @vli.
+ */
+unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);
+
+/**
+ * ecc_alloc_point() - Allocate ECC point.
+ *
+ * @ndigits: Length of vlis in u64 qwords.
+ *
+ * Return: Pointer to the allocated point or NULL if allocation failed.
+ */
+struct ecc_point *ecc_alloc_point(unsigned int ndigits);
+
+/**
+ * ecc_free_point() - Free ECC point.
+ *
+ * @p: The point to free.
+ */
+void ecc_free_point(struct ecc_point *p);
+
+/**
+ * ecc_point_is_zero() - Check if point is zero.
+ *
+ * @point: Point to check for zero.
+ *
+ * Return: true if point is the point at infinity, false otherwise.
+ */
+bool ecc_point_is_zero(const struct ecc_point *point);
+
+/**
+ * ecc_point_mult_shamir() - Add two points multiplied by scalars
+ *
+ * @result: resulting point
+ * @x: scalar to multiply with @p
+ * @p: point to multiply with @x
+ * @y: scalar to multiply with @q
+ * @q: point to multiply with @y
+ * @curve: curve
+ *
+ * Returns result = x * p + y * q over the curve.
+ * This works faster than two multiplications and addition.
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *x, const struct ecc_point *p,
+ const u64 *y, const struct ecc_point *q,
+ const struct ecc_curve *curve);
+
+#endif
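
A short sketch of the digit helpers above: convert a big-endian P-256 scalar into vli form and range-check it against the curve order; be_bytes is an assumed 32-byte input.

    const struct ecc_curve *curve = ecc_get_curve(ECC_CURVE_NIST_P256);
    u64 x[ECC_CURVE_NIST_P256_DIGITS];

    ecc_swap_digits(be_bytes, x, ECC_CURVE_NIST_P256_DIGITS);  /* BE bytes -> native digits */
    if (!curve || vli_cmp(x, curve->n, ECC_CURVE_NIST_P256_DIGITS) >= 0)
            return -EINVAL;                                     /* not in [0, n) */
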
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
new file mode 100644
index 000000000000..fbf4be56cf12
--- /dev/null
+++ b/include/crypto/internal/engine.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_ENGINE_H
+#define _CRYPTO_INTERNAL_ENGINE_H
+
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define ENGINE_NAME_LEN 30
+
+struct device;
+
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is on working
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request
+ * crypto-engine, in head position to keep order
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests. Depends on multiple
+ * requests support.
+ * @kworker: kthread worker struct for request pump
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request which is on processing
+ */
+struct crypto_engine {
+ char name[ENGINE_NAME_LEN];
+ bool idling;
+ bool busy;
+ bool running;
+
+ bool retry_support;
+
+ struct list_head list;
+ spinlock_t queue_lock;
+ struct crypto_queue queue;
+ struct device *dev;
+
+ bool rt;
+
+ int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*do_batch_requests)(struct crypto_engine *engine);
+
+
+ struct kthread_worker *kworker;
+ struct kthread_work pump_requests;
+
+ void *priv_data;
+ struct crypto_async_request *cur_req;
+};
+
+#endif
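With the engine structure now private to this header, drivers that still populate the optional hooks by hand must include it explicitly. A hedged sketch of such a probe path follows; the my_* names and the choice of hooks are placeholders, while crypto_engine_alloc_init() and crypto_engine_start() are declared in crypto/engine.h:

#include <crypto/engine.h>
#include <crypto/internal/engine.h>
#include <linux/device.h>

struct my_priv {
        struct crypto_engine *engine;
};

/* Placeholder power-management hooks for the hypothetical device. */
static int my_prepare_hw(struct crypto_engine *engine);
static int my_unprepare_hw(struct crypto_engine *engine);

static int my_engine_probe(struct device *dev, struct my_priv *priv)
{
        /* second argument: run the request pump as a realtime kthread */
        priv->engine = crypto_engine_alloc_init(dev, true);
        if (!priv->engine)
                return -ENOMEM;

        priv->engine->prepare_crypt_hardware = my_prepare_hw;
        priv->engine->unprepare_crypt_hardware = my_unprepare_hw;

        return crypto_engine_start(priv->engine);
}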
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 89f6f46ab2b8..58967593b6b4 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -18,15 +18,13 @@ struct crypto_hash_walk {
char *data;
unsigned int offset;
- unsigned int alignmask;
+ unsigned int flags;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
-
- unsigned int flags;
};
struct ahash_instance {
@@ -62,25 +60,12 @@ struct crypto_shash_spawn {
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
-int crypto_ahash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk);
-
-static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
- int err)
-{
- return crypto_hash_walk_done(walk, err);
-}
static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
return !(walk->entrylen | walk->total);
}
-static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
-{
- return crypto_hash_walk_last(walk);
-}
-
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
@@ -102,8 +87,6 @@ static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
-bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
-
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -146,25 +129,47 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
-int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
-
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{
return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
}
+static inline void *crypto_ahash_ctx_dma(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx_dma(crypto_ahash_tfm(tfm));
+}
+
static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
{
return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
halg);
}
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
+static inline void crypto_ahash_set_statesize(struct crypto_ahash *tfm,
+ unsigned int size)
+{
+ tfm->statesize = size;
+}
+
static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
unsigned int reqsize)
{
tfm->reqsize = reqsize;
}
+static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ ahash->reqsize = reqsize;
+}
+
static inline struct crypto_instance *ahash_crypto_instance(
struct ahash_instance *inst)
{
@@ -177,14 +182,30 @@ static inline struct ahash_instance *ahash_instance(
return container_of(inst, struct ahash_instance, s.base);
}
+static inline struct ahash_instance *ahash_alg_instance(
+ struct crypto_ahash *ahash)
+{
+ return ahash_instance(crypto_tfm_alg_instance(&ahash->base));
+}
+
static inline void *ahash_instance_ctx(struct ahash_instance *inst)
{
return crypto_instance_ctx(ahash_crypto_instance(inst));
}
+static inline void *ahash_request_ctx_dma(struct ahash_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(ahash_request_ctx(req), align);
+}
+
static inline void ahash_request_complete(struct ahash_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline u32 ahash_request_flags(struct ahash_request *req)
@@ -244,11 +265,6 @@ static inline struct crypto_shash *crypto_spawn_shash(
return crypto_spawn_tfm2(&spawn->base);
}
-static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
-{
- return crypto_tfm_ctx_aligned(&tfm->base);
-}
-
static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_shash, base);
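The new DMA-aware helpers are meant to be used as a pair: reserve the request context with crypto_ahash_set_reqsize_dma() at init time and fetch the aligned pointer with ahash_request_ctx_dma() per request. A minimal sketch (my_* names are hypothetical):

#include <crypto/internal/hash.h>

struct my_ahash_req_ctx {
        u8 hw_desc[64];         /* area the device DMAs into */
};

static int my_ahash_init_tfm(struct crypto_ahash *tfm)
{
        /* Reserve a DMA-aligned per-request context. */
        crypto_ahash_set_reqsize_dma(tfm, sizeof(struct my_ahash_req_ctx));
        return 0;
}

static int my_ahash_digest(struct ahash_request *req)
{
        /* Pointer is aligned to crypto_dma_align(). */
        struct my_ahash_req_ctx *rctx = ahash_request_ctx_dma(req);

        /* ... program the hardware using rctx->hw_desc ... */
        return 0;
}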
diff --git a/include/crypto/internal/kdf_selftest.h b/include/crypto/internal/kdf_selftest.h
new file mode 100644
index 000000000000..4d03d2af57b7
--- /dev/null
+++ b/include/crypto/internal/kdf_selftest.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF_SELFTEST_H
+#define _CRYPTO_KDF_SELFTEST_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+struct kdf_testvec {
+ unsigned char *key;
+ size_t keylen;
+ unsigned char *ikm;
+ size_t ikmlen;
+ struct kvec info;
+ unsigned char *expected;
+ size_t expectedlen;
+};
+
+static inline int
+kdf_test(const struct kdf_testvec *test, const char *name,
+ int (*crypto_kdf_setkey)(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen),
+ int (*crypto_kdf_generate)(struct crypto_shash *kmd,
+ const struct kvec *info,
+ unsigned int info_nvec,
+ u8 *dst, unsigned int dlen))
+{
+ struct crypto_shash *kmd;
+ int ret;
+ u8 *buf = kzalloc(test->expectedlen, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ kmd = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(kmd)) {
+ pr_err("alg: kdf: could not allocate hash handle for %s\n",
+ name);
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ ret = crypto_kdf_setkey(kmd, test->key, test->keylen,
+ test->ikm, test->ikmlen);
+ if (ret) {
+ pr_err("alg: kdf: could not set key derivation key\n");
+ goto err;
+ }
+
+ ret = crypto_kdf_generate(kmd, &test->info, 1, buf, test->expectedlen);
+ if (ret) {
+ pr_err("alg: kdf: could not obtain key data\n");
+ goto err;
+ }
+
+ ret = memcmp(test->expected, buf, test->expectedlen);
+ if (ret)
+ ret = -EINVAL;
+
+err:
+ crypto_free_shash(kmd);
+ kfree(buf);
+ return ret;
+}
+
+#endif /* _CRYPTO_KDF_SELFTEST_H */
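A caller wires a known-answer vector into kdf_test() together with the setkey/generate pair it wants to exercise; a sketch using the SP800-108 callbacks from crypto/kdf_sp800108.h (the my_kat_* arrays are placeholders for real test data):

#include <crypto/internal/kdf_selftest.h>
#include <crypto/kdf_sp800108.h>

static u8 my_kat_key[32], my_kat_info[14], my_kat_expected[32];

static const struct kdf_testvec my_kdf_tv = {
        .key            = my_kat_key,
        .keylen         = sizeof(my_kat_key),
        .info           = { .iov_base = my_kat_info,
                            .iov_len  = sizeof(my_kat_info) },
        .expected       = my_kat_expected,
        .expectedlen    = sizeof(my_kat_expected),
};

static int my_kdf_selftest(void)
{
        return kdf_test(&my_kdf_tv, "hmac(sha256)",
                        crypto_kdf108_setkey, crypto_kdf108_ctr_generate);
}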
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
index 659b642efada..0a6db8c4a9a0 100644
--- a/include/crypto/internal/kpp.h
+++ b/include/crypto/internal/kpp.h
@@ -10,6 +10,38 @@
#include <crypto/kpp.h>
#include <crypto/algapi.h>
+/**
+ * struct kpp_instance - KPP template instance
+ * @free: Callback getting invoked upon instance destruction. Must be set.
+ * @s: Internal. Generic crypto core instance state properly laid out
+ * to alias with @alg as needed.
+ * @alg: The &struct kpp_alg implementation provided by the instance.
+ */
+struct kpp_instance {
+ void (*free)(struct kpp_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct kpp_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct kpp_alg alg;
+ };
+};
+
+/**
+ * struct crypto_kpp_spawn - KPP algorithm spawn
+ * @base: Internal. Generic crypto core spawn state.
+ *
+ * Template instances can get a hold on some inner KPP algorithm by
+ * binding a &struct crypto_kpp_spawn via
+ * crypto_grab_kpp(). Transforms may subsequently get instantiated
+ * from the referenced inner &struct kpp_alg by means of
+ * crypto_spawn_kpp().
+ */
+struct crypto_kpp_spawn {
+ struct crypto_spawn base;
+};
+
/*
* Transform internal helpers.
*/
@@ -18,14 +50,42 @@ static inline void *kpp_request_ctx(struct kpp_request *req)
return req->__ctx;
}
+static inline void *kpp_request_ctx_dma(struct kpp_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(kpp_request_ctx(req), align);
+}
+
+static inline void kpp_set_reqsize(struct crypto_kpp *kpp,
+ unsigned int reqsize)
+{
+ kpp->reqsize = reqsize;
+}
+
+static inline void kpp_set_reqsize_dma(struct crypto_kpp *kpp,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ kpp->reqsize = reqsize;
+}
+
static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
{
- return tfm->base.__crt_ctx;
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *kpp_tfm_ctx_dma(struct crypto_kpp *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
}
static inline void kpp_request_complete(struct kpp_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
@@ -33,6 +93,62 @@ static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
}
+/*
+ * Template instance internal helpers.
+ */
+/**
+ * kpp_crypto_instance() - Cast a &struct kpp_instance to the corresponding
+ * generic &struct crypto_instance.
+ * @inst: Pointer to the &struct kpp_instance to be cast.
+ * Return: A pointer to the &struct crypto_instance embedded in @inst.
+ */
+static inline struct crypto_instance *kpp_crypto_instance(
+ struct kpp_instance *inst)
+{
+ return &inst->s.base;
+}
+
+/**
+ * kpp_instance() - Cast a generic &struct crypto_instance to the corresponding
+ * &struct kpp_instance.
+ * @inst: Pointer to the &struct crypto_instance to be cast.
+ * Return: A pointer to the &struct kpp_instance @inst is embedded in.
+ */
+static inline struct kpp_instance *kpp_instance(struct crypto_instance *inst)
+{
+ return container_of(inst, struct kpp_instance, s.base);
+}
+
+/**
+ * kpp_alg_instance() - Get the &struct kpp_instance a given KPP transform has
+ * been instantiated from.
+ * @kpp: The KPP transform instantiated from some &struct kpp_instance.
+ * Return: The &struct kpp_instance associated with @kpp.
+ */
+static inline struct kpp_instance *kpp_alg_instance(struct crypto_kpp *kpp)
+{
+ return kpp_instance(crypto_tfm_alg_instance(&kpp->base));
+}
+
+/**
+ * kpp_instance_ctx() - Get a pointer to a &struct kpp_instance's implementation
+ * specific context data.
+ * @inst: The &struct kpp_instance whose context data to access.
+ *
+ * A KPP template implementation may allocate extra memory beyond the
+ * end of a &struct kpp_instance instantiated from &crypto_template.create().
+ * This function provides a means to obtain a pointer to this area.
+ *
+ * Return: A pointer to the implementation specific context data.
+ */
+static inline void *kpp_instance_ctx(struct kpp_instance *inst)
+{
+ return crypto_instance_ctx(kpp_crypto_instance(inst));
+}
+
+/*
+ * KPP algorithm (un)registration functions.
+ */
/**
* crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
*
@@ -56,4 +172,74 @@ int crypto_register_kpp(struct kpp_alg *alg);
*/
void crypto_unregister_kpp(struct kpp_alg *alg);
+/**
+ * kpp_register_instance() - Register a KPP template instance.
+ * @tmpl: The instantiating template.
+ * @inst: The KPP template instance to be registered.
+ * Return: %0 on success, negative error code otherwise.
+ */
+int kpp_register_instance(struct crypto_template *tmpl,
+ struct kpp_instance *inst);
+
+/*
+ * KPP spawn related functions.
+ */
+/**
+ * crypto_grab_kpp() - Look up a KPP algorithm and bind a spawn to it.
+ * @spawn: The KPP spawn to bind.
+ * @inst: The template instance owning @spawn.
+ * @name: The KPP algorithm name to look up.
+ * @type: The type bitset to pass on to the lookup.
+ * @mask: The bitmask to pass on to the lookup.
+ * Return: %0 on success, a negative error code otherwise.
+ */
+int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+/**
+ * crypto_drop_kpp() - Release a spawn previously bound via crypto_grab_kpp().
+ * @spawn: The spawn to release.
+ */
+static inline void crypto_drop_kpp(struct crypto_kpp_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+/**
+ * crypto_spawn_kpp_alg() - Get the algorithm a KPP spawn has been bound to.
+ * @spawn: The spawn to get the referenced &struct kpp_alg for.
+ *
+ * This function as well as the returned result are safe to use only
+ * after @spawn has been successfully bound via crypto_grab_kpp() and
+ * up until the template instance owning @spawn has either been
+ * registered successfully or the spawn has been released again via
+ * crypto_drop_spawn().
+ *
+ * Return: A pointer to the &struct kpp_alg referenced from the spawn.
+ */
+static inline struct kpp_alg *crypto_spawn_kpp_alg(
+ struct crypto_kpp_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct kpp_alg, base);
+}
+
+/**
+ * crypto_spawn_kpp() - Create a transform from a KPP spawn.
+ * @spawn: The spawn previously bound to some &struct kpp_alg via
+ * crypto_grab_kpp().
+ *
+ * Once a &struct crypto_kpp_spawn has been successfully bound to a
+ * &struct kpp_alg via crypto_grab_kpp(), transforms for the latter
+ * may get instantiated from the former by means of this function.
+ *
+ * Return: A pointer to the freshly created KPP transform on success
+ * or an ``ERR_PTR()`` otherwise.
+ */
+static inline struct crypto_kpp *crypto_spawn_kpp(
+ struct crypto_kpp_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
#endif
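Putting the new spawn and instance helpers together, a KPP template's ->create() callback would roughly look like the sketch below (heavily trimmed; the algorithm-callback wiring is elided and all my_* names are hypothetical):

#include <crypto/internal/kpp.h>
#include <linux/slab.h>

struct my_kpp_inst_ctx {
        struct crypto_kpp_spawn spawn;
};

static int my_kpp_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct kpp_instance *inst;
        struct my_kpp_inst_ctx *ctx;
        int err;

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = kpp_instance_ctx(inst);

        /* Bind the spawn to the inner KPP algorithm named in tb[1]. */
        err = crypto_grab_kpp(&ctx->spawn, kpp_crypto_instance(inst),
                              crypto_attr_alg_name(tb[1]), 0, 0);
        if (err)
                goto err_free;

        /*
         * ... set inst->free and fill in inst->alg based on
         * crypto_spawn_kpp_alg(&ctx->spawn) ...
         */

        err = kpp_register_instance(tmpl, inst);
        if (err)
                goto err_drop;
        return 0;

err_drop:
        crypto_drop_kpp(&ctx->spawn);
err_free:
        kfree(inst);
        return err;
}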
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 064e52ca5248..196aa769f296 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -18,7 +18,8 @@
* only the ε-almost-∆-universal hash function (not the full MAC) is computed.
*/
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
static inline void poly1305_core_init(struct poly1305_state *state)
{
*state = (struct poly1305_state){};
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index f834274c2493..07a10fd2d321 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -8,10 +8,14 @@
*/
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <linux/crypto.h>
+
+#include <crypto/acompress.h>
+#include <crypto/algapi.h>
#define SCOMP_SCRATCH_SIZE 131072
+struct acomp_req;
+
struct crypto_scomp {
struct crypto_tfm base;
};
@@ -24,6 +28,7 @@ struct crypto_scomp {
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
* @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
void *(*alloc_ctx)(struct crypto_scomp *tfm);
@@ -34,7 +39,11 @@ struct scomp_alg {
int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
- struct crypto_alg base;
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
};
static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
@@ -89,10 +98,6 @@ static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
ctx);
}
-int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
-
/**
* crypto_register_scomp() -- Register synchronous compression algorithm
*
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
new file mode 100644
index 000000000000..97cb26ef8115
--- /dev/null
+++ b/include/crypto/internal/sig.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_SIG_H
+#define _CRYPTO_INTERNAL_SIG_H
+
+#include <crypto/algapi.h>
+#include <crypto/sig.h>
+
+static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+#endif
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 10226c12c5df..7ae42afdcf3e 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -9,10 +9,19 @@
#define _CRYPTO_INTERNAL_SKCIPHER_H
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>
+/*
+ * Set this if your algorithm is sync but needs a reqsize larger
+ * than MAX_SYNC_SKCIPHER_REQSIZE.
+ *
+ * This reuses a bit that is specific to hash algorithms.
+ */
+#define CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE CRYPTO_ALG_OPTIONAL_KEY
+
struct aead_request;
struct rtattr;
@@ -27,10 +36,25 @@ struct skcipher_instance {
};
};
+struct lskcipher_instance {
+ void (*free)(struct lskcipher_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct lskcipher_alg, co.base)];
+ struct crypto_instance base;
+ } s;
+ struct lskcipher_alg alg;
+ };
+};
+
struct crypto_skcipher_spawn {
struct crypto_spawn base;
};
+struct crypto_lskcipher_spawn {
+ struct crypto_spawn base;
+};
+
struct skcipher_walk {
union {
struct {
@@ -71,6 +95,12 @@ static inline struct crypto_instance *skcipher_crypto_instance(
return &inst->s.base;
}
+static inline struct crypto_instance *lskcipher_crypto_instance(
+ struct lskcipher_instance *inst)
+{
+ return &inst->s.base;
+}
+
static inline struct skcipher_instance *skcipher_alg_instance(
struct crypto_skcipher *skcipher)
{
@@ -78,35 +108,62 @@ static inline struct skcipher_instance *skcipher_alg_instance(
struct skcipher_instance, alg);
}
+static inline struct lskcipher_instance *lskcipher_alg_instance(
+ struct crypto_lskcipher *lskcipher)
+{
+ return container_of(crypto_lskcipher_alg(lskcipher),
+ struct lskcipher_instance, alg);
+}
+
static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
return crypto_instance_ctx(skcipher_crypto_instance(inst));
}
+static inline void *lskcipher_instance_ctx(struct lskcipher_instance *inst)
+{
+ return crypto_instance_ctx(lskcipher_crypto_instance(inst));
+}
+
static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
+int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
- struct crypto_skcipher_spawn *spawn)
+static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn)
{
- return container_of(spawn->base.alg, struct skcipher_alg, base);
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct lskcipher_alg, co.base);
}
-static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
+static inline struct skcipher_alg_common *crypto_spawn_skcipher_alg_common(
struct crypto_skcipher_spawn *spawn)
{
- return crypto_skcipher_spawn_alg(spawn);
+ return container_of(spawn->base.alg, struct skcipher_alg_common, base);
+}
+
+static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return crypto_lskcipher_spawn_alg(spawn);
}
static inline struct crypto_skcipher *crypto_spawn_skcipher(
@@ -115,12 +172,25 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher(
return crypto_spawn_tfm2(&spawn->base);
}
+static inline struct crypto_lskcipher *crypto_spawn_lskcipher(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
static inline void crypto_skcipher_set_reqsize(
struct crypto_skcipher *skcipher, unsigned int reqsize)
{
skcipher->reqsize = reqsize;
}
+static inline void crypto_skcipher_set_reqsize_dma(
+ struct crypto_skcipher *skcipher, unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ skcipher->reqsize = reqsize;
+}
+
int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
@@ -128,11 +198,17 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
struct skcipher_instance *inst);
+int crypto_register_lskcipher(struct lskcipher_alg *alg);
+void crypto_unregister_lskcipher(struct lskcipher_alg *alg);
+int crypto_register_lskciphers(struct lskcipher_alg *algs, int count);
+void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
+int lskcipher_register_instance(struct crypto_template *tmpl,
+ struct lskcipher_instance *inst);
+
int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req,
bool atomic);
-void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
@@ -151,49 +227,34 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
return crypto_tfm_ctx(&tfm->base);
}
-static inline void *skcipher_request_ctx(struct skcipher_request *req)
+static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm)
{
- return req->__ctx;
+ return crypto_tfm_ctx(&tfm->base);
}
-static inline u32 skcipher_request_flags(struct skcipher_request *req)
+static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
{
- return req->base.flags;
+ return crypto_tfm_ctx_dma(&tfm->base);
}
-static inline unsigned int crypto_skcipher_alg_min_keysize(
- struct skcipher_alg *alg)
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
- return alg->min_keysize;
+ return req->__ctx;
}
-static inline unsigned int crypto_skcipher_alg_max_keysize(
- struct skcipher_alg *alg)
+static inline void *skcipher_request_ctx_dma(struct skcipher_request *req)
{
- return alg->max_keysize;
-}
+ unsigned int align = crypto_dma_align();
-static inline unsigned int crypto_skcipher_alg_walksize(
- struct skcipher_alg *alg)
-{
- return alg->walksize;
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(skcipher_request_ctx(req), align);
}
-/**
- * crypto_skcipher_walksize() - obtain walk size
- * @tfm: cipher handle
- *
- * In some cases, algorithms can only perform optimally when operating on
- * multiple blocks in parallel. This is reflected by the walksize, which
- * must be a multiple of the chunksize (or equal if the concern does not
- * apply)
- *
- * Return: walk size in bytes
- */
-static inline unsigned int crypto_skcipher_walksize(
- struct crypto_skcipher *tfm)
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
- return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+ return req->base.flags;
}
/* Helpers for simple block cipher modes of operation */
@@ -219,5 +280,24 @@ static inline struct crypto_alg *skcipher_ialg_simple(
return crypto_spawn_cipher_alg(spawn);
}
+static inline struct crypto_lskcipher *lskcipher_cipher_simple(
+ struct crypto_lskcipher *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+
+ return *ctx;
+}
+
+struct lskcipher_instance *lskcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct lskcipher_alg *lskcipher_ialg_simple(
+ struct lskcipher_instance *inst)
+{
+ struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst);
+
+ return crypto_lskcipher_spawn_alg(spawn);
+}
+
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
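For reference, the retained walk API is normally driven from an skcipher implementation's encrypt/decrypt callback along these lines (a sketch; my_cipher_chunk() stands in for the actual per-chunk processing):

#include <crypto/internal/skcipher.h>

/* Placeholder for the real per-chunk processing. */
static void my_cipher_chunk(const void *src, void *dst, unsigned int nbytes);

static int my_skcipher_encrypt(struct skcipher_request *req)
{
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) != 0) {
                my_cipher_chunk(walk.src.virt.addr, walk.dst.virt.addr, nbytes);
                /* 0 bytes left unprocessed in this chunk */
                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}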
diff --git a/include/crypto/kdf_sp800108.h b/include/crypto/kdf_sp800108.h
new file mode 100644
index 000000000000..b7b20a778fb7
--- /dev/null
+++ b/include/crypto/kdf_sp800108.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF108_H
+#define _CRYPTO_KDF108_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+/**
+ * Counter KDF generate operation according to SP800-108 section 5.1
+ * as well as SP800-56A section 5.8.1 (Single-step KDF).
+ *
+ * @kmd Keyed message digest whose key was set with crypto_kdf108_setkey or
+ * unkeyed message digest
+ * @info optional context and application specific information - this may be
+ * NULL
+ * @info_nvec number of optional context/application specific information entries
+ * @dst destination buffer that the caller already allocated
+ * @dlen length of the destination buffer - the KDF derives that many
+ * bytes.
+ *
+ * To comply with SP800-108, the caller must provide Label || 0x00 || Context
+ * in the info parameter.
+ *
+ * @return 0 on success, < 0 on error
+ */
+int crypto_kdf108_ctr_generate(struct crypto_shash *kmd,
+ const struct kvec *info, unsigned int info_nvec,
+ u8 *dst, unsigned int dlen);
+
+/**
+ * Counter KDF setkey operation
+ *
+ * @kmd Keyed message digest allocated by the caller. The key should not have
+ * been set.
+ * @key Seed key to be used to initialize the keyed message digest context.
+ * @keylen The length of the key buffer.
+ * @ikm The SP800-108 KDF does not support IKM - this parameter must be NULL
+ * @ikmlen This parameter must be 0.
+ *
+ * According to SP800-108 section 7.2, the seed key must be at least as large as
+ * the message digest size of the used keyed message digest. This limitation
+ * is enforced by the implementation.
+ *
+ * SP800-108 allows the use of either a HMAC or a hash primitive. When
+ * the caller intends to use a hash primitive, the call to
+ * crypto_kdf108_setkey is not required and the key derivation operation can
+ * be performed immediately using crypto_kdf108_ctr_generate after allocating
+ * a handle.
+ *
+ * @return 0 on success, < 0 on error
+ */
+int crypto_kdf108_setkey(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen);
+
+#endif /* _CRYPTO_KDF108_H */
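Putting the two declarations together, the HMAC-based use case described above reduces to a short sequence; a sketch (the seed and label/context buffers are assumed to come from the caller):

#include <crypto/hash.h>
#include <crypto/kdf_sp800108.h>

static int my_derive_key(const u8 *seed, size_t seedlen,
                         const struct kvec *label_ctx,
                         u8 *out, unsigned int outlen)
{
        struct crypto_shash *kmd;
        int err;

        kmd = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(kmd))
                return PTR_ERR(kmd);

        /* SP800-108 does not support an IKM, so pass NULL/0. */
        err = crypto_kdf108_setkey(kmd, seed, seedlen, NULL, 0);
        if (!err)
                err = crypto_kdf108_ctr_generate(kmd, label_ctx, 1,
                                                 out, outlen);

        crypto_free_shash(kmd);
        return err;
}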
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 88b591215d5c..2d9c4de57b69 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -8,7 +8,11 @@
#ifndef _CRYPTO_KPP_
#define _CRYPTO_KPP_
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/slab.h>
/**
* struct kpp_request
@@ -37,9 +41,13 @@ struct kpp_request {
* struct crypto_kpp - user-instantiated object which encapsulate
* algorithms and core processing logic
*
+ * @reqsize: Request context size required by algorithm
+ * implementation
* @base: Common crypto API algorithm data structure
*/
struct crypto_kpp {
+ unsigned int reqsize;
+
struct crypto_tfm base;
};
@@ -64,8 +72,6 @@ struct crypto_kpp {
* put in place here.
* @exit: Undo everything @init did.
*
- * @reqsize: Request context size required by algorithm
- * implementation
* @base: Common crypto API algorithm data structure
*/
struct kpp_alg {
@@ -79,7 +85,6 @@ struct kpp_alg {
int (*init)(struct crypto_kpp *tfm);
void (*exit)(struct crypto_kpp *tfm);
- unsigned int reqsize;
struct crypto_alg base;
};
@@ -104,6 +109,8 @@ struct kpp_alg {
*/
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
+int crypto_has_kpp(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
{
return &tfm->base;
@@ -126,7 +133,7 @@ static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm)
static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm)
{
- return crypto_kpp_alg(tfm)->reqsize;
+ return tfm->reqsize;
}
static inline void kpp_request_set_tfm(struct kpp_request *req,
@@ -154,6 +161,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
* crypto_free_kpp() - free KPP tfm handle
*
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_kpp(struct crypto_kpp *tfm)
{
@@ -281,14 +290,7 @@ struct kpp_secret {
static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->set_secret(tfm, buffer, len);
- crypto_stats_kpp_set_secret(calg, ret);
- return ret;
+ return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len);
}
/**
@@ -307,14 +309,8 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->generate_public_key(req);
- crypto_stats_kpp_generate_public_key(calg, ret);
- return ret;
+ return crypto_kpp_alg(tfm)->generate_public_key(req);
}
/**
@@ -330,14 +326,8 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->compute_shared_secret(req);
- crypto_stats_kpp_compute_shared_secret(calg, ret);
- return ret;
+ return crypto_kpp_alg(tfm)->compute_shared_secret(req);
}
/**
diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h
index b9bc3436196a..234d7cf3cf5e 100644
--- a/include/crypto/pcrypt.h
+++ b/include/crypto/pcrypt.h
@@ -9,8 +9,8 @@
#ifndef _CRYPTO_PCRYPT_H
#define _CRYPTO_PCRYPT_H
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/padata.h>
struct pcrypt_request {
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index f1f67fc749cf..090692ec3bc7 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
};
};
-void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
+void poly1305_init_arch(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
{
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
new file mode 100644
index 000000000000..1d630f371f77
--- /dev/null
+++ b/include/crypto/polyval.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Polyval hash algorithm
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#ifndef _CRYPTO_POLYVAL_H
+#define _CRYPTO_POLYVAL_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLYVAL_BLOCK_SIZE 16
+#define POLYVAL_DIGEST_SIZE 16
+
+void polyval_mul_non4k(u8 *op1, const u8 *op2);
+
+void polyval_update_non4k(const u8 *key, const u8 *in,
+ size_t nblocks, u8 *accumulator);
+
+#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 11f535cfb810..b7f308977c84 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -10,6 +10,7 @@
#ifndef _LINUX_PUBLIC_KEY_H
#define _LINUX_PUBLIC_KEY_H
+#include <linux/errno.h>
#include <linux/keyctl.h>
#include <linux/oid_registry.h>
@@ -28,6 +29,10 @@ struct public_key {
bool key_is_private;
const char *id_type;
const char *pkey_algo;
+ unsigned long key_eflags; /* key extension flags */
+#define KEY_EFLAG_CA 0 /* set if the CA basic constraints is set */
+#define KEY_EFLAG_DIGITALSIG 1 /* set if the digitalSignature usage is set */
+#define KEY_EFLAG_KEYCERTSIGN 2 /* set if the keyCertSign usage is set */
};
extern void public_key_free(struct public_key *key);
@@ -36,11 +41,11 @@ extern void public_key_free(struct public_key *key);
* Public key cryptography signature data
*/
struct public_key_signature {
- struct asymmetric_key_id *auth_ids[2];
+ struct asymmetric_key_id *auth_ids[3];
u8 *s; /* Signature */
- u32 s_size; /* Number of bytes in signature */
u8 *digest;
- u8 digest_size; /* Number of bytes in digest */
+ u32 s_size; /* Number of bytes in signature */
+ u32 digest_size; /* Number of bytes in digest */
const char *pkey_algo;
const char *hash_algo;
const char *encoding;
@@ -69,6 +74,33 @@ extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
const union key_payload *payload,
struct key *trusted);
+#if IS_REACHABLE(CONFIG_ASYMMETRIC_KEY_TYPE)
+extern int restrict_link_by_ca(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring);
+int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring);
+#else
+static inline int restrict_link_by_ca(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ return 0;
+}
+
+static inline int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ return 0;
+}
+#endif
+
extern int query_asymmetric_key(const struct kernel_pkey_params *,
struct kernel_pkey_query *);
@@ -78,7 +110,16 @@ extern int create_signature(struct kernel_pkey_params *, const void *, void *);
extern int verify_signature(const struct key *,
const struct public_key_signature *);
+#if IS_REACHABLE(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE)
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig);
+#else
+static inline
+int public_key_verify_signature(const struct public_key *pkey,
+ const struct public_key_signature *sig)
+{
+ return -EINVAL;
+}
+#endif
#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 8b4b844b4eef..5ac4388f50e1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -9,6 +9,8 @@
#ifndef _CRYPTO_RNG_H
#define _CRYPTO_RNG_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
struct crypto_rng;
@@ -94,6 +96,11 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
return &tfm->base;
}
+static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct rng_alg, base);
+}
+
/**
* crypto_rng_alg - obtain name of RNG
* @tfm: cipher handle
@@ -104,13 +111,14 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
- return container_of(crypto_rng_tfm(tfm)->__crt_alg,
- struct rng_alg, base);
+ return __crypto_rng_alg(crypto_rng_tfm(tfm)->__crt_alg);
}
/**
* crypto_free_rng() - zeroize and free RNG handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
@@ -135,13 +143,7 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- struct crypto_alg *alg = tfm->base.__crt_alg;
- int ret;
-
- crypto_stats_get(alg);
- ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
- crypto_stats_rng_generate(alg, dlen, ret);
- return ret;
+ return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
}
/**
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index c837d0775474..32fc4473175b 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -12,8 +12,9 @@
#define _CRYPTO_SCATTERWALK_H
#include <crypto/algapi.h>
+
#include <linux/highmem.h>
-#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
@@ -45,12 +46,6 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
walk->offset += nbytes;
}
-static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
- unsigned int alignmask)
-{
- return !(walk->offset & alignmask);
-}
-
static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
@@ -58,7 +53,7 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
static inline void scatterwalk_unmap(void *vaddr)
{
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
}
static inline void scatterwalk_start(struct scatter_walk *walk,
@@ -70,7 +65,7 @@ static inline void scatterwalk_start(struct scatter_walk *walk,
static inline void *scatterwalk_map(struct scatter_walk *walk)
{
- return kmap_atomic(scatterwalk_page(walk)) +
+ return kmap_local_page(scatterwalk_page(walk)) +
offset_in_page(walk->offset);
}
@@ -81,12 +76,7 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
struct page *page;
page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
- * PageSlab cannot be optimised away per se due to
- * use of volatile pointer.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
- flush_dcache_page(page);
+ flush_dcache_page(page);
}
if (more && walk->offset >= walk->sg->offset + walk->sg->length)
@@ -103,7 +93,6 @@ static inline void scatterwalk_done(struct scatter_walk *walk, int out,
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out);
-void *scatterwalk_map(struct scatter_walk *walk);
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out);
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
new file mode 100644
index 000000000000..044ecea60ac8
--- /dev/null
+++ b/include/crypto/sha1.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for SHA-1 algorithms
+ */
+
+#ifndef _CRYPTO_SHA1_H
+#define _CRYPTO_SHA1_H
+
+#include <linux/types.h>
+
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
+
+struct sha1_state {
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buffer[SHA1_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+/*
+ * An implementation of SHA-1's compression function. Don't use in new code!
+ * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
+ * the correct way to hash something with SHA-1 (use crypto_shash instead).
+ */
+#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
+#define SHA1_WORKSPACE_WORDS 16
+void sha1_init(__u32 *buf);
+void sha1_transform(__u32 *digest, const char *data, __u32 *W);
+
+#endif /* _CRYPTO_SHA1_H */
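As the comment above recommends, new users should go through the crypto_shash API instead of sha1_transform(); a minimal sketch of that path:

#include <crypto/hash.h>
#include <crypto/sha1.h>

static int my_sha1_digest(const u8 *data, unsigned int len,
                          u8 out[SHA1_DIGEST_SIZE])
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_shash_tfm_digest(tfm, data, len, out);
        crypto_free_shash(tfm);
        return err;
}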
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 20fd1f7468af..2e0e7c3827d1 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -9,9 +9,10 @@
#define _CRYPTO_SHA1_BASE_H
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
@@ -101,7 +102,7 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sha1_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sha.h b/include/crypto/sha2.h
index 4ff3da816630..b9e9281d76c9 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha2.h
@@ -1,16 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Common values for SHA algorithms
+ * Common values for SHA-2 algorithms
*/
-#ifndef _CRYPTO_SHA_H
-#define _CRYPTO_SHA_H
+#ifndef _CRYPTO_SHA2_H
+#define _CRYPTO_SHA2_H
#include <linux/types.h>
-#define SHA1_DIGEST_SIZE 20
-#define SHA1_BLOCK_SIZE 64
-
#define SHA224_DIGEST_SIZE 28
#define SHA224_BLOCK_SIZE 64
@@ -23,12 +20,6 @@
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
-#define SHA1_H0 0x67452301UL
-#define SHA1_H1 0xefcdab89UL
-#define SHA1_H2 0x98badcfeUL
-#define SHA1_H3 0x10325476UL
-#define SHA1_H4 0xc3d2e1f0UL
-
#define SHA224_H0 0xc1059ed8UL
#define SHA224_H1 0x367cd507UL
#define SHA224_H2 0x3070dd17UL
@@ -65,8 +56,6 @@
#define SHA512_H6 0x1f83d9abfb41bd6bULL
#define SHA512_H7 0x5be0cd19137e2179ULL
-extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
-
extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
@@ -75,12 +64,6 @@ extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
-struct sha1_state {
- u32 state[SHA1_DIGEST_SIZE / 4];
- u64 count;
- u8 buffer[SHA1_BLOCK_SIZE];
-};
-
struct sha256_state {
u32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
@@ -95,12 +78,6 @@ struct sha512_state {
struct shash_desc;
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
@@ -114,16 +91,6 @@ extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash);
/*
- * An implementation of SHA-1's compression function. Don't use in new code!
- * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
- * the correct way to hash something with SHA-1 (use crypto_shash instead).
- */
-#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
-#define SHA1_WORKSPACE_WORDS 16
-void sha1_init(__u32 *buf);
-void sha1_transform(__u32 *digest, const char *data, __u32 *W);
-
-/*
* Stand-alone implementation of the SHA256 algorithm. It is designed to
* have as little dependencies as possible so it can be used in the
* kexec_file purgatory. In other cases you should generally use the
@@ -161,7 +128,7 @@ static inline void sha224_init(struct sha256_state *sctx)
sctx->state[7] = SHA224_H7;
sctx->count = 0;
}
-void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
+/* Simply use sha256_update as it is equivalent to sha224_update. */
void sha224_final(struct sha256_state *sctx, u8 *out);
-#endif
+#endif /* _CRYPTO_SHA2_H */
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index 6ded110783ae..ab904d82236f 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -8,12 +8,12 @@
#ifndef _CRYPTO_SHA256_BASE_H
#define _CRYPTO_SHA256_BASE_H
-#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-
+#include <asm/byteorder.h>
#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <linux/string.h>
+#include <linux/types.h>
typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
int blocks);
@@ -34,12 +34,11 @@ static inline int sha256_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
+static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->count += len;
@@ -72,11 +71,20 @@ static inline int sha256_base_do_update(struct shash_desc *desc,
return 0;
}
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
+static inline int sha256_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_do_update(sctx, data, len, block_fn);
+}
+
+static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
+ sha256_block_fn *block_fn)
+{
+ const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
@@ -95,18 +103,33 @@ static inline int sha256_base_do_finalize(struct shash_desc *desc,
return 0;
}
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+ sha256_block_fn *block_fn)
{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_do_finalize(sctx, block_fn);
+}
+
+static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
+ unsigned int digest_size)
+{
__be32 *digest = (__be32 *)out;
int i;
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sha256_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_finish(sctx, out, digest_size);
+}
+
#endif /* _CRYPTO_SHA256_BASE_H */
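The lib_*/non-lib split above lets the shash glue and library code share one walker; an arch driver's shash callbacks then typically reduce to thin wrappers (sketch; my_sha256_block_fn() stands in for the arch-specific compression function):

#include <crypto/sha256_base.h>

/* Arch-specific compression function, placeholder declaration. */
static void my_sha256_block_fn(struct sha256_state *sst, u8 const *src,
                               int blocks);

static int my_sha256_update(struct shash_desc *desc, const u8 *data,
                            unsigned int len)
{
        return sha256_base_do_update(desc, data, len, my_sha256_block_fn);
}

static int my_sha256_finup(struct shash_desc *desc, const u8 *data,
                           unsigned int len, u8 *out)
{
        if (len)
                sha256_base_do_update(desc, data, len, my_sha256_block_fn);
        sha256_base_do_finalize(desc, my_sha256_block_fn);
        return sha256_base_finish(desc, out);
}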
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index fb19c77494dc..b370b3340b16 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -9,9 +9,10 @@
#define _CRYPTO_SHA512_BASE_H
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
@@ -126,7 +127,7 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
put_unaligned_be64(sctx->state[i], digest++);
- *sctx = (struct sha512_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
new file mode 100644
index 000000000000..d25186bb2be3
--- /dev/null
+++ b/include/crypto/sig.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_SIG_H
+#define _CRYPTO_SIG_H
+
+#include <linux/crypto.h>
+
+/**
+ * struct crypto_sig - user-instantiated object which encapsulates
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_sig {
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Generic Public Key Signature API
+ *
+ * The Public Key Signature API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_SIG (listed as type "sig" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_sig() - allocate signature tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * signing algorithm e.g. "ecdsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for public key signature algorithm. The returned struct
+ * crypto_sig is the handle that is required for any subsequent
+ * API invocation for signature operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_sig() - free signature tfm handle
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_sig(struct crypto_sig *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_sig_tfm(tfm));
+}
+
+/**
+ * crypto_sig_maxsize() - Get len for output buffer
+ *
+ * Function returns the dest buffer size required for a given key.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you will end up
+ * in a NULL dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+int crypto_sig_maxsize(struct crypto_sig *tfm);
+
+/**
+ * crypto_sig_sign() - Invoke signing operation
+ *
+ * Function invokes the specific signing operation for a given algorithm
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
+ * crypto_sig_verify() - Invoke signature verification
+ *
+ * Function invokes the specific signature verification operation
+ * for a given algorithm.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @digest: digest
+ * @dlen: digest length
+ *
+ * Return: zero on verification success; error code in case of error.
+ */
+int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen);
+
+/**
+ * crypto_sig_set_pubkey() - Invoke set public key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded public key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+
+/**
+ * crypto_sig_set_privkey() - Invoke set private key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded private key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+#endif
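As a usage illustration for the new interface (not from the patch), verifying a detached signature against an encoded public key might look as follows; the algorithm name is only an example of what a caller could request:

#include <crypto/sig.h>

static int my_verify(const void *key, unsigned int keylen,
                     const void *sig, unsigned int siglen,
                     const void *digest, unsigned int digestlen)
{
        struct crypto_sig *tfm;
        int err;

        tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_sig_set_pubkey(tfm, key, keylen);
        if (!err)
                err = crypto_sig_verify(tfm, sig, siglen, digest, digestlen);

        crypto_free_sig(tfm);
        return err;
}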
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 6a733b171a5d..18a86e0af016 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -8,9 +8,25 @@
#ifndef _CRYPTO_SKCIPHER_H
#define _CRYPTO_SKCIPHER_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/* Set this bit if the lskcipher operation is a continuation. */
+#define CRYPTO_LSKCIPHER_FLAG_CONT 0x00000001
+/* Set this bit if the lskcipher operation is final. */
+#define CRYPTO_LSKCIPHER_FLAG_FINAL 0x00000002
+/* The bit CRYPTO_TFM_REQ_MAY_SLEEP can also be set if needed. */
+
+/* Set this bit if the skcipher operation is a continuation. */
+#define CRYPTO_SKCIPHER_REQ_CONT 0x00000001
+/* Set this bit if the skcipher operation is not final. */
+#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002
+
+struct scatterlist;
/**
* struct skcipher_request - Symmetric key cipher request
@@ -44,8 +60,12 @@ struct crypto_sync_skcipher {
struct crypto_skcipher base;
};
-/**
- * struct skcipher_alg - symmetric key cipher definition
+struct crypto_lskcipher {
+ struct crypto_tfm base;
+};
+
+/*
+ * struct skcipher_alg_common - common properties of skcipher_alg
* @min_keysize: Minimum key size supported by the transformation. This is the
* smallest key length supported by this transformation algorithm.
* This must be set to one of the pre-defined values as this is
@@ -56,6 +76,26 @@ struct crypto_sync_skcipher {
* This must be set to one of the pre-defined values as this is
* not hardware specific. Possible values for this field can be
* found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ * IV of exactly that size to perform the encrypt or decrypt operation.
+ * @chunksize: Equal to the block size except for stream ciphers such as
+ * CTR where it is set to the underlying block size.
+ * @statesize: Size of the internal state for the algorithm.
+ * @base: Definition of a generic crypto algorithm.
+ */
+#define SKCIPHER_ALG_COMMON { \
+ unsigned int min_keysize; \
+ unsigned int max_keysize; \
+ unsigned int ivsize; \
+ unsigned int chunksize; \
+ unsigned int statesize; \
+ \
+ struct crypto_alg base; \
+}
+struct skcipher_alg_common SKCIPHER_ALG_COMMON;
+
+/**
+ * struct skcipher_alg - symmetric key cipher definition
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
@@ -79,6 +119,17 @@ struct crypto_sync_skcipher {
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save the partial result of the transformation after
+ * processing a certain amount of data and reload this partial result
+ * multiple times later on for re-use. No data processing
+ * happens at this point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
@@ -90,14 +141,10 @@ struct crypto_sync_skcipher {
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- * @chunksize: Equal to the block size except for stream ciphers such as
- * CTR where it is set to the underlying block size.
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
- * @base: Definition of a generic crypto algorithm.
+ * @co: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
*/
@@ -106,16 +153,63 @@ struct skcipher_alg {
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
+ int (*export)(struct skcipher_request *req, void *out);
+ int (*import)(struct skcipher_request *req, const void *in);
int (*init)(struct crypto_skcipher *tfm);
void (*exit)(struct crypto_skcipher *tfm);
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
- unsigned int chunksize;
unsigned int walksize;
- struct crypto_alg base;
+ union {
+ struct SKCIPHER_ALG_COMMON;
+ struct skcipher_alg_common co;
+ };
+};
+
+/**
+ * struct lskcipher_alg - linear symmetric key cipher definition
+ * @setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function can
+ * be called multiple times during the existence of the transformation
+ * object, so one must make sure the key is properly reprogrammed into
+ * the hardware. This function is also responsible for checking the key
+ * length for validity. In case a software fallback was put in place in
+ * the @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a number of bytes. This function is used to encrypt
+ * the supplied data. This function shall not modify
+ * the transformation context, as this function may be called
+ * in parallel with the same transformation object. Data
+ *	     may be left over if the length is not a multiple of the block
+ *	     size and there is more to come (final == false). The number of
+ * left-over bytes should be returned in case of success.
+ * The siv field shall be as long as ivsize + statesize with
+ * the IV placed at the front. The state will be used by the
+ * algorithm internally.
+ * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to
+ * @encrypt and the conditions are exactly the same.
+ * @init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ * @co: see struct skcipher_alg_common
+ */
+struct lskcipher_alg {
+ int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+ int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+ int (*init)(struct crypto_lskcipher *tfm);
+ void (*exit)(struct crypto_lskcipher *tfm);
+
+ struct skcipher_alg_common co;
};
#define MAX_SYNC_SKCIPHER_REQSIZE 384
@@ -187,15 +281,41 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
u32 type, u32 mask);
+
+/**
+ * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * lskcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an lskcipher. The returned struct
+ * crypto_lskcipher is the cipher handle that is required for any subsequent
+ * API invocation for that lskcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
+ u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_lskcipher_tfm(
+ struct crypto_lskcipher *tfm)
+{
+ return &tfm->base;
+}
+
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
{
@@ -208,6 +328,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
}
/**
+ * crypto_free_lskcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm));
+}
+
+/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
@@ -225,6 +356,19 @@ static inline const char *crypto_skcipher_driver_name(
return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
}
+static inline const char *crypto_lskcipher_driver_name(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm));
+}
+
+static inline struct skcipher_alg_common *crypto_skcipher_alg_common(
+ struct crypto_skcipher *tfm)
+{
+ return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
+ struct skcipher_alg_common, base);
+}
+
static inline struct skcipher_alg *crypto_skcipher_alg(
struct crypto_skcipher *tfm)
{
@@ -232,9 +376,11 @@ static inline struct skcipher_alg *crypto_skcipher_alg(
struct skcipher_alg, base);
}
-static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
+static inline struct lskcipher_alg *crypto_lskcipher_alg(
+ struct crypto_lskcipher *tfm)
{
- return alg->ivsize;
+ return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg,
+ struct lskcipher_alg, co.base);
}
/**
@@ -248,7 +394,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->ivsize;
+ return crypto_skcipher_alg_common(tfm)->ivsize;
}
static inline unsigned int crypto_sync_skcipher_ivsize(
@@ -258,6 +404,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize(
}
/**
+ * crypto_lskcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the lskcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_lskcipher_ivsize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.ivsize;
+}
+
+/**
* crypto_skcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
*
@@ -273,10 +434,20 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
-static inline unsigned int crypto_skcipher_alg_chunksize(
- struct skcipher_alg *alg)
+/**
+ * crypto_lskcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the lskcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_lskcipher_blocksize(
+ struct crypto_lskcipher *tfm)
{
- return alg->chunksize;
+ return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm));
}
/**
@@ -293,7 +464,58 @@ static inline unsigned int crypto_skcipher_alg_chunksize(
static inline unsigned int crypto_skcipher_chunksize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+ return crypto_skcipher_alg_common(tfm)->chunksize;
+}
+
+/**
+ * crypto_lskcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_lskcipher_chunksize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.chunksize;
+}
+
+/**
+ * crypto_skcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_skcipher_statesize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_common(tfm)->statesize;
+}
+
+/**
+ * crypto_lskcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_lskcipher_statesize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.statesize;
}
static inline unsigned int crypto_sync_skcipher_blocksize(
@@ -308,6 +530,12 @@ static inline unsigned int crypto_skcipher_alignmask(
return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_lskcipher_alignmask(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm));
+}
+
static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
{
return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
@@ -343,6 +571,23 @@ static inline void crypto_sync_skcipher_clear_flags(
crypto_skcipher_clear_flags(&tfm->base, flags);
}
+static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm));
+}
+
+static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags);
+}
+
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
@@ -368,16 +613,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
+/**
+ * crypto_lskcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the lskcipher referenced by the cipher
+ * handle.
+ *
+ * Note that the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When a
+ * 16-byte key is provided for an AES cipher handle, AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm,
+ const u8 *key, unsigned int keylen);
+
static inline unsigned int crypto_skcipher_min_keysize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->min_keysize;
+ return crypto_skcipher_alg_common(tfm)->min_keysize;
}
static inline unsigned int crypto_skcipher_max_keysize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->max_keysize;
+ return crypto_skcipher_alg_common(tfm)->max_keysize;
+}
+
+static inline unsigned int crypto_lskcipher_min_keysize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.min_keysize;
+}
+
+static inline unsigned int crypto_lskcipher_max_keysize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.max_keysize;
}
/**
@@ -430,6 +706,78 @@ int crypto_skcipher_encrypt(struct skcipher_request *req);
int crypto_skcipher_decrypt(struct skcipher_request *req);
/**
+ * crypto_skcipher_export() - export partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @out: output buffer of sufficient size that can hold the state
+ *
+ * Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be restored via @import later on. This is useful when
+ * you want to save the partial result of the transformation after
+ * processing a certain amount of data and reload that partial result
+ * multiple times later on for re-use. No data processing
+ * happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_export(struct skcipher_request *req, void *out);
+
+/**
+ * crypto_skcipher_import() - import partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @in: buffer holding the state
+ *
+ * Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_import(struct skcipher_request *req, const void *in);
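As a usage illustration, a hedged sketch follows of how export/import might
split one encryption into two requests. The split_encrypt() helper, the buffer
handling, and the assumption that the split point is a multiple of the chunk
size are illustrative only; the synchronous wrapper uses the standard
crypto_wait_req()/crypto_req_done() helpers.

/* Illustrative only: encrypt the first part of a buffer, export the
 * running state, re-import it and finish the rest. Assumes "half" is a
 * multiple of crypto_skcipher_chunksize(tfm) and that the algorithm
 * exposes a non-zero statesize. */
static int split_encrypt(struct crypto_skcipher *tfm, u8 *buf,
			 unsigned int len, u8 *iv)
{
	unsigned int half = len / 2;
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	struct scatterlist sg;
	void *state;
	int err;

	state = kmalloc(crypto_skcipher_statesize(tfm), GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!state || !req) {
		err = -ENOMEM;
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	sg_init_one(&sg, buf, half);
	skcipher_request_set_crypt(req, &sg, &sg, half, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (err)
		goto out;

	/* Save the partial state, then reload it before continuing. */
	err = crypto_skcipher_export(req, state);
	if (!err)
		err = crypto_skcipher_import(req, state);
	if (err)
		goto out;

	sg_init_one(&sg, buf + half, len - half);
	skcipher_request_set_crypt(req, &sg, &sg, len - half, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out:
	skcipher_request_free(req);
	kfree_sensitive(state);
	return err;
}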
+
+/**
+ * crypto_lskcipher_encrypt() - encrypt plaintext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed with a buffer with the length as specified by
+ * crypto_lskcipher_statesize.
+ *
+ * Encrypt plaintext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful; a positive value
+ *	   is the number of bytes left unprocessed;
+ * < 0 if an error occurred
+ */
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv);
+
+/**
+ * crypto_lskcipher_decrypt() - decrypt ciphertext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed with a buffer with the length as specified by
+ * crypto_lskcipher_statesize.
+ *
+ * Decrypt ciphertext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful; a positive value
+ *	   is the number of bytes left unprocessed;
+ * < 0 if an error occurred
+ */
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv);
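To tie the lskcipher calls together, here is a hedged one-shot encryption
sketch; the algorithm name "cbc(aes)" and the wrapper name are assumptions
chosen for illustration.

static int example_lskcipher_encrypt(const u8 *key, unsigned int keylen,
				     const u8 *src, u8 *dst, unsigned int len)
{
	struct crypto_lskcipher *tfm;
	u8 *siv;
	int err;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The siv buffer carries the IV followed by the internal state. */
	siv = kzalloc(crypto_lskcipher_ivsize(tfm) +
		      crypto_lskcipher_statesize(tfm), GFP_KERNEL);
	if (!siv) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	get_random_bytes(siv, crypto_lskcipher_ivsize(tfm));

	err = crypto_lskcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_siv;

	/* A positive return value would be the number of trailing bytes
	 * left unprocessed; 0 here since the whole buffer is passed at once. */
	err = crypto_lskcipher_encrypt(tfm, src, dst, len, siv);

out_free_siv:
	kfree_sensitive(siv);
out_free_tfm:
	crypto_free_lskcipher(tfm);
	return err;
}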
+
+/**
* DOC: Symmetric Key Cipher Request Handle
*
* The skcipher_request data structure contains all pointers to data
@@ -488,19 +836,20 @@ static inline struct skcipher_request *skcipher_request_cast(
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
-static inline struct skcipher_request *skcipher_request_alloc(
+static inline struct skcipher_request *skcipher_request_alloc_noprof(
struct crypto_skcipher *tfm, gfp_t gfp)
{
struct skcipher_request *req;
- req = kmalloc(sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(tfm), gfp);
+ req = kmalloc_noprof(sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(tfm), gfp);
if (likely(req))
skcipher_request_set_tfm(req, tfm);
return req;
}
+#define skcipher_request_alloc(...) alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__))
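For completeness, a hedged sketch of the request-based flow that
skcipher_request_alloc() feeds into; the tfm is assumed to have been allocated
with crypto_alloc_skcipher() and keyed already, and the synchronous wait uses
the standard DECLARE_CRYPTO_WAIT()/crypto_wait_req() helpers.

static int example_skcipher_encrypt(struct crypto_skcipher *tfm,
				    u8 *buf, unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Turn the possibly asynchronous request into a synchronous call. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}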
/**
* skcipher_request_free() - zeroize and free request data structure
diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h
new file mode 100644
index 000000000000..04a92c1013c8
--- /dev/null
+++ b/include/crypto/sm2.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * sm2.h - SM2 asymmetric public-key algorithm
+ * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
+ * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ *
+ * Copyright (c) 2020, Alibaba Group.
+ * Written by Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#ifndef _CRYPTO_SM2_H
+#define _CRYPTO_SM2_H
+
+struct shash_desc;
+
+#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
+int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen, void *dgst);
+#else
+static inline int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen,
+ void *dgst)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* _CRYPTO_SM2_H */
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 1438942dc773..1f021ad0533f 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -1,5 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Common values for SM3 algorithm
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM3_H
@@ -30,11 +35,30 @@ struct sm3_state {
u8 buffer[SM3_BLOCK_SIZE];
};
-struct shash_desc;
+/*
+ * Stand-alone implementation of the SM3 algorithm. It is designed to
+ * have as few dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should generally use the
+ * hash APIs from include/crypto/hash.h, especially when hashing large
+ * amounts of data, as those APIs may be hw-accelerated.
+ *
+ * For details see lib/crypto/sm3.c
+ */
+
+static inline void sm3_init(struct sm3_state *sctx)
+{
+ sctx->state[0] = SM3_IVA;
+ sctx->state[1] = SM3_IVB;
+ sctx->state[2] = SM3_IVC;
+ sctx->state[3] = SM3_IVD;
+ sctx->state[4] = SM3_IVE;
+ sctx->state[5] = SM3_IVF;
+ sctx->state[6] = SM3_IVG;
+ sctx->state[7] = SM3_IVH;
+ sctx->count = 0;
+}
-extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len);
+void sm3_final(struct sm3_state *sctx, u8 *out);
-extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
#endif
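A hedged sketch of how the stand-alone helpers above compose into a one-shot
SM3 digest; the wrapper name and buffers are illustrative.

static void example_sm3_digest(const u8 *data, unsigned int len,
			       u8 digest[SM3_DIGEST_SIZE])
{
	struct sm3_state sctx;

	sm3_init(&sctx);
	sm3_update(&sctx, data, len);
	sm3_final(&sctx, digest);
}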
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 1cbf9aa1fe52..2f3a32ab97bb 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -13,6 +13,7 @@
#include <crypto/sm3.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
@@ -104,7 +105,7 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sm3_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
index 7afd730d16ff..9656a9a40326 100644
--- a/include/crypto/sm4.h
+++ b/include/crypto/sm4.h
@@ -3,6 +3,7 @@
/*
* Common values for the SM4 algorithm
* Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM4_H
@@ -15,17 +16,33 @@
#define SM4_BLOCK_SIZE 16
#define SM4_RKEY_WORDS 32
-struct crypto_sm4_ctx {
+struct sm4_ctx {
u32 rkey_enc[SM4_RKEY_WORDS];
u32 rkey_dec[SM4_RKEY_WORDS];
};
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len);
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+extern const u32 crypto_sm4_fk[];
+extern const u32 crypto_sm4_ck[];
+extern const u8 crypto_sm4_sbox[];
+
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
unsigned int key_len);
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk: The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out: Buffer to store output data
+ * @in: Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in);
#endif
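A hedged sketch of the library interface above: expand a key once, then encrypt
a single block with the encryption round keys (rkey_dec would be used for
decryption). The wrapper name is illustrative; SM4_KEY_SIZE is defined earlier
in this header.

static int example_sm4_encrypt_block(const u8 key[SM4_KEY_SIZE],
				     const u8 in[SM4_BLOCK_SIZE],
				     u8 out[SM4_BLOCK_SIZE])
{
	struct sm4_ctx ctx;
	int err;

	err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
	if (err)
		return err;

	/* Use ctx.rkey_dec here instead to decrypt a block. */
	sm4_crypt_block(ctx.rkey_enc, out, in);

	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}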
diff --git a/include/crypto/utils.h b/include/crypto/utils.h
new file mode 100644
index 000000000000..acbb917a00c6
--- /dev/null
+++ b/include/crypto/utils.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic utilities
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_UTILS_H
+#define _CRYPTO_UTILS_H
+
+#include <asm/unaligned.h>
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
+
+static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s = (unsigned long *)src;
+ unsigned long l;
+
+ while (size > 0) {
+ l = get_unaligned(d) ^ get_unaligned(s++);
+ put_unaligned(l, d++);
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, dst, src, size);
+ }
+}
+
+static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
+ unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s1 = (unsigned long *)src1;
+ unsigned long *s2 = (unsigned long *)src2;
+ unsigned long l;
+
+ while (size > 0) {
+ l = get_unaligned(s1++) ^ get_unaligned(s2++);
+ put_unaligned(l, d++);
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, src1, src2, size);
+ }
+}
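As a usage note, a hedged sketch of the two XOR helpers in a stream-cipher
style combine step; the wrapper and buffer names are illustrative.

/* ct = pt ^ keystream, without modifying either source buffer. */
static void example_xor_block(u8 *ct, const u8 *pt, const u8 *keystream,
			      unsigned int len)
{
	crypto_xor_cpy(ct, pt, keystream, len);
}

/* In-place variant: buf ^= keystream. */
static void example_xor_inplace(u8 *buf, const u8 *keystream,
				unsigned int len)
{
	crypto_xor(buf, keystream, len);
}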
+
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ * timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+ return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
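A hedged sketch of the intended use: comparing an authentication tag without
the early-exit timing behaviour of memcmp(); the wrapper is illustrative.

static int example_check_tag(const u8 *calculated, const u8 *received,
			     size_t taglen)
{
	/* A plain memcmp() could leak how many leading bytes matched via
	 * timing; crypto_memneq() inspects the whole buffer regardless. */
	return crypto_memneq(calculated, received, taglen) ? -EBADMSG : 0;
}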
+
+#endif /* _CRYPTO_UTILS_H */
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 0f8dba69feb4..15b16c4853d8 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -8,8 +8,8 @@
#define XTS_BLOCK_SIZE 16
-static inline int xts_check_key(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
/*
* key consists of keys of equal size concatenated, therefore
@@ -18,24 +18,17 @@ static inline int xts_check_key(struct crypto_tfm *tfm,
if (keylen % 2)
return -EINVAL;
- /* ensure that the AES and tweak key are not identical */
- if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2))
- return -EINVAL;
-
- return 0;
-}
-
-static inline int xts_verify_key(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
/*
- * key consists of keys of equal size concatenated, therefore
- * the length must be even.
+ * In FIPS mode only a combined key length of either 256 or
+	 * 512 bits is allowed, cf. FIPS 140-3 IG C.I.
*/
- if (keylen % 2)
+ if (fips_enabled && keylen != 32 && keylen != 64)
return -EINVAL;
- /* ensure that the AES and tweak key are not identical */
+ /*
+ * Ensure that the AES and tweak key are not identical when
+ * in FIPS mode or the FORBID_WEAK_KEYS flag is set.
+ */
if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
!crypto_memneq(key, key + (keylen / 2), keylen / 2))