Diffstat (limited to 'include/crypto')
-rw-r--r--  include/crypto/acompress.h             |  38
-rw-r--r--  include/crypto/aead.h                  |  51
-rw-r--r--  include/crypto/akcipher.h              |  76
-rw-r--r--  include/crypto/algapi.h                |  14
-rw-r--r--  include/crypto/cbc.h                   |   2
-rw-r--r--  include/crypto/chacha20.h              |   3
-rw-r--r--  include/crypto/hash.h                  |  38
-rw-r--r--  include/crypto/internal/cryptouser.h   |   8
-rw-r--r--  include/crypto/internal/geniv.h        |   2
-rw-r--r--  include/crypto/kpp.h                   |  51
-rw-r--r--  include/crypto/mcryptd.h               | 114
-rw-r--r--  include/crypto/morus1280_glue.h        |   2
-rw-r--r--  include/crypto/morus640_glue.h         |   2
-rw-r--r--  include/crypto/null.h                  |   2
-rw-r--r--  include/crypto/rng.h                   |  29
-rw-r--r--  include/crypto/skcipher.h              | 118
-rw-r--r--  include/crypto/speck.h                 |  62
17 files changed, 399 insertions(+), 213 deletions(-)
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index e328b52425a8..22e6f412c595 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -234,6 +234,34 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
+static inline void crypto_stat_compress(struct acomp_req *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->compress_cnt);
+ atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decompress_cnt);
+ atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen);
+ }
+#endif
+}
+
/**
* crypto_acomp_compress() -- Invoke asynchronous compress operation
*
@@ -246,8 +274,11 @@ static inline void acomp_request_set_params(struct acomp_req *req,
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ int ret;
- return tfm->compress(req);
+ ret = tfm->compress(req);
+ crypto_stat_compress(req, ret);
+ return ret;
}
/**
@@ -262,8 +293,11 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ int ret;
- return tfm->decompress(req);
+ ret = tfm->decompress(req);
+ crypto_stat_decompress(req, ret);
+ return ret;
}
#endif
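
For context, a minimal caller-side sketch (not part of the patch): the new statistics hooks run inside the inline wrappers, so existing acomp users need no changes. The algorithm name "deflate" and the synchronous-wait pattern are illustrative assumptions.

	#include <crypto/acompress.h>
	#include <linux/crypto.h>

	/* Compress src (slen bytes) into dst (dlen bytes available). The
	 * compress_cnt/compress_tlen or compress_err_cnt counters added by
	 * this patch are bumped inside crypto_acomp_compress() itself.
	 */
	static int demo_compress(struct scatterlist *src, unsigned int slen,
				 struct scatterlist *dst, unsigned int dlen)
	{
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_acomp("deflate", 0, 0);	/* example alg */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			crypto_free_acomp(tfm);
			return -ENOMEM;
		}

		acomp_request_set_params(req, src, dst, slen, dlen);
		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);

		err = crypto_wait_req(crypto_acomp_compress(req), &wait);

		acomp_request_free(req);
		crypto_free_acomp(tfm);
		return err;
	}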
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 1e26f790b03f..0d765d7bfb82 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -306,6 +306,34 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
return __crypto_aead_cast(req->base.tfm);
}
+static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
+ atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
+ atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen);
+ }
+#endif
+}
+
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -328,11 +356,14 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
static inline int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ret;
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_aead_alg(aead)->encrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = crypto_aead_alg(aead)->encrypt(req);
+ crypto_stat_aead_encrypt(req, ret);
+ return ret;
}
/**
@@ -360,14 +391,16 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ret;
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- if (req->cryptlen < crypto_aead_authsize(aead))
- return -EINVAL;
-
- return crypto_aead_alg(aead)->decrypt(req);
+ ret = -ENOKEY;
+ else if (req->cryptlen < crypto_aead_authsize(aead))
+ ret = -EINVAL;
+ else
+ ret = crypto_aead_alg(aead)->decrypt(req);
+ crypto_stat_aead_decrypt(req, ret);
+ return ret;
}
/**
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index b5e11de4d497..afac71119396 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -271,6 +271,62 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
+static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
+ atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
+ atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_akcipher_sign(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->sign_cnt);
+#endif
+}
+
+static inline void crypto_stat_akcipher_verify(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->verify_cnt);
+#endif
+}
+
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -285,8 +341,11 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->encrypt(req);
+ ret = alg->encrypt(req);
+ crypto_stat_akcipher_encrypt(req, ret);
+ return ret;
}
/**
@@ -303,8 +362,11 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->decrypt(req);
+ ret = alg->decrypt(req);
+ crypto_stat_akcipher_decrypt(req, ret);
+ return ret;
}
/**
@@ -321,8 +383,11 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->sign(req);
+ ret = alg->sign(req);
+ crypto_stat_akcipher_sign(req, ret);
+ return ret;
}
/**
@@ -339,8 +404,11 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->verify(req);
+ ret = alg->verify(req);
+ crypto_stat_akcipher_verify(req, ret);
+ return ret;
}
/**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index bd5e8ccf1687..4a5ad10e75f0 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -20,8 +20,10 @@
/*
* Maximum values for blocksize and alignmask, used to allocate
* static buffers that are big enough for any combination of
- * ciphers and architectures.
+ * algs and architectures. Ciphers have a lower maximum size.
*/
+#define MAX_ALGAPI_BLOCKSIZE 160
+#define MAX_ALGAPI_ALIGNMASK 63
#define MAX_CIPHER_BLOCKSIZE 16
#define MAX_CIPHER_ALIGNMASK 15
@@ -425,4 +427,14 @@ static inline void crypto_yield(u32 flags)
#endif
}
+int crypto_register_notifier(struct notifier_block *nb);
+int crypto_unregister_notifier(struct notifier_block *nb);
+
+/* Crypto notification events. */
+enum {
+ CRYPTO_MSG_ALG_REQUEST,
+ CRYPTO_MSG_ALG_REGISTER,
+ CRYPTO_MSG_ALG_LOADED,
+};
+
#endif /* _CRYPTO_ALGAPI_H */
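
The notifier and event additions above can be consumed roughly as follows. This is an illustrative sketch, not code from the patch; it assumes the caller can link against crypto_register_notifier()/crypto_unregister_notifier() and only logs one of the new events.

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <crypto/algapi.h>

	/* Hypothetical listener: report when an algorithm finishes loading. */
	static int demo_crypto_event(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		if (event == CRYPTO_MSG_ALG_LOADED)
			pr_info("crypto: an algorithm was loaded\n");
		return NOTIFY_DONE;
	}

	static struct notifier_block demo_nb = {
		.notifier_call = demo_crypto_event,
	};

	static int __init demo_init(void)
	{
		return crypto_register_notifier(&demo_nb);
	}

	static void __exit demo_exit(void)
	{
		crypto_unregister_notifier(&demo_nb);
	}

	module_init(demo_init);
	module_exit(demo_exit);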
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
index f5b8bfc22e6d..3bf28beefa33 100644
--- a/include/crypto/cbc.h
+++ b/include/crypto/cbc.h
@@ -113,7 +113,7 @@ static inline int crypto_cbc_decrypt_inplace(
unsigned int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 last_iv[bsize];
+ u8 last_iv[MAX_CIPHER_BLOCKSIZE];
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index b83d66073db0..f76302d99e2b 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,13 +13,12 @@
#define CHACHA20_IV_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
-#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
struct chacha20_ctx {
u32 key[8];
};
-void chacha20_block(u32 *state, u32 *stream);
+void chacha20_block(u32 *state, u8 *stream);
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 76e432cab75d..bc7796600338 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -151,9 +151,13 @@ struct shash_desc {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+#define HASH_MAX_DIGESTSIZE 64
+#define HASH_MAX_DESCSIZE 360
+#define HASH_MAX_STATESIZE 512
+
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + \
- crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
+ HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
/**
@@ -408,6 +412,32 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
+ else
+ atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
+#endif
+}
+
+static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->hash_cnt);
+ atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
+ }
+#endif
+}
+
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -522,7 +552,11 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->update(req);
+ int ret;
+
+ ret = crypto_ahash_reqtfm(req)->update(req);
+ crypto_stat_ahash_update(req, ret);
+ return ret;
}
/**
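
As a worked example of the SHASH_DESC_ON_STACK change above (a fixed HASH_MAX_DESCSIZE buffer replaces the VLA sized by crypto_shash_descsize()), a minimal one-shot digest helper might look like this. It is a sketch for kernels of this generation, where shash_desc still carries a flags field; "out" must hold crypto_shash_digestsize(tfm) bytes.

	#include <crypto/hash.h>

	static int demo_shash_digest(struct crypto_shash *tfm,
				     const u8 *data, unsigned int len, u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);
		int err;

		desc->tfm = tfm;
		desc->flags = 0;	/* flags field exists in this era */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);	/* wipe the on-stack state */
		return err;
	}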
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 000000000000..8db299c25566
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <net/netlink.h>
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb);
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+int crypto_dump_reportstat_done(struct netlink_callback *cb);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 2bcfb931bc5b..71be24cd59bd 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_skcipher *sknull;
+ struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1bde0a6514fa..f517ba6d3a27 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -268,6 +268,42 @@ struct kpp_secret {
unsigned short len;
};
+static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->setsecret_cnt);
+#endif
+}
+
+static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt);
+#endif
+}
+
+static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt);
+#endif
+}
+
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -287,8 +323,11 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->set_secret(tfm, buffer, len);
+ ret = alg->set_secret(tfm, buffer, len);
+ crypto_stat_kpp_set_secret(tfm, ret);
+ return ret;
}
/**
@@ -308,8 +347,11 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->generate_public_key(req);
+ ret = alg->generate_public_key(req);
+ crypto_stat_kpp_generate_public_key(req, ret);
+ return ret;
}
/**
@@ -326,8 +368,11 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->compute_shared_secret(req);
+ ret = alg->compute_shared_secret(req);
+ crypto_stat_kpp_compute_shared_secret(req, ret);
+ return ret;
}
/**
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
deleted file mode 100644
index b67404fc4b34..000000000000
--- a/include/crypto/mcryptd.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Software async multibuffer crypto daemon headers
- *
- * Author:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * Copyright (c) 2014, Intel Corporation.
- */
-
-#ifndef _CRYPTO_MCRYPT_H
-#define _CRYPTO_MCRYPT_H
-
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <crypto/hash.h>
-
-struct mcryptd_ahash {
- struct crypto_ahash base;
-};
-
-static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
- struct crypto_ahash *tfm)
-{
- return (struct mcryptd_ahash *)tfm;
-}
-
-struct mcryptd_cpu_queue {
- struct crypto_queue queue;
- spinlock_t q_lock;
- struct work_struct work;
-};
-
-struct mcryptd_queue {
- struct mcryptd_cpu_queue __percpu *cpu_queue;
-};
-
-struct mcryptd_instance_ctx {
- struct crypto_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-struct mcryptd_hash_ctx {
- struct crypto_ahash *child;
- struct mcryptd_alg_state *alg_state;
-};
-
-struct mcryptd_tag {
- /* seq number of request */
- unsigned seq_num;
- /* arrival time of request */
- unsigned long arrival;
- unsigned long expire;
- int cpu;
-};
-
-struct mcryptd_hash_request_ctx {
- struct list_head waiter;
- crypto_completion_t complete;
- struct mcryptd_tag tag;
- struct crypto_hash_walk walk;
- u8 *out;
- int flag;
- struct ahash_request areq;
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
-void mcryptd_flusher(struct work_struct *work);
-
-enum mcryptd_req_type {
- MCRYPTD_NONE,
- MCRYPTD_UPDATE,
- MCRYPTD_FINUP,
- MCRYPTD_DIGEST,
- MCRYPTD_FINAL
-};
-
-struct mcryptd_alg_cstate {
- unsigned long next_flush;
- unsigned next_seq_num;
- bool flusher_engaged;
- struct delayed_work flush;
- int cpu;
- struct mcryptd_alg_state *alg_state;
- void *mgr;
- spinlock_t work_lock;
- struct list_head work_list;
- struct list_head flush_list;
-};
-
-struct mcryptd_alg_state {
- struct mcryptd_alg_cstate __percpu *alg_cstate;
- unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
-};
-
-/* return delay in jiffies from current time */
-static inline unsigned long get_delay(unsigned long t)
-{
- long delay;
-
- delay = (long) t - (long) jiffies;
- if (delay <= 0)
- return 0;
- else
- return (unsigned long) delay;
-}
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
-
-#endif
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index b26dd70efd9a..ba782e10065e 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- struct aead_alg crypto_morus1280_##id##_algs[] = {\
+ static struct aead_alg crypto_morus1280_##id##_algs[] = {\
{ \
.setkey = crypto_morus1280_glue_setkey, \
.setauthsize = crypto_morus1280_glue_setauthsize, \
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index 90c8db07e740..27fa790a2362 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- struct aead_alg crypto_morus640_##id##_algs[] = {\
+ static struct aead_alg crypto_morus640_##id##_algs[] = {\
{ \
.setkey = crypto_morus640_glue_setkey, \
.setauthsize = crypto_morus640_glue_setauthsize, \
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 15aeef6e30ef..0ef577cc00e3 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,7 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_skcipher *crypto_get_default_null_skcipher(void);
+struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index b95ede354a66..6d258f5b68f1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -122,6 +122,29 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
+static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->seed_cnt);
+#endif
+}
+
+static inline void crypto_stat_rng_generate(struct crypto_rng *tfm,
+ unsigned int dlen, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->generate_cnt);
+ atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen);
+ }
+#endif
+}
+
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -140,7 +163,11 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+ int ret;
+
+ ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+ crypto_stat_rng_generate(tfm, dlen, ret);
+ return ret;
}
/**
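
For reference, caller-side RNG usage stays the same: crypto_rng_get_bytes() is a thin wrapper around crypto_rng_generate(), so the seed/generate counters added above fire transparently. A minimal sketch, assuming the default "stdrng" DRBG accepts a NULL, zero-length seed in crypto_rng_reset():

	#include <crypto/rng.h>

	static int demo_fill_random(u8 *buf, unsigned int len)
	{
		struct crypto_rng *rng;
		int err;

		rng = crypto_alloc_rng("stdrng", 0, 0);
		if (IS_ERR(rng))
			return PTR_ERR(rng);

		/* Reseed from the default entropy source (assumed OK with
		 * no personalization string for the DRBG).
		 */
		err = crypto_rng_reset(rng, NULL, 0);
		if (!err)
			err = crypto_rng_get_bytes(rng, buf, len);

		crypto_free_rng(rng);
		return err;
	}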
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 2f327f090c3e..925f547cdcfa 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -65,6 +65,10 @@ struct crypto_skcipher {
struct crypto_tfm base;
};
+struct crypto_sync_skcipher {
+ struct crypto_skcipher base;
+};
+
/**
* struct skcipher_alg - symmetric key cipher definition
* @min_keysize: Minimum key size supported by the transformation. This is the
@@ -139,9 +143,17 @@ struct skcipher_alg {
struct crypto_alg base;
};
-#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+#define MAX_SYNC_SKCIPHER_REQSIZE 384
+/*
+ * This performs a type-check against the "tfm" argument to make sure
+ * all users have the correct skcipher tfm for doing on-stack requests.
+ */
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
- crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+ MAX_SYNC_SKCIPHER_REQSIZE + \
+ (!(sizeof((struct crypto_sync_skcipher *)1 == \
+ (typeof(tfm))1))) \
+ ] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = (void *)__##name##_desc
/**
@@ -197,6 +209,9 @@ static inline struct crypto_skcipher *__crypto_skcipher_cast(
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask);
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
@@ -212,6 +227,11 @@ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
}
+static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
+{
+ crypto_free_skcipher(&tfm->base);
+}
+
/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -280,6 +300,12 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
return tfm->ivsize;
}
+static inline unsigned int crypto_sync_skcipher_ivsize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_ivsize(&tfm->base);
+}
+
static inline unsigned int crypto_skcipher_alg_chunksize(
struct skcipher_alg *alg)
{
@@ -356,6 +382,12 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_sync_skcipher_blocksize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_skcipher_alignmask(
struct crypto_skcipher *tfm)
{
@@ -379,6 +411,24 @@ static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
}
+static inline u32 crypto_sync_skcipher_get_flags(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_skcipher_set_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_skcipher_clear_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
@@ -401,6 +451,12 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
+static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_skcipher_setkey(&tfm->base, key, keylen);
+}
+
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
@@ -422,6 +478,40 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
return __crypto_skcipher_cast(req->base.tfm);
}
+static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_skcipher, base);
+}
+
+static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
+ int ret, struct crypto_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&alg->encrypt_cnt);
+ atomic64_add(req->cryptlen, &alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
+ int ret, struct crypto_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&alg->decrypt_cnt);
+ atomic64_add(req->cryptlen, &alg->decrypt_tlen);
+ }
+#endif
+}
+
/**
* crypto_skcipher_encrypt() - encrypt plaintext
* @req: reference to the skcipher_request handle that holds all information
@@ -436,11 +526,14 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ int ret;
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->encrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = tfm->encrypt(req);
+ crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg);
+ return ret;
}
/**
@@ -457,11 +550,14 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ int ret;
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->decrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = tfm->decrypt(req);
+ crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg);
+ return ret;
}
/**
@@ -500,6 +596,12 @@ static inline void skcipher_request_set_tfm(struct skcipher_request *req,
req->base.tfm = crypto_skcipher_tfm(tfm);
}
+static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req,
+ struct crypto_sync_skcipher *tfm)
+{
+ skcipher_request_set_tfm(req, &tfm->base);
+}
+
static inline struct skcipher_request *skcipher_request_cast(
struct crypto_async_request *req)
{
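
To illustrate the new synchronous-skcipher interface added above, here is a minimal sketch of a converted user: allocate a crypto_sync_skcipher, set the key, and run an on-stack request bounded by MAX_SYNC_SKCIPHER_REQSIZE. "cbc(aes)" and the in-place single-scatterlist setup are illustrative choices, not part of the patch.

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	static int demo_encrypt(u8 *buf, unsigned int len,
				const u8 *key, unsigned int keylen, u8 *iv)
	{
		struct crypto_sync_skcipher *tfm;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_sync_skcipher_setkey(tfm, key, keylen);
		if (!err) {
			SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

			skcipher_request_set_sync_tfm(req, tfm);
			skcipher_request_set_callback(req, 0, NULL, NULL);
			sg_init_one(&sg, buf, len);
			skcipher_request_set_crypt(req, &sg, &sg, len, iv);
			err = crypto_skcipher_encrypt(req);	/* synchronous */
			skcipher_request_zero(req);
		}

		crypto_free_sync_skcipher(tfm);
		return err;
	}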
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
deleted file mode 100644
index 73cfc952d405..000000000000
--- a/include/crypto/speck.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Common values for the Speck algorithm
- */
-
-#ifndef _CRYPTO_SPECK_H
-#define _CRYPTO_SPECK_H
-
-#include <linux/types.h>
-
-/* Speck128 */
-
-#define SPECK128_BLOCK_SIZE 16
-
-#define SPECK128_128_KEY_SIZE 16
-#define SPECK128_128_NROUNDS 32
-
-#define SPECK128_192_KEY_SIZE 24
-#define SPECK128_192_NROUNDS 33
-
-#define SPECK128_256_KEY_SIZE 32
-#define SPECK128_256_NROUNDS 34
-
-struct speck128_tfm_ctx {
- u64 round_keys[SPECK128_256_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-/* Speck64 */
-
-#define SPECK64_BLOCK_SIZE 8
-
-#define SPECK64_96_KEY_SIZE 12
-#define SPECK64_96_NROUNDS 26
-
-#define SPECK64_128_KEY_SIZE 16
-#define SPECK64_128_NROUNDS 27
-
-struct speck64_tfm_ctx {
- u32 round_keys[SPECK64_128_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-#endif /* _CRYPTO_SPECK_H */