author    Linus Torvalds <torvalds@linux-foundation.org> 2015-06-22 21:04:48 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2015-06-22 21:04:48 -0700
commit    44d21c3f3a2ef2f58b18bda64c52c99e723f3f4a (patch)
tree      5146cf96cb0dbd7121176d484417ab942c92dcd4 /arch/x86/crypto/aesni-intel_glue.c
parent    Merge tag 'please-pull-paravirt' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux (diff)
parent    crypto: marvell/cesa - remove COMPILE_TEST dependency (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "Here is the crypto update for 4.2:

  API:
   - Convert RNG interface to new style.
   - New AEAD interface with one SG list for AD and plain/cipher text.
     All external AEAD users have been converted.
   - New asymmetric key interface (akcipher).

  Algorithms:
   - Chacha20, Poly1305 and RFC7539 support.
   - New RSA implementation.
   - Jitter RNG.
   - DRBG is now seeded with both /dev/random and Jitter RNG.  If kernel
     pool isn't ready then DRBG will be reseeded when it is.
   - DRBG is now the default crypto API RNG, replacing krng.
   - 842 compression (previously part of powerpc nx driver).

  Drivers:
   - Accelerated SHA-512 for arm64.
   - New Marvell CESA driver that supports DMA and more algorithms.
   - Updated powerpc nx 842 support.
   - Added support for SEC1 hardware to talitos"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (292 commits)
  crypto: marvell/cesa - remove COMPILE_TEST dependency
  crypto: algif_aead - Temporarily disable all AEAD algorithms
  crypto: af_alg - Forbid the use internal algorithms
  crypto: echainiv - Only hold RNG during initialisation
  crypto: seqiv - Add compatibility support without RNG
  crypto: eseqiv - Offer normal cipher functionality without RNG
  crypto: chainiv - Offer normal cipher functionality without RNG
  crypto: user - Add CRYPTO_MSG_DELRNG
  crypto: user - Move cryptouser.h to uapi
  crypto: rng - Do not free default RNG when it becomes unused
  crypto: skcipher - Allow givencrypt to be NULL
  crypto: sahara - propagate the error on clk_disable_unprepare() failure
  crypto: rsa - fix invalid select for AKCIPHER
  crypto: picoxcell - Update to the current clk API
  crypto: nx - Check for bogus firmware properties
  crypto: marvell/cesa - add DT bindings documentation
  crypto: marvell/cesa - add support for Kirkwood and Dove SoCs
  crypto: marvell/cesa - add support for Orion SoCs
  crypto: marvell/cesa - add allhwsupport module parameter
  crypto: marvell/cesa - add support for all armada SoCs
  ...
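The headline API change above, the single-scatterlist AEAD interface, is easiest to see from the caller's side. Below is a minimal, hedged sketch (not part of this commit) of driving rfc4106(gcm(aes)) through the new interface: the one SG list carries the associated data immediately followed by the plain/cipher text, and the AD length is declared with aead_request_set_ad(). Error handling for asynchronous completion is abbreviated.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hedged sketch, not from this patch: one SG list for AD + text.
 * A real caller must also handle -EINPROGRESS/-EBUSY from an async
 * tfm, e.g. by waiting on a completion. */
static int demo_rfc4106_encrypt(struct scatterlist *sg,
				unsigned int assoclen, unsigned int ptlen,
				u8 *iv, const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* rfc4106 keys carry a 4-byte nonce after the AES key. */
	err = crypto_aead_setkey(tfm, key, keylen) ?:
	      crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	/* sg holds assoclen bytes of AD, then ptlen bytes of plaintext,
	 * with room after that for the 16-byte tag (written in place). */
	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, sg, sg, ptlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_encrypt(req);
	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}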
Diffstat (limited to 'arch/x86/crypto/aesni-intel_glue.c')
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 423
1 file changed, 167 insertions(+), 256 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index b419f43ce0c5..2bfc8a7c88c1 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -44,15 +44,19 @@
#endif
+#define AESNI_ALIGN 16
+#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
+#define RFC4106_HASH_SUBKEY_SIZE 16
+
/* This data is stored at the end of the crypto_tfm struct.
* It's a type of per "session" data storage location.
* This needs to be 16 byte aligned.
*/
struct aesni_rfc4106_gcm_ctx {
- u8 hash_subkey[16];
- struct crypto_aes_ctx aes_key_expanded;
+ u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ struct crypto_aes_ctx aes_key_expanded
+ __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 nonce[4];
- struct cryptd_aead *cryptd_tfm;
};
struct aesni_gcm_set_hash_subkey_result {
@@ -66,10 +70,6 @@ struct aesni_hash_subkey_req_data {
struct scatterlist sg;
};
-#define AESNI_ALIGN (16)
-#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
-#define RFC4106_HASH_SUBKEY_SIZE 16
-
struct aesni_lrw_ctx {
struct lrw_table_ctx lrw_table;
u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
@@ -283,10 +283,11 @@ static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
- return
- (struct aesni_rfc4106_gcm_ctx *)
- PTR_ALIGN((u8 *)
- crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
+ unsigned long align = AESNI_ALIGN;
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+ return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif
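The rewritten context getter above captures a small idiom worth noting: ask the crypto core how strictly it already aligns tfm contexts, and only round the pointer up when the driver needs more. Since this patch also sets cra_alignmask = AESNI_ALIGN - 1 on the new aead_alg (see the registration hunk near the end), the core normally hands back memory that is already 16-byte aligned and PTR_ALIGN() degenerates to a no-op. A standalone, hedged restatement (the helper name is illustrative):

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>

/* Hedged restatement of aesni_rfc4106_gcm_ctx_get(); "need" would be
 * AESNI_ALIGN (16) for this driver. */
static void *ctx_aligned(struct crypto_aead *tfm, unsigned long need)
{
	unsigned long align = need;

	/* If the core already aligns contexts at least this strictly
	 * (via cra_alignmask), PTR_ALIGN(ptr, 1) is a no-op. */
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}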
@@ -790,36 +791,30 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
#endif
#ifdef CONFIG_X86_64
-static int rfc4106_init(struct crypto_tfm *tfm)
+static int rfc4106_init(struct crypto_aead *aead)
{
struct cryptd_aead *cryptd_tfm;
- struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
- PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
- struct crypto_aead *cryptd_child;
- struct aesni_rfc4106_gcm_ctx *child_ctx;
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
- cryptd_child = cryptd_aead_child(cryptd_tfm);
- child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
- memcpy(child_ctx, ctx, sizeof(*ctx));
- ctx->cryptd_tfm = cryptd_tfm;
- tfm->crt_aead.reqsize = sizeof(struct aead_request)
- + crypto_aead_reqsize(&cryptd_tfm->base);
+ *ctx = cryptd_tfm;
+ crypto_aead_set_reqsize(
+ aead,
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(&cryptd_tfm->base));
return 0;
}
-static void rfc4106_exit(struct crypto_tfm *tfm)
+static void rfc4106_exit(struct crypto_aead *aead)
{
- struct aesni_rfc4106_gcm_ctx *ctx =
- (struct aesni_rfc4106_gcm_ctx *)
- PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
- if (!IS_ERR(ctx->cryptd_tfm))
- cryptd_free_aead(ctx->cryptd_tfm);
- return;
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
+ cryptd_free_aead(*ctx);
}
static void
@@ -845,8 +840,6 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
if (IS_ERR(ctr_tfm))
return PTR_ERR(ctr_tfm);
- crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
-
ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
if (ret)
goto out_free_ablkcipher;
@@ -895,73 +888,29 @@ out_free_ablkcipher:
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
unsigned int key_len)
{
- int ret = 0;
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
- u8 *new_key_align, *new_key_mem = NULL;
if (key_len < 4) {
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/*Account for 4 byte nonce at the end.*/
key_len -= 4;
- if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256) {
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
- /*This must be on a 16 byte boundary!*/
- if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
- return -EINVAL;
-
- if ((unsigned long)key % AESNI_ALIGN) {
- /*key is not aligned: use an auxuliar aligned pointer*/
- new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
- if (!new_key_mem)
- return -ENOMEM;
-
- new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
- memcpy(new_key_align, key, key_len);
- key = new_key_align;
- }
- if (!irq_fpu_usable())
- ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
- key, key_len);
- else {
- kernel_fpu_begin();
- ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
- kernel_fpu_end();
- }
- /*This must be on a 16 byte boundary!*/
- if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
- ret = -EINVAL;
- goto exit;
- }
- ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
-exit:
- kfree(new_key_mem);
- return ret;
+ return aes_set_key_common(crypto_aead_tfm(aead),
+ &ctx->aes_key_expanded, key, key_len) ?:
+ rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
unsigned int key_len)
{
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
- struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
- struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child);
- struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm;
- int ret;
+ struct cryptd_aead **ctx = crypto_aead_ctx(parent);
+ struct cryptd_aead *cryptd_tfm = *ctx;
- ret = crypto_aead_setkey(child, key, key_len);
- if (!ret) {
- memcpy(ctx, c_ctx, sizeof(*ctx));
- ctx->cryptd_tfm = cryptd_tfm;
- }
- return ret;
+ return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
@@ -975,7 +924,7 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
default:
return -EINVAL;
}
- crypto_aead_crt(aead)->authsize = authsize;
+
return 0;
}
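The consolidated common_rfc4106_set_key() above leans on GNU C's binary "?:" operator: a ?: b evaluates a once and yields it when nonzero, otherwise b. Here that means rfc4106_set_hash_subkey() runs only after aes_set_key_common() has returned 0, and the first nonzero error code wins. A hedged sketch of the idiom with hypothetical step functions:

/* Hedged sketch; step_one()/step_two() are hypothetical stand-ins for
 * aes_set_key_common() and rfc4106_set_hash_subkey(). */
static int step_one(void);
static int step_two(void);

static int chained_setup(void)
{
	/* step_one()'s result is returned if nonzero (an error);
	 * otherwise step_two() runs and its result is returned. */
	return step_one() ?: step_two();
}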
@@ -984,30 +933,23 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
static int rfc4106_set_authsize(struct crypto_aead *parent,
unsigned int authsize)
{
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
- struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
- int ret;
+ struct cryptd_aead **ctx = crypto_aead_ctx(parent);
+ struct cryptd_aead *cryptd_tfm = *ctx;
- ret = crypto_aead_setauthsize(child, authsize);
- if (!ret)
- crypto_aead_crt(parent)->authsize = authsize;
- return ret;
+ return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
-static int __driver_rfc4106_encrypt(struct aead_request *req)
+static int helper_rfc4106_encrypt(struct aead_request *req)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
- u32 key_len = ctx->aes_key_expanded.key_length;
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- u8 iv_tab[16+AESNI_ALIGN];
- u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
+ u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
struct scatter_walk src_sg_walk;
- struct scatter_walk assoc_sg_walk;
struct scatter_walk dst_sg_walk;
unsigned int i;
@@ -1016,12 +958,6 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
/* to 8 or 12 bytes */
if (unlikely(req->assoclen != 8 && req->assoclen != 12))
return -EINVAL;
- if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
- return -EINVAL;
- if (unlikely(key_len != AES_KEYSIZE_128 &&
- key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256))
- return -EINVAL;
/* IV below built */
for (i = 0; i < 4; i++)
@@ -1030,55 +966,57 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
- if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+ if (sg_is_last(req->src) &&
+ req->src->offset + req->src->length <= PAGE_SIZE &&
+ sg_is_last(req->dst) &&
+ req->dst->offset + req->dst->length <= PAGE_SIZE) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
- scatterwalk_start(&assoc_sg_walk, req->assoc);
- src = scatterwalk_map(&src_sg_walk);
- assoc = scatterwalk_map(&assoc_sg_walk);
+ assoc = scatterwalk_map(&src_sg_walk);
+ src = assoc + req->assoclen;
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk);
+ dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
}
-
} else {
/* Allocate memory for src, dst, assoc */
- src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
+ assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
GFP_ATOMIC);
- if (unlikely(!src))
+ if (unlikely(!assoc))
return -ENOMEM;
- assoc = (src + req->cryptlen + auth_tag_len);
- scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
- scatterwalk_map_and_copy(assoc, req->assoc, 0,
- req->assoclen, 0);
+ scatterwalk_map_and_copy(assoc, req->src, 0,
+ req->assoclen + req->cryptlen, 0);
+ src = assoc + req->assoclen;
dst = src;
}
+ kernel_fpu_begin();
aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+ ((unsigned long)req->cryptlen), auth_tag_len);
+ kernel_fpu_end();
/* The authTag (aka the Integrity Check Value) needs to be written
* back to the packet. */
if (one_entry_in_sg) {
if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst);
- scatterwalk_done(&dst_sg_walk, 0, 0);
+ scatterwalk_unmap(dst - req->assoclen);
+ scatterwalk_advance(&dst_sg_walk, req->dst->length);
+ scatterwalk_done(&dst_sg_walk, 1, 0);
}
- scatterwalk_unmap(src);
scatterwalk_unmap(assoc);
- scatterwalk_done(&src_sg_walk, 0, 0);
- scatterwalk_done(&assoc_sg_walk, 0, 0);
+ scatterwalk_advance(&src_sg_walk, req->src->length);
+ scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
} else {
- scatterwalk_map_and_copy(dst, req->dst, 0,
- req->cryptlen + auth_tag_len, 1);
- kfree(src);
+ scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
+ req->cryptlen + auth_tag_len, 1);
+ kfree(assoc);
}
return 0;
}
-static int __driver_rfc4106_decrypt(struct aead_request *req)
+static int helper_rfc4106_decrypt(struct aead_request *req)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
@@ -1087,26 +1025,16 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
int retval = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
- u32 key_len = ctx->aes_key_expanded.key_length;
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- u8 iv_and_authTag[32+AESNI_ALIGN];
- u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
- u8 *authTag = iv + 16;
+ u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 authTag[16];
struct scatter_walk src_sg_walk;
- struct scatter_walk assoc_sg_walk;
struct scatter_walk dst_sg_walk;
unsigned int i;
- if (unlikely((req->cryptlen < auth_tag_len) ||
- (req->assoclen != 8 && req->assoclen != 12)))
+ if (unlikely(req->assoclen != 8 && req->assoclen != 12))
return -EINVAL;
- if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
- return -EINVAL;
- if (unlikely(key_len != AES_KEYSIZE_128 &&
- key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256))
- return -EINVAL;
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length */
@@ -1120,33 +1048,36 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
- if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+ if (sg_is_last(req->src) &&
+ req->src->offset + req->src->length <= PAGE_SIZE &&
+ sg_is_last(req->dst) &&
+ req->dst->offset + req->dst->length <= PAGE_SIZE) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
- scatterwalk_start(&assoc_sg_walk, req->assoc);
- src = scatterwalk_map(&src_sg_walk);
- assoc = scatterwalk_map(&assoc_sg_walk);
+ assoc = scatterwalk_map(&src_sg_walk);
+ src = assoc + req->assoclen;
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk);
+ dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
}
} else {
/* Allocate memory for src, dst, assoc */
- src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
- if (!src)
+ assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+ if (!assoc)
return -ENOMEM;
- assoc = (src + req->cryptlen);
- scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
- scatterwalk_map_and_copy(assoc, req->assoc, 0,
- req->assoclen, 0);
+ scatterwalk_map_and_copy(assoc, req->src, 0,
+ req->assoclen + req->cryptlen, 0);
+ src = assoc + req->assoclen;
dst = src;
}
+ kernel_fpu_begin();
aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
authTag, auth_tag_len);
+ kernel_fpu_end();
/* Compare generated tag with passed in tag. */
retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
@@ -1154,90 +1085,59 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
if (one_entry_in_sg) {
if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst);
- scatterwalk_done(&dst_sg_walk, 0, 0);
+ scatterwalk_unmap(dst - req->assoclen);
+ scatterwalk_advance(&dst_sg_walk, req->dst->length);
+ scatterwalk_done(&dst_sg_walk, 1, 0);
}
- scatterwalk_unmap(src);
scatterwalk_unmap(assoc);
- scatterwalk_done(&src_sg_walk, 0, 0);
- scatterwalk_done(&assoc_sg_walk, 0, 0);
+ scatterwalk_advance(&src_sg_walk, req->src->length);
+ scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
} else {
- scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
- kfree(src);
+ scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
+ tempCipherLen, 1);
+ kfree(assoc);
}
return retval;
}
static int rfc4106_encrypt(struct aead_request *req)
{
- int ret;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
+ struct cryptd_aead *cryptd_tfm = *ctx;
+ struct aead_request *subreq = aead_request_ctx(req);
- if (!irq_fpu_usable()) {
- struct aead_request *cryptd_req =
- (struct aead_request *) aead_request_ctx(req);
+ aead_request_set_tfm(subreq, irq_fpu_usable() ?
+ cryptd_aead_child(cryptd_tfm) :
+ &cryptd_tfm->base);
- memcpy(cryptd_req, req, sizeof(*req));
- aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
- ret = crypto_aead_encrypt(cryptd_req);
- } else {
- kernel_fpu_begin();
- ret = __driver_rfc4106_encrypt(req);
- kernel_fpu_end();
- }
- return ret;
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return crypto_aead_encrypt(subreq);
}
static int rfc4106_decrypt(struct aead_request *req)
{
- int ret;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
+ struct cryptd_aead *cryptd_tfm = *ctx;
+ struct aead_request *subreq = aead_request_ctx(req);
- if (!irq_fpu_usable()) {
- struct aead_request *cryptd_req =
- (struct aead_request *) aead_request_ctx(req);
+ aead_request_set_tfm(subreq, irq_fpu_usable() ?
+ cryptd_aead_child(cryptd_tfm) :
+ &cryptd_tfm->base);
- memcpy(cryptd_req, req, sizeof(*req));
- aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
- ret = crypto_aead_decrypt(cryptd_req);
- } else {
- kernel_fpu_begin();
- ret = __driver_rfc4106_decrypt(req);
- kernel_fpu_end();
- }
- return ret;
-}
-
-static int helper_rfc4106_encrypt(struct aead_request *req)
-{
- int ret;
-
- if (unlikely(!irq_fpu_usable())) {
- WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
- ret = -EINVAL;
- } else {
- kernel_fpu_begin();
- ret = __driver_rfc4106_encrypt(req);
- kernel_fpu_end();
- }
- return ret;
-}
-
-static int helper_rfc4106_decrypt(struct aead_request *req)
-{
- int ret;
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
- if (unlikely(!irq_fpu_usable())) {
- WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
- ret = -EINVAL;
- } else {
- kernel_fpu_begin();
- ret = __driver_rfc4106_decrypt(req);
- kernel_fpu_end();
- }
- return ret;
+ return crypto_aead_decrypt(subreq);
}
#endif
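The rfc4106_encrypt()/rfc4106_decrypt() wrappers above replace the old copy-the-whole-request dance with a subrequest kept in the outer request's context. The interesting part is the target selection: when the FPU is usable the request goes straight to the internal synchronous child, whose helpers bracket the SIMD code in kernel_fpu_begin()/kernel_fpu_end() as seen earlier in this diff; otherwise it is queued to cryptd, which replays it later from process context where the FPU is available. A hedged consolidation of the shared shape (fwd() is illustrative; the patch open-codes this in both wrappers):

/* Hedged sketch; fwd() is not in the patch. Relies on the same headers
 * aesni-intel_glue.c already pulls in (aead, cryptd, FPU api). */
static int fwd(struct aead_request *req,
	       int (*op)(struct aead_request *))
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct aead_request *subreq = aead_request_ctx(req);

	/* FPU usable: run the synchronous child directly.
	 * FPU not usable (e.g. IRQ context): defer to the cryptd queue. */
	aead_request_set_tfm(subreq, irq_fpu_usable() ?
			     cryptd_aead_child(cryptd_tfm) :
			     &cryptd_tfm->base);

	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return op(subreq);	/* crypto_aead_encrypt() or _decrypt() */
}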
@@ -1410,51 +1310,6 @@ static struct crypto_alg aesni_algs[] = { {
.geniv = "chainiv",
},
},
-}, {
- .cra_name = "__gcm-aes-aesni",
- .cra_driver_name = "__driver-gcm-aes-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
- AESNI_ALIGN,
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .aead = {
- .setkey = common_rfc4106_set_key,
- .setauthsize = common_rfc4106_set_authsize,
- .encrypt = helper_rfc4106_encrypt,
- .decrypt = helper_rfc4106_decrypt,
- .ivsize = 8,
- .maxauthsize = 16,
- },
- },
-}, {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
- AESNI_ALIGN,
- .cra_alignmask = 0,
- .cra_type = &crypto_nivaead_type,
- .cra_module = THIS_MODULE,
- .cra_init = rfc4106_init,
- .cra_exit = rfc4106_exit,
- .cra_u = {
- .aead = {
- .setkey = rfc4106_set_key,
- .setauthsize = rfc4106_set_authsize,
- .encrypt = rfc4106_encrypt,
- .decrypt = rfc4106_decrypt,
- .geniv = "seqiv",
- .ivsize = 8,
- .maxauthsize = 16,
- },
- },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
@@ -1569,6 +1424,46 @@ static struct crypto_alg aesni_algs[] = { {
},
} };
+#ifdef CONFIG_X86_64
+static struct aead_alg aesni_aead_algs[] = { {
+ .setkey = common_rfc4106_set_key,
+ .setauthsize = common_rfc4106_set_authsize,
+ .encrypt = helper_rfc4106_encrypt,
+ .decrypt = helper_rfc4106_decrypt,
+ .ivsize = 8,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "__gcm-aes-aesni",
+ .cra_driver_name = "__driver-gcm-aes-aesni",
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
+ .cra_alignmask = AESNI_ALIGN - 1,
+ .cra_module = THIS_MODULE,
+ },
+}, {
+ .init = rfc4106_init,
+ .exit = rfc4106_exit,
+ .setkey = rfc4106_set_key,
+ .setauthsize = rfc4106_set_authsize,
+ .encrypt = rfc4106_encrypt,
+ .decrypt = rfc4106_decrypt,
+ .ivsize = 8,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct cryptd_aead *),
+ .cra_module = THIS_MODULE,
+ },
+} };
+#else
+static struct aead_alg aesni_aead_algs[0];
+#endif
+
static const struct x86_cpu_id aesni_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_AES),
@@ -1616,11 +1511,27 @@ static int __init aesni_init(void)
if (err)
return err;
- return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+ err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+ if (err)
+ goto fpu_exit;
+
+ err = crypto_register_aeads(aesni_aead_algs,
+ ARRAY_SIZE(aesni_aead_algs));
+ if (err)
+ goto unregister_algs;
+
+ return err;
+
+unregister_algs:
+ crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+fpu_exit:
+ crypto_fpu_exit();
+ return err;
}
static void __exit aesni_exit(void)
{
+ crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
crypto_fpu_exit();
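Two details of the registration rework above are worth calling out. First, the !CONFIG_X86_64 branch defines a zero-length aesni_aead_algs[], so ARRAY_SIZE() evaluates to 0 and the register/unregister calls compile down to no-ops without extra ifdefs. Second, aesni_init() now follows the standard goto-unwind idiom: each successfully registered resource gains a label that later failures jump to, so teardown runs in exact reverse order, mirrored by aesni_exit(). A hedged, minimal sketch of that shape (setup_a()/setup_b()/teardown_a() are hypothetical stand-ins for the FPU, algs and aeads registration steps):

/* Hedged sketch of the unwind idiom in aesni_init(). */
static int setup_a(void);	/* hypothetical, e.g. crypto_fpu_init() */
static int setup_b(void);	/* hypothetical, e.g. crypto_register_algs() */
static void teardown_a(void);	/* hypothetical, e.g. crypto_fpu_exit() */

static int __init demo_init(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;

	err = setup_b();
	if (err)
		goto undo_a;	/* unwind in reverse order of setup */

	return 0;

undo_a:
	teardown_a();
	return err;
}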