author    Ard Biesheuvel <ardb@kernel.org>  2021-08-27 09:03:41 +0200
committer Herbert Xu <herbert@gondor.apana.org.au>  2021-09-17 11:05:11 +0800
commit    741691c44606b1903e674d12f3e4a4b68ade69ad (patch)
tree      3bcc24ec59d3944c29b7a6335e235dd25113ce09 /arch/arm64/crypto/aes-ce-ccm-glue.c
parent    crypto: arm64/aes-ccm - remove non-SIMD fallback path (diff)
crypto: arm64/aes-ccm - reduce NEON begin/end calls for common case
AES-CCM (as used in WPA2 CCMP, for instance) typically involves authenticate-only data, and operates on a single network packet, and so the common case is for the authenticate, en/decrypt and finalize SIMD helpers to all be called exactly once in sequence.

Since kernel_neon_end() now involves manipulation of the preemption state as well as the softirq mask state, let's reduce the number of times we are forced to call it to only once if we are handling this common case.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
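In the common case described above, the old code therefore paid three kernel_neon_begin()/kernel_neon_end() round trips per packet, while the patched code pays one. A minimal sketch of the two call patterns, with hypothetical auth()/crypt()/final() helpers standing in for the ce_aes_ccm_* routines:

	/* Before this patch: one NEON section per SIMD helper. */
	kernel_neon_begin(); auth();  kernel_neon_end();
	kernel_neon_begin(); crypt(); kernel_neon_end();
	kernel_neon_begin(); final(); kernel_neon_end();

	/* After this patch: a single NEON section covers the whole common case. */
	kernel_neon_begin();
	auth();
	crypt();
	final();
	kernel_neon_end();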
Diffstat (limited to 'arch/arm64/crypto/aes-ce-ccm-glue.c')
 arch/arm64/crypto/aes-ce-ccm-glue.c | 80 ++++++++++++++++++++------------
 1 file changed, 48 insertions(+), 32 deletions(-)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index c1f221a181a5..d973655fab7e 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -97,10 +97,8 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
u32 abytes, u32 *macp)
{
- kernel_neon_begin();
ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
num_rounds(key));
- kernel_neon_end();
}
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
@@ -136,6 +134,12 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
n = min_t(u32, n, SZ_4K); /* yield NEON at least every 4k */
p = scatterwalk_map(&walk);
ccm_update_mac(ctx, mac, p, n, &macp);
+
+ if (len / SZ_4K > (len - n) / SZ_4K) {
+ kernel_neon_end();
+ kernel_neon_begin();
+ }
+
len -= n;
scatterwalk_unmap(p);
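The hunk above yields the NEON unit periodically while hashing long associated data. Since n is clamped to SZ_4K, the test len / SZ_4K > (len - n) / SZ_4K is true exactly when the remaining length crosses a 4 KiB multiple, so the end/begin pair runs roughly once per 4 KiB processed. A standalone userspace demo of the predicate (not kernel code; the 1500-byte chunk size is an arbitrary stand-in for scatterlist segment sizes):

	#include <stdio.h>

	#define SZ_4K 4096u

	int main(void)
	{
		unsigned int len = 10000;	/* total associated data, arbitrary */

		while (len) {
			/* consume one scatterlist chunk, at most 1500 bytes */
			unsigned int n = len < 1500 ? len : 1500;

			if (len / SZ_4K > (len - n) / SZ_4K)
				printf("yield at len=%u: remaining length crosses a 4K multiple\n", len);
			len -= n;
		}
		return 0;
	}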
@@ -158,35 +162,41 @@ static int ccm_encrypt(struct aead_request *req)
if (err)
return err;
- if (req->assoclen)
- ccm_calculate_auth_mac(req, mac);
-
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
err = skcipher_walk_aead_encrypt(&walk, req, false);
+ if (unlikely(err))
+ return err;
+
+ kernel_neon_begin();
+
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
- while (walk.nbytes) {
+ do {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
if (walk.nbytes == walk.total)
tail = 0;
- kernel_neon_begin();
ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes - tail, ctx->key_enc,
num_rounds(ctx), mac, walk.iv);
- kernel_neon_end();
- err = skcipher_walk_done(&walk, tail);
- }
- if (!err) {
- kernel_neon_begin();
- ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+ if (walk.nbytes == walk.total)
+ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
kernel_neon_end();
- }
- if (err)
- return err;
+
+ if (walk.nbytes) {
+ err = skcipher_walk_done(&walk, tail);
+ if (unlikely(err))
+ return err;
+ if (unlikely(walk.nbytes))
+ kernel_neon_begin();
+ }
+ } while (walk.nbytes);
/* copy authtag to end of dst */
scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
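Stripped of the diff markers, the encrypt loop now has the following shape: NEON is held across the common single-iteration case, released before skcipher_walk_done() (which may sleep), and re-acquired only in the uncommon multi-chunk case. This is a condensed sketch, with process() and finalize() as hypothetical stand-ins for ce_aes_ccm_encrypt() and ce_aes_ccm_final():

	kernel_neon_begin();
	do {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		if (walk.nbytes == walk.total)
			tail = 0;		/* final chunk: absorb the partial block too */

		process(walk.nbytes - tail);	/* bulk work under NEON */

		if (walk.nbytes == walk.total)
			finalize();		/* compute the tag in the same NEON section */

		kernel_neon_end();		/* drop NEON before a call that may sleep */

		if (walk.nbytes) {
			err = skcipher_walk_done(&walk, tail);
			if (unlikely(err))
				return err;
			if (unlikely(walk.nbytes))	/* uncommon: more data remains */
				kernel_neon_begin();
		}
	} while (walk.nbytes);

The decrypt hunk below follows the same pattern with ce_aes_ccm_decrypt() in place of the encrypt helper.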
@@ -210,35 +220,41 @@ static int ccm_decrypt(struct aead_request *req)
if (err)
return err;
- if (req->assoclen)
- ccm_calculate_auth_mac(req, mac);
-
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
err = skcipher_walk_aead_decrypt(&walk, req, false);
+ if (unlikely(err))
+ return err;
- while (walk.nbytes) {
+ kernel_neon_begin();
+
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
+
+ do {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
if (walk.nbytes == walk.total)
tail = 0;
- kernel_neon_begin();
ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes - tail, ctx->key_enc,
- num_rounds(ctx), mac, walk.iv);
- kernel_neon_end();
+ walk.nbytes - tail, ctx->key_enc,
+ num_rounds(ctx), mac, walk.iv);
+
+ if (walk.nbytes == walk.total)
+ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
- err = skcipher_walk_done(&walk, tail);
- }
- if (!err) {
- kernel_neon_begin();
- ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
kernel_neon_end();
- }
- if (err)
- return err;
+
+ if (walk.nbytes) {
+ err = skcipher_walk_done(&walk, tail);
+ if (unlikely(err))
+ return err;
+ if (unlikely(walk.nbytes))
+ kernel_neon_begin();
+ }
+ } while (walk.nbytes);
/* compare calculated auth tag with the stored one */
scatterwalk_map_and_copy(buf, req->src,