about summary refs log tree commit diff stats
path: root/arch/x86/crypto
diff options
context:
space:
mode:
authorEric Biggers <ebiggers@google.com>2018-12-04 22:20:05 -0800
committerHerbert Xu <herbert@gondor.apana.org.au>2018-12-13 18:24:58 +0800
commita033aed5a84eb93a32929b6862602cb283d39e82 (patch)
tree333b9c17544307a5f9fa06ffac438bd116f4f727 /arch/x86/crypto
parentcrypto: x86/chacha - add XChaCha12 support (diff)
downloadlinux-dev-a033aed5a84eb93a32929b6862602cb283d39e82.tar.xz
linux-dev-a033aed5a84eb93a32929b6862602cb283d39e82.zip
crypto: x86/chacha - yield the FPU occasionally
To improve responsiveness, yield the FPU (temporarily re-enabling preemption) every 4 KiB encrypted/decrypted, rather than keeping preemption disabled during the entire encryption/decryption operation. Alternatively we could do this for every skcipher_walk step, but steps may be small in some cases, and yielding the FPU is expensive on x86. Suggested-by: Martin Willi <martin@strongswan.org> Signed-off-by: Eric Biggers <ebiggers@google.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r-- arch/x86/crypto/chacha_glue.c | 12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index d19c2908be90..9b1d3fac4943 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -132,6 +132,7 @@ static int chacha_simd_stream_xor(struct skcipher_request *req,
{
u32 *state, state_buf[16 + 2] __aligned(8);
struct skcipher_walk walk;
+ int next_yield = 4096; /* bytes until next FPU yield */
int err;
BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
@@ -144,12 +145,21 @@ static int chacha_simd_stream_xor(struct skcipher_request *req,
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
- if (nbytes < walk.total)
+ if (nbytes < walk.total) {
nbytes = round_down(nbytes, walk.stride);
+ next_yield -= nbytes;
+ }
chacha_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
nbytes, ctx->nrounds);
+ if (next_yield <= 0) {
+ /* temporarily allow preemption */
+ kernel_fpu_end();
+ kernel_fpu_begin();
+ next_yield = 4096;
+ }
+
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}