From 481021497c06de1e403a4cd9b0da359b612ed829 Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld"
Date: Wed, 13 Jun 2018 15:38:55 +0200
Subject: chacha20poly1305: use slow crypto on -rt kernels

On -rt kernels, spinlocks can call schedule(), which means they cannot be
taken while preemption is disabled. The FPU code (kernel_fpu_begin/end)
disables preemption. Hence, we can either restructure things so that the
calls to kernel_fpu_begin/end sit right next to the actual crypto routines,
or we can take the slower, lazier route of simply not using the FPU at all
on -rt kernels. This patch goes with the latter, lazy solution.

The reason we don't place the calls to kernel_fpu_begin/end close to the
crypto routines in the first place is that they are very expensive, usually
involving a call to XSAVE. So on sane kernels, we benefit from only having
to call them once.
---
 src/crypto/chacha20poly1305.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crypto/chacha20poly1305.h b/src/crypto/chacha20poly1305.h
index 39919cd..f19bf52 100644
--- a/src/crypto/chacha20poly1305.h
+++ b/src/crypto/chacha20poly1305.h
@@ -56,7 +56,7 @@ bool __must_check xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t
 static inline bool chacha20poly1305_init_simd(void)
 {
 	bool have_simd = false;
-#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML) && !defined(CONFIG_PREEMPT_RT_BASE)
 	have_simd = irq_fpu_usable();
 	if (have_simd)
 		kernel_fpu_begin();
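
As a side note, here is a minimal, hedged sketch of the usage pattern this tradeoff is built around: a caller enters the SIMD section once and then processes a whole batch of messages, so the expensive kernel_fpu_begin()/XSAVE happens at most once per batch rather than once per message. On -rt kernels, the #if above simply leaves have_simd false and the generic scalar code runs. struct batch_msg and encrypt_one() are hypothetical placeholders, and chacha20poly1305_deinit_simd() is assumed to be the symmetric counterpart of the init helper shown in the diff.

#include <linux/types.h>
#include "chacha20poly1305.h"

struct batch_msg {		/* hypothetical container for one message */
	u8 *dst;
	const u8 *src;
	size_t len;
	u64 nonce;
};

/* Hypothetical per-message helper; in real code this would forward
 * have_simd to the actual encryption routine in this header. */
static void encrypt_one(struct batch_msg *msg, const u8 *key, bool have_simd)
{
	/* ... call the chacha20poly1305 encrypt routine here ... */
}

static void encrypt_batch(struct batch_msg *msgs, size_t n, const u8 *key)
{
	size_t i;
	/* At most one kernel_fpu_begin() (and thus one XSAVE) per batch;
	 * always false on CONFIG_PREEMPT_RT_BASE after this patch. */
	bool have_simd = chacha20poly1305_init_simd();

	for (i = 0; i < n; ++i)
		encrypt_one(&msgs[i], key, have_simd);

	/* At most one kernel_fpu_end() per batch. */
	chacha20poly1305_deinit_simd(have_simd);
}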