author    Ard Biesheuvel <ard.biesheuvel@linaro.org>    2019-07-02 21:41:21 +0200
committer Herbert Xu <herbert@gondor.apana.org.au>      2019-07-26 14:52:04 +1000
commit    b158fcbba857c71ffb05ab254aff3b32b5e3cfc3 (patch)
tree      fea1d1c172b2c41018f1ec8dee23b9da3352c984 /crypto/aes_ti.c
parent    crypto: aes - rename local routines to prevent future clashes (diff)
crypto: aes/fixed-time - align key schedule with other implementations
The fixed time AES code mangles the key schedule so that xoring the first round key with values at fixed offsets across the Sbox produces the correct value. This primes the D-cache with the entire Sbox before any data dependent lookups are done, making it more difficult to infer key bits from timing variances when the plaintext is known.

The downside of this approach is that it renders the key schedule incompatible with other implementations of AES in the kernel, which makes it cumbersome to use this implementation as a fallback for SIMD based AES in contexts where this is not allowed.

So let's tweak the fixed Sbox indexes so that they add up to zero under the xor operation. While at it, increase the granularity to 16 bytes so we cover the entire Sbox even on systems with 16 byte cachelines.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
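To illustrate the "add up to zero" property the message relies on, here is a minimal user-space sketch (not part of the patch) that checks the cancellation for the first state word in each direction. The byte constants are assumed to be the standard FIPS-197 S-box and inverse S-box entries at the indexes used by the new code; the kernel tables __aesti_sbox/__aesti_inv_sbox themselves are not reproduced here.

/*
 * Standalone illustration only: verify that the S-box bytes at the new
 * fixed indexes cancel under xor, so the pre-fetch xors no longer need
 * to be compensated for in the key schedule.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* assumed FIPS-197 sbox[0], sbox[64], sbox[134], sbox[195] */
        uint8_t enc = 0x63 ^ 0x09 ^ 0x44 ^ 0x2e;
        /* assumed FIPS-197 inv_sbox[0], inv_sbox[64], inv_sbox[129], inv_sbox[200] */
        uint8_t dec = 0x52 ^ 0x72 ^ 0x91 ^ 0xb1;

        assert(enc == 0 && dec == 0);
        printf("st0[0] pre-fetch xors cancel: enc=%#x dec=%#x\n", enc, dec);
        return 0;
}

The other three state words use different index sets chosen the same way, which is why the new aesti_set_key() below can simply return aesti_expand_key() without touching the round keys.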
Diffstat (limited to 'crypto/aes_ti.c')
-rw-r--r--   crypto/aes_ti.c   52
1 file changed, 21 insertions(+), 31 deletions(-)
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 798fc9a2c8d6..b3ebdc5679cb 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -234,30 +234,8 @@ static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                          unsigned int key_len)
 {
         struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-        int err;
 
-        err = aesti_expand_key(ctx, in_key, key_len);
-        if (err)
-                return err;
-
-        /*
-         * In order to force the compiler to emit data independent Sbox lookups
-         * at the start of each block, xor the first round key with values at
-         * fixed indexes in the Sbox. This will need to be repeated each time
-         * the key is used, which will pull the entire Sbox into the D-cache
-         * before any data dependent Sbox lookups are performed.
-         */
-        ctx->key_enc[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
-        ctx->key_enc[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
-        ctx->key_enc[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
-        ctx->key_enc[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224];
-
-        ctx->key_dec[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
-        ctx->key_dec[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
-        ctx->key_dec[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
-        ctx->key_dec[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224];
-
-        return 0;
+        return aesti_expand_key(ctx, in_key, key_len);
 }
 
 static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -280,10 +258,16 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
          */
         local_irq_save(flags);
 
-        st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
-        st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
-        st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
-        st0[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224];
+        /*
+         * Force the compiler to emit data independent Sbox references,
+         * by xoring the input with Sbox values that are known to add up
+         * to zero. This pulls the entire Sbox into the D-cache before any
+         * data dependent lookups are done.
+         */
+        st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[ 64] ^ __aesti_sbox[134] ^ __aesti_sbox[195];
+        st0[1] ^= __aesti_sbox[16] ^ __aesti_sbox[ 82] ^ __aesti_sbox[158] ^ __aesti_sbox[221];
+        st0[2] ^= __aesti_sbox[32] ^ __aesti_sbox[ 96] ^ __aesti_sbox[160] ^ __aesti_sbox[234];
+        st0[3] ^= __aesti_sbox[48] ^ __aesti_sbox[112] ^ __aesti_sbox[186] ^ __aesti_sbox[241];
 
         for (round = 0;; round += 2, rkp += 8) {
                 st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
@@ -328,10 +312,16 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
          */
         local_irq_save(flags);
 
-        st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
-        st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
-        st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
-        st0[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224];
+        /*
+         * Force the compiler to emit data independent Sbox references,
+         * by xoring the input with Sbox values that are known to add up
+         * to zero. This pulls the entire Sbox into the D-cache before any
+         * data dependent lookups are done.
+         */
+        st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[ 64] ^ __aesti_inv_sbox[129] ^ __aesti_inv_sbox[200];
+        st0[1] ^= __aesti_inv_sbox[16] ^ __aesti_inv_sbox[ 83] ^ __aesti_inv_sbox[150] ^ __aesti_inv_sbox[212];
+        st0[2] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[ 96] ^ __aesti_inv_sbox[160] ^ __aesti_inv_sbox[236];
+        st0[3] ^= __aesti_inv_sbox[48] ^ __aesti_inv_sbox[112] ^ __aesti_inv_sbox[187] ^ __aesti_inv_sbox[247];
 
         for (round = 0;; round += 2, rkp += 8) {
                 st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];