author     Ard Biesheuvel <ard.biesheuvel@linaro.org>   2018-08-20 16:58:34 +0200
committer  Herbert Xu <herbert@gondor.apana.org.au>     2018-08-25 19:50:43 +0800
commit     c2b24c36e0a30ebd8fc7d068da7f0451f2c05c76 (patch)
tree       5b9d46b3140e66a980f70735f3787c669ffcaedf
parent     e5b954e8d11fdde55eed35017370a3a0d8837754 (diff)
download   linux-c2b24c36e0a30ebd8fc7d068da7f0451f2c05c76.tar.gz
crypto: arm64/aes-gcm-ce - fix scatterwalk API violation
Commit 71e52c278c54 ("crypto: arm64/aes-ce-gcm - operate on two input
blocks at a time") modified the granularity at which the AES/GCM code
processes its input, so that subsequent changes could improve performance
by aggregating multiple input blocks per call. To that end, it doubled the
algorithm's 'chunksize' property to 2 x AES_BLOCK_SIZE, but retained the
non-SIMD fallback path that processes a single block at a time.

In some cases, this violates the skcipher scatterwalk API by calling
skcipher_walk_done() with a non-zero residue value for a chunk that is
expected to be handled in its entirety. This causes a WARN_ON() to be hit
by the TLS self-test code, and is likely to break other use cases as well.
Unfortunately, none of the current test cases exercises this exact code
path.

Fixes: 71e52c278c54 ("crypto: arm64/aes-ce-gcm - operate on two ...")
Reported-by: Vakul Garg <vakul.garg@nxp.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Vakul Garg <vakul.garg@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 29
1 file changed, 23 insertions(+), 6 deletions(-)
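For context, the sketch below models the chunk-granularity rule the patch restores. It is a simplified userspace illustration, not kernel code: old_residue() and new_residue() are hypothetical helpers that only mirror the residue arithmetic handed to skcipher_walk_done() before and after the fix, and the constants match the kernel's AES_BLOCK_SIZE.

/*
 * Simplified userspace model of the walk-granularity rule; the helper
 * names are hypothetical, only the constants mirror the kernel code.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define AES_BLOCK_SIZE	16
#define CHUNKSIZE	(2 * AES_BLOCK_SIZE)

/* Residue the pre-fix bulk loop would report for a walk step of 'nbytes'. */
static size_t old_residue(size_t nbytes)
{
	/* loop ran while at least one block remained */
	return nbytes >= AES_BLOCK_SIZE ? nbytes % AES_BLOCK_SIZE : 0;
}

/* Residue after the fix: the tail path absorbs anything below two blocks. */
static size_t new_residue(size_t nbytes)
{
	/* loop now runs only while the advertised chunksize remains */
	return nbytes >= CHUNKSIZE ? nbytes % CHUNKSIZE : 0;
}

int main(void)
{
	size_t tail = AES_BLOCK_SIZE + 8;	/* 24-byte final chunk */

	printf("old residue: %zu, new residue: %zu\n",
	       old_residue(tail), new_residue(tail));
	assert(new_residue(tail) == 0);
	return 0;
}

With a 24-byte final chunk (one whole block plus a partial block), the old arithmetic reports a residue of 8 bytes for a chunk the walker expects to be consumed in full; the fixed arithmetic leaves such a tail entirely to the fallback path, which now handles up to two blocks.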
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 6e9f33d14930eb..067d8937d5af1e 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- while (walk.nbytes >= AES_BLOCK_SIZE) {
+ while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
NULL);
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
}
- if (walk.nbytes)
+ if (walk.nbytes) {
__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
nrounds);
+ if (walk.nbytes > AES_BLOCK_SIZE) {
+ crypto_inc(iv, AES_BLOCK_SIZE);
+ __aes_arm64_encrypt(ctx->aes_key.key_enc,
+ ks + AES_BLOCK_SIZE, iv,
+ nrounds);
+ }
+ }
}
/* handle the tail */
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- while (walk.nbytes >= AES_BLOCK_SIZE) {
+ while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req)
} while (--blocks > 0);
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
}
- if (walk.nbytes)
+ if (walk.nbytes) {
+ if (walk.nbytes > AES_BLOCK_SIZE) {
+ u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+ memcpy(iv2, iv, AES_BLOCK_SIZE);
+ crypto_inc(iv2, AES_BLOCK_SIZE);
+
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
+ iv2, nrounds);
+ }
__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
nrounds);
+ }
}
/* handle the tail */
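For completeness, the following userspace sketch mirrors the shape of the added tail handling: once the bulk loop stops short of two blocks, a second counter block has to be prepared whenever the leftover exceeds one block. block_encrypt() and counter_inc() are hypothetical stand-ins for __aes_arm64_encrypt() and crypto_inc(); this illustrates the control flow only and is not a replacement for the patched functions.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE	16

/* Hypothetical stand-in for __aes_arm64_encrypt(); no real AES here. */
static void block_encrypt(uint8_t out[AES_BLOCK_SIZE],
			  const uint8_t in[AES_BLOCK_SIZE])
{
	memcpy(out, in, AES_BLOCK_SIZE);	/* placeholder permutation */
}

/* Big-endian increment of the whole block, like crypto_inc(iv, 16). */
static void counter_inc(uint8_t ctr[AES_BLOCK_SIZE])
{
	for (int i = AES_BLOCK_SIZE - 1; i >= 0; i--)
		if (++ctr[i] != 0)
			break;
}

/*
 * Prepare keystream for a tail of 'nbytes' bytes (0 < nbytes < 32):
 * one block from the current counter, and a second block from the
 * incremented counter only when more than one block is left over.
 */
static void tail_keystream(uint8_t ks[2 * AES_BLOCK_SIZE],
			   uint8_t iv[AES_BLOCK_SIZE], size_t nbytes)
{
	block_encrypt(ks, iv);
	if (nbytes > AES_BLOCK_SIZE) {
		counter_inc(iv);
		block_encrypt(ks + AES_BLOCK_SIZE, iv);
	}
}

int main(void)
{
	uint8_t iv[AES_BLOCK_SIZE] = { 0 }, ks[2 * AES_BLOCK_SIZE];

	tail_keystream(ks, iv, 24);	/* tail larger than one block */
	return 0;
}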