From 8f4f68e788c3a7a696546291258bfa5fdb215523 Mon Sep 17 00:00:00 2001
From: Lu Jialin
Date: Mon, 4 Sep 2023 13:33:41 +0000
Subject: crypto: pcrypt - Fix hungtask for PADATA_RESET

We found a hung-task bug in test_aead_vec_cfg as follows:

INFO: task cryptomgr_test:391009 blocked for more than 120 seconds.
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
Call trace:
 __switch_to+0x98/0xe0
 __schedule+0x6c4/0xf40
 schedule+0xd8/0x1b4
 schedule_timeout+0x474/0x560
 wait_for_common+0x368/0x4e0
 wait_for_completion+0x20/0x30
 wait_for_completion+0x20/0x30
 test_aead_vec_cfg+0xab4/0xd50
 test_aead+0x144/0x1f0
 alg_test_aead+0xd8/0x1e0
 alg_test+0x634/0x890
 cryptomgr_test+0x40/0x70
 kthread+0x1e0/0x220
 ret_from_fork+0x10/0x18
Kernel panic - not syncing: hung_task: blocked tasks

When padata_do_parallel returns 0 or -EBUSY, test_aead_vec_cfg calls
wait_for_completion(&wait->completion). In the normal case,
pcrypt_aead_serial calls aead_request_complete() and padata_do_parallel
returns 0. But when pinst->flags has PADATA_RESET set, padata_do_parallel
returns -EBUSY and aead_request_complete() is never called, so
test_aead_vec_cfg hangs in wait_for_completion(&wait->completion) and the
hung-task detector fires.

The race is as follows:

(padata_do_parallel)                 |
    rcu_read_lock_bh();              |
    err = -EINVAL;                   |    (padata_replace)
                                     |        pinst->flags |= PADATA_RESET;
    err = -EBUSY                     |
    if (pinst->flags & PADATA_RESET) |
    rcu_read_unlock_bh()             |
    return err                       |

To resolve the problem, replace the -EBUSY return value with -EAGAIN,
which indicates that parallel_data is changing and that the caller should
retry the request.

v3: remove retry and just change the return err.
v2: introduce padata_try_do_parallel() in pcrypt_aead_encrypt and
pcrypt_aead_decrypt to solve the hungtask.

Signed-off-by: Lu Jialin
Signed-off-by: Guo Zihua
Signed-off-by: Herbert Xu
---
 crypto/pcrypt.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'crypto')

diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 8c1d0ca412137f..d0d954fe9d54f3 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
 	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
 	if (!err)
 		return -EINPROGRESS;
+	if (err == -EBUSY)
+		return -EAGAIN;
 	return err;
 }
@@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
 	err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
 	if (!err)
 		return -EINPROGRESS;
+	if (err == -EBUSY)
+		return -EAGAIN;
 	return err;
 }
-- cgit 1.2.3-korg

From 17f7b9835a8a2ad5a2d835bc7cce87209468a5c3 Mon Sep 17 00:00:00 2001
From: Li zeming
Date: Thu, 14 Sep 2023 02:17:27 +0800
Subject: crypto: api - Remove unnecessary NULL initialisation

tfm is assigned before it is first read, so the NULL initialisation is
unnecessary.
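For illustration, a minimal sketch of the dead-store pattern being removed
(the function body is abridged, and the crypto_ctxsize() sizing helper is an
assumption about crypto/api.c internals, so treat this as a sketch rather
than the exact code):

	struct crypto_tfm *tfm;		/* was "= NULL"; that value was never read */
	unsigned int tfm_size;
	int err = -ENOMEM;

	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
	tfm = kzalloc(tfm_size, gfp);	/* first use of tfm is this assignment */
	if (tfm == NULL)
		goto out_err;

Every path writes tfm before reading it, so the initialiser was a dead store.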
Signed-off-by: Li zeming Signed-off-by: Herbert Xu --- crypto/api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/api.c b/crypto/api.c index b9cc0c906efe07..7f402107f0cc88 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -389,7 +389,7 @@ EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, u32 mask, gfp_t gfp) { - struct crypto_tfm *tfm = NULL; + struct crypto_tfm *tfm; unsigned int tfm_size; int err = -ENOMEM; -- cgit 1.2.3-korg From 534562e59f354495c2644de6540d56713dde9e52 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:21 +0800 Subject: crypto: aead - Add crypto_has_aead Add the helper crypto_has_aead. This is meant to replace the existing use of crypto_has_alg to locate AEAD algorithms. Signed-off-by: Herbert Xu --- crypto/aead.c | 6 ++++++ include/crypto/aead.h | 12 ++++++++++++ 2 files changed, 18 insertions(+) (limited to 'crypto') diff --git a/crypto/aead.c b/crypto/aead.c index d5ba204ebdbfa6..54906633566a23 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -269,6 +269,12 @@ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_alloc_aead); +int crypto_has_aead(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_aead_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_aead); + static int aead_prepare_alg(struct aead_alg *alg) { struct crypto_istat_aead *istat = aead_get_stat(alg); diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 35e45b854a6fa4..51382befbe37ab 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -217,6 +217,18 @@ static inline void crypto_free_aead(struct crypto_aead *tfm) crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); } +/** + * crypto_has_aead() - Search for the availability of an aead. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * aead + * @type: specifies the type of the aead + * @mask: specifies the mask for the aead + * + * Return: true when the aead is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_aead(const char *alg_name, u32 type, u32 mask); + static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm) { return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); -- cgit 1.2.3-korg From b64d143b752932ef483d0ed8d00958f1832dd6bc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:23 +0800 Subject: crypto: hash - Hide CRYPTO_ALG_TYPE_AHASH_MASK Move the macro CRYPTO_ALG_TYPE_AHASH_MASK out of linux/crypto.h and into crypto/ahash.c so that it's not visible to users of the Crypto API. Also remove the unused CRYPTO_ALG_TYPE_HASH_MASK macro. 
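As an aside on why the mask value works: CRYPTO_ALG_TYPE_SHASH (0x0000000e)
and CRYPTO_ALG_TYPE_AHASH (0x0000000f) differ only in the lowest bit, so
masking with 0x0000000e lets a lookup for an ahash also match shash
algorithms, which the ahash API can wrap. A rough illustration, where
ahash_type_match() is a made-up name for the comparison the crypto core
performs internally:

	/* Illustrative only: cra_flags matches an ahash request when the
	 * masked type bits agree, i.e. for both 0xe (shash) and 0xf (ahash).
	 */
	static bool ahash_type_match(u32 cra_flags)
	{
		return ((cra_flags ^ CRYPTO_ALG_TYPE_AHASH) &
			CRYPTO_ALG_TYPE_AHASH_MASK) == 0;
	}

Moving the macro into crypto/ahash.c keeps this matching detail out of the
public header.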
Signed-off-by: Herbert Xu --- crypto/ahash.c | 2 ++ include/linux/crypto.h | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 709ef094079913..213bb3e9f2451c 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -21,6 +21,8 @@ #include "hash.h" +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e + static const struct crypto_type crypto_ahash_type; struct ahash_request_priv { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 31f6fee0c36c64..a0780deb017aa6 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -35,8 +35,6 @@ #define CRYPTO_ALG_TYPE_SHASH 0x0000000e #define CRYPTO_ALG_TYPE_AHASH 0x0000000f -#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e -#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e #define CRYPTO_ALG_LARVAL 0x00000010 -- cgit 1.2.3-korg From 31865c4c4db2b742fec6ccbff80483fa3e7ab9b9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:24 +0800 Subject: crypto: skcipher - Add lskcipher Add a new API type lskcipher designed for taking straight kernel pointers instead of SG lists. Its relationship to skcipher will be analogous to that between shash and ahash. Signed-off-by: Herbert Xu --- crypto/Makefile | 6 +- crypto/cryptd.c | 2 +- crypto/lskcipher.c | 594 +++++++++++++++++++++++++++++++++++++ crypto/skcipher.c | 75 +++-- crypto/skcipher.h | 30 ++ include/crypto/internal/skcipher.h | 114 ++++++- include/crypto/skcipher.h | 309 ++++++++++++++++++- include/linux/crypto.h | 1 + 8 files changed, 1086 insertions(+), 45 deletions(-) create mode 100644 crypto/lskcipher.c create mode 100644 crypto/skcipher.h (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index 953a7e105e58c8..5ac6876f935a3f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -16,7 +16,11 @@ obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o obj-$(CONFIG_CRYPTO_AEAD2) += aead.o obj-$(CONFIG_CRYPTO_GENIV) += geniv.o -obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o +crypto_skcipher-y += lskcipher.o +crypto_skcipher-y += skcipher.o + +obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o + obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o diff --git a/crypto/cryptd.c b/crypto/cryptd.c index bbcc368b6a5513..194a92d677b9a2 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -929,7 +929,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(algt); switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_SKCIPHER: + case CRYPTO_ALG_TYPE_LSKCIPHER: return cryptd_create_skcipher(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_HASH: return cryptd_create_hash(tmpl, tb, algt, &queue); diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c new file mode 100644 index 00000000000000..3343c6d955da71 --- /dev/null +++ b/crypto/lskcipher.c @@ -0,0 +1,594 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linear symmetric key cipher operations. + * + * Generic encrypt/decrypt wrapper for ciphers. 
+ * + * Copyright (c) 2023 Herbert Xu + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "skcipher.h" + +static inline struct crypto_lskcipher *__crypto_lskcipher_cast( + struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_lskcipher, base); +} + +static inline struct lskcipher_alg *__crypto_lskcipher_alg( + struct crypto_alg *alg) +{ + return container_of(alg, struct lskcipher_alg, co.base); +} + +static inline struct crypto_istat_cipher *lskcipher_get_stat( + struct lskcipher_alg *alg) +{ + return skcipher_get_stat_common(&alg->co); +} + +static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err) +{ + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; + + if (err) + atomic64_inc(&istat->err_cnt); + + return err; +} + +static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm); + u8 *buffer, *alignbuffer; + unsigned long absize; + int ret; + + absize = keylen + alignmask; + buffer = kmalloc(absize, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; + + alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + memcpy(alignbuffer, key, keylen); + ret = cipher->setkey(tfm, alignbuffer, keylen); + kfree_sensitive(buffer); + return ret; +} + +int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm); + + if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize) + return -EINVAL; + + if ((unsigned long)key & alignmask) + return lskcipher_setkey_unaligned(tfm, key, keylen); + else + return cipher->setkey(tfm, key, keylen); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey); + +static int crypto_lskcipher_crypt_unaligned( + struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len, + u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final)) +{ + unsigned ivsize = crypto_lskcipher_ivsize(tfm); + unsigned bs = crypto_lskcipher_blocksize(tfm); + unsigned cs = crypto_lskcipher_chunksize(tfm); + int err; + u8 *tiv; + u8 *p; + + BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE || + MAX_CIPHER_ALIGNMASK >= PAGE_SIZE); + + tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (!tiv) + return -ENOMEM; + + memcpy(tiv, iv, ivsize); + + p = kmalloc(PAGE_SIZE, GFP_ATOMIC); + err = -ENOMEM; + if (!p) + goto out; + + while (len >= bs) { + unsigned chunk = min((unsigned)PAGE_SIZE, len); + int err; + + if (chunk > cs) + chunk &= ~(cs - 1); + + memcpy(p, src, chunk); + err = crypt(tfm, p, p, chunk, tiv, true); + if (err) + goto out; + + memcpy(dst, p, chunk); + src += chunk; + dst += chunk; + len -= chunk; + } + + err = len ? 
-EINVAL : 0; + +out: + memcpy(iv, tiv, ivsize); + kfree_sensitive(p); + kfree_sensitive(tiv); + return err; +} + +static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, + int (*crypt)(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, + unsigned len, u8 *iv, + bool final)) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + int ret; + + if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) & + alignmask) { + ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, + crypt); + goto out; + } + + ret = crypt(tfm, src, dst, len, iv, true); + +out: + return crypto_lskcipher_errstat(alg, ret); +} + +int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv) +{ + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + atomic64_inc(&istat->encrypt_cnt); + atomic64_add(len, &istat->encrypt_tlen); + } + + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt); + +int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv) +{ + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + atomic64_inc(&istat->decrypt_cnt); + atomic64_add(len, &istat->decrypt_tlen); + } + + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt); + +int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm); + + return crypto_lskcipher_setkey(*ctx, key, keylen); +} + +static int crypto_lskcipher_crypt_sg(struct skcipher_request *req, + int (*crypt)(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, + unsigned len, u8 *iv, + bool final)) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct crypto_lskcipher *tfm = *ctx; + struct skcipher_walk walk; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes) { + err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr, + walk.nbytes, walk.iv, walk.nbytes == walk.total); + err = skcipher_walk_done(&walk, err); + } + + return err; +} + +int crypto_lskcipher_encrypt_sg(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx); + + return crypto_lskcipher_crypt_sg(req, alg->encrypt); +} + +int crypto_lskcipher_decrypt_sg(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx); + + return crypto_lskcipher_crypt_sg(req, alg->decrypt); +} + +static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher); + + alg->exit(skcipher); +} + +static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher *skcipher = 
__crypto_lskcipher_cast(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher); + + if (alg->exit) + skcipher->base.exit = crypto_lskcipher_exit_tfm; + + if (alg->init) + return alg->init(skcipher); + + return 0; +} + +static void crypto_lskcipher_free_instance(struct crypto_instance *inst) +{ + struct lskcipher_instance *skcipher = + container_of(inst, struct lskcipher_instance, s.base); + + skcipher->free(skcipher); +} + +static void __maybe_unused crypto_lskcipher_show( + struct seq_file *m, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + + seq_printf(m, "type : lskcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", skcipher->co.min_keysize); + seq_printf(m, "max keysize : %u\n", skcipher->co.max_keysize); + seq_printf(m, "ivsize : %u\n", skcipher->co.ivsize); + seq_printf(m, "chunksize : %u\n", skcipher->co.chunksize); +} + +static int __maybe_unused crypto_lskcipher_report( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + struct crypto_report_blkcipher rblkcipher; + + memset(&rblkcipher, 0, sizeof(rblkcipher)); + + strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type)); + strscpy(rblkcipher.geniv, "", sizeof(rblkcipher.geniv)); + + rblkcipher.blocksize = alg->cra_blocksize; + rblkcipher.min_keysize = skcipher->co.min_keysize; + rblkcipher.max_keysize = skcipher->co.max_keysize; + rblkcipher.ivsize = skcipher->co.ivsize; + + return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(rblkcipher), &rblkcipher); +} + +static int __maybe_unused crypto_lskcipher_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + struct crypto_istat_cipher *istat; + struct crypto_stat_cipher rcipher; + + istat = lskcipher_get_stat(skcipher); + + memset(&rcipher, 0, sizeof(rcipher)); + + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); + + rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); + rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); + rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); + rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); + rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); +} + +static const struct crypto_type crypto_lskcipher_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_lskcipher_init_tfm, + .free = crypto_lskcipher_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_lskcipher_show, +#endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) + .report = crypto_lskcipher_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_lskcipher_report_stat, +#endif + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_LSKCIPHER, + .tfmsize = offsetof(struct crypto_lskcipher, base), +}; + +static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm); + + crypto_free_lskcipher(*ctx); +} + +int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_lskcipher *skcipher; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type); + if (IS_ERR(skcipher)) { + crypto_mod_put(calg); + return PTR_ERR(skcipher); + } + + 
*ctx = skcipher; + tfm->exit = crypto_lskcipher_exit_tfm_sg; + + return 0; +} + +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_lskcipher_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_lskcipher); + +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher); + +static int lskcipher_prepare_alg(struct lskcipher_alg *alg) +{ + struct crypto_alg *base = &alg->co.base; + int err; + + err = skcipher_prepare_alg_common(&alg->co); + if (err) + return err; + + if (alg->co.chunksize & (alg->co.chunksize - 1)) + return -EINVAL; + + base->cra_type = &crypto_lskcipher_type; + base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER; + + return 0; +} + +int crypto_register_lskcipher(struct lskcipher_alg *alg) +{ + struct crypto_alg *base = &alg->co.base; + int err; + + err = lskcipher_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_lskcipher); + +void crypto_unregister_lskcipher(struct lskcipher_alg *alg) +{ + crypto_unregister_alg(&alg->co.base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher); + +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_lskcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_lskcipher(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_lskciphers); + +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_lskcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers); + +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst) +{ + int err; + + if (WARN_ON(!inst->free)) + return -EINVAL; + + err = lskcipher_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(lskcipher_register_instance); + +static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm); + + crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + return crypto_lskcipher_setkey(cipher, key, keylen); +} + +static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm) +{ + struct lskcipher_instance *inst = lskcipher_alg_instance(tfm); + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_lskcipher_spawn *spawn; + struct crypto_lskcipher *cipher; + + spawn = lskcipher_instance_ctx(inst); + cipher = crypto_spawn_lskcipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + *ctx = cipher; + return 0; +} + +static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + crypto_free_lskcipher(*ctx); +} + +static void lskcipher_free_instance_simple(struct lskcipher_instance *inst) +{ + crypto_drop_lskcipher(lskcipher_instance_ctx(inst)); + kfree(inst); +} + +/** + * lskcipher_alloc_instance_simple - allocate instance of 
simple block cipher + * + * Allocate an lskcipher_instance for a simple block cipher mode of operation, + * e.g. cbc or ecb. The instance context will have just a single crypto_spawn, + * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize, + * alignmask, and priority are set from the underlying cipher but can be + * overridden if needed. The tfm context defaults to + * struct crypto_lskcipher *, and default ->setkey(), ->init(), and + * ->exit() methods are installed. + * + * @tmpl: the template being instantiated + * @tb: the template parameters + * + * Return: a pointer to the new instance, or an ERR_PTR(). The caller still + * needs to register the instance. + */ +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb) +{ + u32 mask; + struct lskcipher_instance *inst; + struct crypto_lskcipher_spawn *spawn; + struct lskcipher_alg *cipher_alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); + if (err) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = lskcipher_instance_ctx(inst); + err = crypto_grab_lskcipher(spawn, + lskcipher_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + cipher_alg = crypto_lskcipher_spawn_alg(spawn); + + err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, + &cipher_alg->co.base); + if (err) + goto err_free_inst; + + /* Don't allow nesting. */ + err = -ELOOP; + if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE)) + goto err_free_inst; + + err = -EINVAL; + if (cipher_alg->co.ivsize) + goto err_free_inst; + + inst->free = lskcipher_free_instance_simple; + + /* Default algorithm properties, can be overridden */ + inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize; + inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask; + inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority; + inst->alg.co.min_keysize = cipher_alg->co.min_keysize; + inst->alg.co.max_keysize = cipher_alg->co.max_keysize; + inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize; + + /* Use struct crypto_lskcipher * by default, can be overridden */ + inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *); + inst->alg.setkey = lskcipher_setkey_simple; + inst->alg.init = lskcipher_init_tfm_simple; + inst->alg.exit = lskcipher_exit_tfm_simple; + + return inst; + +err_free_inst: + lskcipher_free_instance_simple(inst); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7b275716cf4e3a..b9496dc8a609f6 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -24,8 +24,9 @@ #include #include #include +#include "skcipher.h" -#include "internal.h" +#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e enum { SKCIPHER_WALK_PHYS = 1 << 0, @@ -43,6 +44,8 @@ struct skcipher_walk_buffer { u8 buffer[]; }; +static const struct crypto_type crypto_skcipher_type; + static int skcipher_walk_next(struct skcipher_walk *walk); static inline void skcipher_map_src(struct skcipher_walk *walk) @@ -89,11 +92,7 @@ static inline struct skcipher_alg *__crypto_skcipher_alg( static inline struct crypto_istat_cipher *skcipher_get_stat( struct skcipher_alg *alg) { -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif + return skcipher_get_stat_common(&alg->co); } static inline int crypto_skcipher_errstat(struct 
skcipher_alg *alg, int err) @@ -468,6 +467,7 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); walk->total = req->cryptlen; walk->nbytes = 0; @@ -485,10 +485,14 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, SKCIPHER_WALK_SLEEP : 0; walk->blocksize = crypto_skcipher_blocksize(tfm); - walk->stride = crypto_skcipher_walksize(tfm); walk->ivsize = crypto_skcipher_ivsize(tfm); walk->alignmask = crypto_skcipher_alignmask(tfm); + if (alg->co.base.cra_type != &crypto_skcipher_type) + walk->stride = alg->co.chunksize; + else + walk->stride = alg->walksize; + return skcipher_walk_first(walk); } @@ -616,6 +620,11 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned long alignmask = crypto_skcipher_alignmask(tfm); int err; + if (cipher->co.base.cra_type != &crypto_skcipher_type) { + err = crypto_lskcipher_setkey_sg(tfm, key, keylen); + goto out; + } + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) return -EINVAL; @@ -624,6 +633,7 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, else err = cipher->setkey(tfm, key, keylen); +out: if (unlikely(err)) { skcipher_set_needkey(tfm); return err; @@ -649,6 +659,8 @@ int crypto_skcipher_encrypt(struct skcipher_request *req) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; + else if (alg->co.base.cra_type != &crypto_skcipher_type) + ret = crypto_lskcipher_encrypt_sg(req); else ret = alg->encrypt(req); @@ -671,6 +683,8 @@ int crypto_skcipher_decrypt(struct skcipher_request *req) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; + else if (alg->co.base.cra_type != &crypto_skcipher_type) + ret = crypto_lskcipher_decrypt_sg(req); else ret = alg->decrypt(req); @@ -693,6 +707,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher_set_needkey(skcipher); + if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) + return crypto_init_lskcipher_ops_sg(tfm); + if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; @@ -702,6 +719,14 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) return 0; } +static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) +{ + if (alg->cra_type != &crypto_skcipher_type) + return sizeof(struct crypto_lskcipher *); + + return crypto_alg_extsize(alg); +} + static void crypto_skcipher_free_instance(struct crypto_instance *inst) { struct skcipher_instance *skcipher = @@ -770,7 +795,7 @@ static int __maybe_unused crypto_skcipher_report_stat( } static const struct crypto_type crypto_skcipher_type = { - .extsize = crypto_alg_extsize, + .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, .free = crypto_skcipher_free_instance, #ifdef CONFIG_PROC_FS @@ -783,7 +808,7 @@ static const struct crypto_type crypto_skcipher_type = { .report_stat = crypto_skcipher_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, - .maskset = CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK, .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), }; @@ -834,23 +859,18 @@ int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_skcipher); -static int skcipher_prepare_alg(struct skcipher_alg *alg) +int skcipher_prepare_alg_common(struct skcipher_alg_common *alg) { - struct crypto_istat_cipher *istat = skcipher_get_stat(alg); + 
struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg); struct crypto_alg *base = &alg->base; - if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || - alg->walksize > PAGE_SIZE / 8) + if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8) return -EINVAL; if (!alg->chunksize) alg->chunksize = base->cra_blocksize; - if (!alg->walksize) - alg->walksize = alg->chunksize; - base->cra_type = &crypto_skcipher_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; - base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) memset(istat, 0, sizeof(*istat)); @@ -858,6 +878,27 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg) return 0; } +static int skcipher_prepare_alg(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = skcipher_prepare_alg_common(&alg->co); + if (err) + return err; + + if (alg->walksize > PAGE_SIZE / 8) + return -EINVAL; + + if (!alg->walksize) + alg->walksize = alg->chunksize; + + base->cra_type = &crypto_skcipher_type; + base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; + + return 0; +} + int crypto_register_skcipher(struct skcipher_alg *alg) { struct crypto_alg *base = &alg->base; diff --git a/crypto/skcipher.h b/crypto/skcipher.h new file mode 100644 index 00000000000000..6f1295f0fef24f --- /dev/null +++ b/crypto/skcipher.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * Copyright (c) 2023 Herbert Xu + */ +#ifndef _LOCAL_CRYPTO_SKCIPHER_H +#define _LOCAL_CRYPTO_SKCIPHER_H + +#include +#include "internal.h" + +static inline struct crypto_istat_cipher *skcipher_get_stat_common( + struct skcipher_alg_common *alg) +{ +#ifdef CONFIG_CRYPTO_STATS + return &alg->stat; +#else + return NULL; +#endif +} + +int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int crypto_lskcipher_encrypt_sg(struct skcipher_request *req); +int crypto_lskcipher_decrypt_sg(struct skcipher_request *req); +int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm); +int skcipher_prepare_alg_common(struct skcipher_alg_common *alg); + +#endif /* _LOCAL_CRYPTO_SKCIPHER_H */ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index fb3d9e899f526b..4382fd707b8a39 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -36,10 +36,25 @@ struct skcipher_instance { }; }; +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *inst); + union { + struct { + char head[offsetof(struct lskcipher_alg, co.base)]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + struct crypto_skcipher_spawn { struct crypto_spawn base; }; +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + struct skcipher_walk { union { struct { @@ -80,6 +95,12 @@ static inline struct crypto_instance *skcipher_crypto_instance( return &inst->s.base; } +static inline struct crypto_instance *lskcipher_crypto_instance( + struct lskcipher_instance *inst) +{ + return &inst->s.base; +} + static inline struct skcipher_instance *skcipher_alg_instance( struct crypto_skcipher *skcipher) { @@ -87,11 +108,23 @@ static inline struct skcipher_instance *skcipher_alg_instance( struct skcipher_instance, alg); } +static inline struct lskcipher_instance *lskcipher_alg_instance( + struct crypto_lskcipher *lskcipher) +{ + return container_of(crypto_lskcipher_alg(lskcipher), + struct lskcipher_instance, alg); +} + static inline void *skcipher_instance_ctx(struct 
skcipher_instance *inst) { return crypto_instance_ctx(skcipher_crypto_instance(inst)); } +static inline void *lskcipher_instance_ctx(struct lskcipher_instance *inst) +{ + return crypto_instance_ctx(lskcipher_crypto_instance(inst)); +} + static inline void skcipher_request_complete(struct skcipher_request *req, int err) { crypto_request_complete(&req->base, err); @@ -101,29 +134,56 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) { crypto_drop_spawn(&spawn->base); } +static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + static inline struct skcipher_alg *crypto_skcipher_spawn_alg( struct crypto_skcipher_spawn *spawn) { return container_of(spawn->base.alg, struct skcipher_alg, base); } +static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg( + struct crypto_lskcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct lskcipher_alg, co.base); +} + static inline struct skcipher_alg *crypto_spawn_skcipher_alg( struct crypto_skcipher_spawn *spawn) { return crypto_skcipher_spawn_alg(spawn); } +static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_lskcipher_spawn_alg(spawn); +} + static inline struct crypto_skcipher *crypto_spawn_skcipher( struct crypto_skcipher_spawn *spawn) { return crypto_spawn_tfm2(&spawn->base); } +static inline struct crypto_lskcipher *crypto_spawn_lskcipher( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + static inline void crypto_skcipher_set_reqsize( struct crypto_skcipher *skcipher, unsigned int reqsize) { @@ -144,6 +204,13 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst); +int crypto_register_lskcipher(struct lskcipher_alg *alg); +void crypto_unregister_lskcipher(struct lskcipher_alg *alg); +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count); +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst); + int skcipher_walk_done(struct skcipher_walk *walk, int err); int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, @@ -166,6 +233,11 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm) { return crypto_tfm_ctx_dma(&tfm->base); @@ -209,21 +281,16 @@ static inline unsigned int crypto_skcipher_alg_walksize( return alg->walksize; } -/** - * crypto_skcipher_walksize() - obtain walk size - * @tfm: cipher handle - * - * In some cases, algorithms can only perform optimally when operating on - * multiple blocks in parallel. 
This is reflected by the walksize, which - * must be a multiple of the chunksize (or equal if the concern does not - * apply) - * - * Return: walk size in bytes - */ -static inline unsigned int crypto_skcipher_walksize( - struct crypto_skcipher *tfm) +static inline unsigned int crypto_lskcipher_alg_min_keysize( + struct lskcipher_alg *alg) +{ + return alg->co.min_keysize; +} + +static inline unsigned int crypto_lskcipher_alg_max_keysize( + struct lskcipher_alg *alg) { - return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); + return alg->co.max_keysize; } /* Helpers for simple block cipher modes of operation */ @@ -249,5 +316,24 @@ static inline struct crypto_alg *skcipher_ialg_simple( return crypto_spawn_cipher_alg(spawn); } +static inline struct crypto_lskcipher *lskcipher_cipher_simple( + struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + return *ctx; +} + +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb); + +static inline struct lskcipher_alg *lskcipher_ialg_simple( + struct lskcipher_instance *inst) +{ + struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst); + + return crypto_lskcipher_spawn_alg(spawn); +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 080d1ba3611d8d..a648ef5ce89743 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -49,6 +49,10 @@ struct crypto_sync_skcipher { struct crypto_skcipher base; }; +struct crypto_lskcipher { + struct crypto_tfm base; +}; + /* * struct crypto_istat_cipher - statistics for cipher algorithm * @encrypt_cnt: number of encrypt requests @@ -65,6 +69,43 @@ struct crypto_istat_cipher { atomic64_t err_cnt; }; +#ifdef CONFIG_CRYPTO_STATS +#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat; +#else +#define SKCIPHER_ALG_COMMON_STAT +#endif + +/* + * struct skcipher_alg_common - common properties of skcipher_alg + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @stat: Statistics for cipher algorithm + * @base: Definition of a generic crypto algorithm. + */ +#define SKCIPHER_ALG_COMMON { \ + unsigned int min_keysize; \ + unsigned int max_keysize; \ + unsigned int ivsize; \ + unsigned int chunksize; \ + \ + SKCIPHER_ALG_COMMON_STAT \ + \ + struct crypto_alg base; \ +} +struct skcipher_alg_common SKCIPHER_ALG_COMMON; + /** * struct skcipher_alg - symmetric key cipher definition * @min_keysize: Minimum key size supported by the transformation. This is the @@ -120,6 +161,7 @@ struct crypto_istat_cipher { * in parallel. 
Should be a multiple of chunksize. * @stat: Statistics for cipher algorithm * @base: Definition of a generic crypto algorithm. + * @co: see struct skcipher_alg_common * * All fields except @ivsize are mandatory and must be filled. */ @@ -131,17 +173,55 @@ struct skcipher_alg { int (*init)(struct crypto_skcipher *tfm); void (*exit)(struct crypto_skcipher *tfm); - unsigned int min_keysize; - unsigned int max_keysize; - unsigned int ivsize; - unsigned int chunksize; unsigned int walksize; -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_cipher stat; -#endif + union { + struct SKCIPHER_ALG_COMMON; + struct skcipher_alg_common co; + }; +}; - struct crypto_alg base; +/** + * struct lskcipher_alg - linear symmetric key cipher definition + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a number of bytes. This function is used to encrypt + * the supplied data. This function shall not modify + * the transformation context, as this function may be called + * in parallel with the same transformation object. Data + * may be left over if length is not a multiple of blocks + * and there is more to come (final == false). The number of + * left-over bytes should be returned in case of success. + * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to + * @encrypt and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @co: see struct skcipher_alg_common + */ +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final); + int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final); + int (*init)(struct crypto_lskcipher *tfm); + void (*exit)(struct crypto_lskcipher *tfm); + + struct skcipher_alg_common co; }; #define MAX_SYNC_SKCIPHER_REQSIZE 384 @@ -213,12 +293,36 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name, u32 type, u32 mask); + +/** + * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * lskcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an lskcipher. 
The returned struct + * crypto_lskcipher is the cipher handle that is required for any subsequent + * API invocation for that lskcipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask); + static inline struct crypto_tfm *crypto_skcipher_tfm( struct crypto_skcipher *tfm) { return &tfm->base; } +static inline struct crypto_tfm *crypto_lskcipher_tfm( + struct crypto_lskcipher *tfm) +{ + return &tfm->base; +} + /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed @@ -235,6 +339,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) crypto_free_skcipher(&tfm->base); } +/** + * crypto_free_lskcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm)); +} + /** * crypto_has_skcipher() - Search for the availability of an skcipher. * @alg_name: is the cra_name / name or cra_driver_name / driver name of the @@ -253,6 +368,19 @@ static inline const char *crypto_skcipher_driver_name( return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); } +static inline const char *crypto_lskcipher_driver_name( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm)); +} + +static inline struct skcipher_alg_common *crypto_skcipher_alg_common( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg_common, base); +} + static inline struct skcipher_alg *crypto_skcipher_alg( struct crypto_skcipher *tfm) { @@ -260,11 +388,24 @@ static inline struct skcipher_alg *crypto_skcipher_alg( struct skcipher_alg, base); } +static inline struct lskcipher_alg *crypto_lskcipher_alg( + struct crypto_lskcipher *tfm) +{ + return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg, + struct lskcipher_alg, co.base); +} + static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) { return alg->ivsize; } +static inline unsigned int crypto_lskcipher_alg_ivsize( + struct lskcipher_alg *alg) +{ + return alg->co.ivsize; +} + /** * crypto_skcipher_ivsize() - obtain IV size * @tfm: cipher handle @@ -276,7 +417,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) */ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->ivsize; + return crypto_skcipher_alg_common(tfm)->ivsize; } static inline unsigned int crypto_sync_skcipher_ivsize( @@ -285,6 +426,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize( return crypto_skcipher_ivsize(&tfm->base); } +/** + * crypto_lskcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the lskcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. 
+ * + * Return: IV size in bytes + */ +static inline unsigned int crypto_lskcipher_ivsize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.ivsize; +} + /** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle @@ -301,12 +457,34 @@ static inline unsigned int crypto_skcipher_blocksize( return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); } +/** + * crypto_lskcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the lskcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_lskcipher_blocksize( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm)); +} + static inline unsigned int crypto_skcipher_alg_chunksize( struct skcipher_alg *alg) { return alg->chunksize; } +static inline unsigned int crypto_lskcipher_alg_chunksize( + struct lskcipher_alg *alg) +{ + return alg->co.chunksize; +} + /** * crypto_skcipher_chunksize() - obtain chunk size * @tfm: cipher handle @@ -321,7 +499,24 @@ static inline unsigned int crypto_skcipher_alg_chunksize( static inline unsigned int crypto_skcipher_chunksize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); + return crypto_skcipher_alg_common(tfm)->chunksize; +} + +/** + * crypto_lskcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_lskcipher_chunksize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg_chunksize(crypto_lskcipher_alg(tfm)); } static inline unsigned int crypto_sync_skcipher_blocksize( @@ -336,6 +531,12 @@ static inline unsigned int crypto_skcipher_alignmask( return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)); } +static inline unsigned int crypto_lskcipher_alignmask( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm)); +} + static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm) { return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm)); @@ -371,6 +572,23 @@ static inline void crypto_sync_skcipher_clear_flags( crypto_skcipher_clear_flags(&tfm->base, flags); } +static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm)); +} + +static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags); +} + +static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags); +} + /** * crypto_skcipher_setkey() - set key for cipher * @tfm: cipher handle @@ -396,16 +614,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, return crypto_skcipher_setkey(&tfm->base, key, keylen); } +/** + * crypto_lskcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the lskcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen); + static inline unsigned int crypto_skcipher_min_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->min_keysize; + return crypto_skcipher_alg_common(tfm)->min_keysize; } static inline unsigned int crypto_skcipher_max_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->max_keysize; + return crypto_skcipher_alg_common(tfm)->max_keysize; +} + +static inline unsigned int crypto_lskcipher_min_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.min_keysize; +} + +static inline unsigned int crypto_lskcipher_max_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.max_keysize; } /** @@ -457,6 +706,42 @@ int crypto_skcipher_encrypt(struct skcipher_request *req); */ int crypto_skcipher_decrypt(struct skcipher_request *req); +/** + * crypto_lskcipher_encrypt() - encrypt plaintext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_lskcipher_ivsize + * + * Encrypt plaintext data using the lskcipher handle. 
+ * + * Return: >=0 if the cipher operation was successful, if positive + * then this many bytes have been left unprocessed; + * < 0 if an error occurred + */ +int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv); + +/** + * crypto_lskcipher_decrypt() - decrypt ciphertext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_lskcipher_ivsize + * + * Decrypt ciphertext data using the lskcipher handle. + * + * Return: >=0 if the cipher operation was successful, if positive + * then this many bytes have been left unprocessed; + * < 0 if an error occurred + */ +int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv); + /** * DOC: Symmetric Key Cipher Request Handle * diff --git a/include/linux/crypto.h b/include/linux/crypto.h index a0780deb017aa6..f3c3a3b27facd8 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -24,6 +24,7 @@ #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 #define CRYPTO_ALG_TYPE_AEAD 0x00000003 +#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_SIG 0x00000007 -- cgit 1.2.3-korg From 8aee5d4ebd113319f6882b2cd0475d270fdc0d41 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:25 +0800 Subject: crypto: lskcipher - Add compatibility wrapper around ECB As an aid to the transition from cipher algorithm implementations to lskcipher, add a temporary wrapper when creating simple lskcipher templates by using ecb(X) instead of X if an lskcipher implementation of X cannot be found. This can be reverted once all cipher implementations have switched over to lskcipher. 
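In rough outline (names taken from the diff below; the template-name check
and some error paths are trimmed), the lookup now falls back like this:

	err = crypto_grab_lskcipher(spawn, lskcipher_crypto_instance(inst),
				    cipher_name, 0, mask);
	if (err == -ENOENT) {
		/* No lskcipher "X" registered yet: retry as "ecb(X)". */
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;	/* with err = -ENAMETOOLONG */
		err = crypto_grab_lskcipher(spawn,
					    lskcipher_crypto_instance(inst),
					    ecb_name, 0, mask);
	}

The instance-naming code then strips the ecb() wrapper back out of cra_name,
so a template instantiated over ecb(aes), as an example, is still advertised
under the plain aes-based name.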
Signed-off-by: Herbert Xu --- crypto/lskcipher.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index 3343c6d955da71..9be3c04bc62a39 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -536,13 +536,19 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple( u32 mask; struct lskcipher_instance *inst; struct crypto_lskcipher_spawn *spawn; + char ecb_name[CRYPTO_MAX_ALG_NAME]; struct lskcipher_alg *cipher_alg; + const char *cipher_name; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); if (err) return ERR_PTR(err); + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return ERR_CAST(cipher_name); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return ERR_PTR(-ENOMEM); @@ -550,9 +556,23 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple( spawn = lskcipher_instance_ctx(inst); err = crypto_grab_lskcipher(spawn, lskcipher_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, mask); + cipher_name, 0, mask); + + ecb_name[0] = 0; + if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) { + err = -ENAMETOOLONG; + if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + err = crypto_grab_lskcipher(spawn, + lskcipher_crypto_instance(inst), + ecb_name, 0, mask); + } + if (err) goto err_free_inst; + cipher_alg = crypto_lskcipher_spawn_alg(spawn); err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, @@ -560,10 +580,37 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple( if (err) goto err_free_inst; - /* Don't allow nesting. */ - err = -ELOOP; - if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE)) - goto err_free_inst; + if (ecb_name[0]) { + int len; + + len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4], + sizeof(ecb_name)); + if (len < 2) + goto err_free_inst; + + if (ecb_name[len - 1] != ')') + goto err_free_inst; + + ecb_name[len - 1] = 0; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, ecb_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (strcmp(ecb_name, cipher_name) && + snprintf(inst->alg.co.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, cipher_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + } else { + /* Don't allow nesting. */ + err = -ELOOP; + if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE)) + goto err_free_inst; + } err = -EINVAL; if (cipher_alg->co.ivsize) -- cgit 1.2.3-korg From 3dfe8786b11a4a3f9ced2eb89c6c5d73eba84700 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:26 +0800 Subject: crypto: testmgr - Add support for lskcipher algorithms Test lskcipher algorithms using the same logic as cipher algorithms. 
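At the API level, exercising an lskcipher comes down to flat-buffer calls. A
hedged smoke-test sketch, using only functions introduced earlier in this
series; "ecb(aes)", the zero key and the zero IV buffer are illustrative:

	#include <crypto/skcipher.h>

	static int lskcipher_smoke_test(void)
	{
		static const u8 key[16];	/* all-zero key, illustration only */
		struct crypto_lskcipher *tfm;
		u8 iv[16] = {};		/* ecb has ivsize 0; buffer kept for form */
		u8 buf[32] = {};
		int err;

		tfm = crypto_alloc_lskcipher("ecb(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_lskcipher_setkey(tfm, key, sizeof(key));
		if (!err)
			err = crypto_lskcipher_encrypt(tfm, buf, buf,
						       sizeof(buf), iv);

		crypto_free_lskcipher(tfm);
		return err;	/* >0 would mean bytes left unprocessed */
	}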
Signed-off-by: Herbert Xu --- crypto/testmgr.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 216878c8bc3d62..aed4a6bf47ad3d 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5945,6 +5945,25 @@ test_done: return rc; notest: + if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_LSKCIPHER) { + char nalg[CRYPTO_MAX_ALG_NAME]; + + if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >= + sizeof(nalg)) + goto notest2; + + i = alg_find_test(nalg); + if (i < 0) + goto notest2; + + if (fips_enabled && !alg_test_descs[i].fips_allowed) + goto non_fips_alg; + + rc = alg_test_skcipher(alg_test_descs + i, driver, type, mask); + goto test_done; + } + +notest2: printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver); if (type & CRYPTO_ALG_FIPS_INTERNAL) -- cgit 1.2.3-korg From 32a8dc4afcfb098ef4e8b465c90db17d22d90107 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:27 +0800 Subject: crypto: ecb - Convert from skcipher to lskcipher This patch adds two different implementations of ECB. First of all an lskcipher wrapper around existing ciphers is introduced as a temporary transition aid. Secondly a permanent lskcipher template is also added. It's simply a wrapper around the underlying lskcipher algorithm. Signed-off-by: Herbert Xu --- crypto/ecb.c | 190 ++++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 156 insertions(+), 34 deletions(-) (limited to 'crypto') diff --git a/crypto/ecb.c b/crypto/ecb.c index 71fbb0543d64ea..cc7625d1a475e8 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -5,75 +5,196 @@ * Copyright (c) 2006 Herbert Xu */ -#include #include #include #include #include #include #include +#include -static int crypto_ecb_crypt(struct skcipher_request *req, - struct crypto_cipher *cipher, +static int crypto_ecb_crypt(struct crypto_cipher *cipher, const u8 *src, + u8 *dst, unsigned nbytes, bool final, void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) { const unsigned int bsize = crypto_cipher_blocksize(cipher); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes) != 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; + while (nbytes >= bsize) { + fn(crypto_cipher_tfm(cipher), dst, src); - do { - fn(crypto_cipher_tfm(cipher), dst, src); + src += bsize; + dst += bsize; - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); - - err = skcipher_walk_done(&walk, nbytes); + nbytes -= bsize; } - return err; + return nbytes && final ? 
-EINVAL : nbytes; } -static int crypto_ecb_encrypt(struct skcipher_request *req) +static int crypto_ecb_encrypt2(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; - return crypto_ecb_crypt(req, cipher, + return crypto_ecb_crypt(cipher, src, dst, len, final, crypto_cipher_alg(cipher)->cia_encrypt); } -static int crypto_ecb_decrypt(struct skcipher_request *req) +static int crypto_ecb_decrypt2(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; - return crypto_ecb_crypt(req, cipher, + return crypto_ecb_crypt(cipher, src, dst, len, final, crypto_cipher_alg(cipher)->cia_decrypt); } -static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) +static int lskcipher_setkey_simple2(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen) +{ + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; + + crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + return crypto_cipher_setkey(cipher, key, keylen); +} + +static int lskcipher_init_tfm_simple2(struct crypto_lskcipher *tfm) +{ + struct lskcipher_instance *inst = lskcipher_alg_instance(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher_spawn *spawn; + struct crypto_cipher *cipher; + + spawn = lskcipher_instance_ctx(inst); + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + *ctx = cipher; + return 0; +} + +static void lskcipher_exit_tfm_simple2(struct crypto_lskcipher *tfm) +{ + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + + crypto_free_cipher(*ctx); +} + +static void lskcipher_free_instance_simple2(struct lskcipher_instance *inst) +{ + crypto_drop_cipher(lskcipher_instance_ctx(inst)); + kfree(inst); +} + +static struct lskcipher_instance *lskcipher_alloc_instance_simple2( + struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_cipher_spawn *spawn; + struct lskcipher_instance *inst; + struct crypto_alg *cipher_alg; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); + if (err) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + spawn = lskcipher_instance_ctx(inst); + + err = crypto_grab_cipher(spawn, lskcipher_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + cipher_alg = crypto_spawn_cipher_alg(spawn); + + err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, + cipher_alg); + if (err) + goto err_free_inst; + + inst->free = lskcipher_free_instance_simple2; + + /* Default algorithm properties, can be overridden */ + inst->alg.co.base.cra_blocksize = cipher_alg->cra_blocksize; + inst->alg.co.base.cra_alignmask = cipher_alg->cra_alignmask; + inst->alg.co.base.cra_priority = cipher_alg->cra_priority; + inst->alg.co.min_keysize = cipher_alg->cra_cipher.cia_min_keysize; + inst->alg.co.max_keysize 
= cipher_alg->cra_cipher.cia_max_keysize; + inst->alg.co.ivsize = cipher_alg->cra_blocksize; + + /* Use struct crypto_cipher * by default, can be overridden */ + inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_cipher *); + inst->alg.setkey = lskcipher_setkey_simple2; + inst->alg.init = lskcipher_init_tfm_simple2; + inst->alg.exit = lskcipher_exit_tfm_simple2; + + return inst; + +err_free_inst: + lskcipher_free_instance_simple2(inst); + return ERR_PTR(err); +} + +static int crypto_ecb_create2(struct crypto_template *tmpl, struct rtattr **tb) { - struct skcipher_instance *inst; + struct lskcipher_instance *inst; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb); + inst = lskcipher_alloc_instance_simple2(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); - inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */ + /* ECB mode doesn't take an IV */ + inst->alg.co.ivsize = 0; + + inst->alg.encrypt = crypto_ecb_encrypt2; + inst->alg.decrypt = crypto_ecb_decrypt2; + + err = lskcipher_register_instance(tmpl, inst); + if (err) + inst->free(inst); + + return err; +} + +static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_lskcipher_spawn *spawn; + struct lskcipher_alg *cipher_alg; + struct lskcipher_instance *inst; + int err; + + inst = lskcipher_alloc_instance_simple(tmpl, tb); + if (IS_ERR(inst)) { + err = crypto_ecb_create2(tmpl, tb); + return err; + } + + spawn = lskcipher_instance_ctx(inst); + cipher_alg = crypto_lskcipher_spawn_alg(spawn); + + /* ECB mode doesn't take an IV */ + inst->alg.co.ivsize = 0; + if (cipher_alg->co.ivsize) + return -EINVAL; - inst->alg.encrypt = crypto_ecb_encrypt; - inst->alg.decrypt = crypto_ecb_decrypt; + inst->alg.co.base.cra_ctxsize = cipher_alg->co.base.cra_ctxsize; + inst->alg.setkey = cipher_alg->setkey; + inst->alg.encrypt = cipher_alg->encrypt; + inst->alg.decrypt = cipher_alg->decrypt; + inst->alg.init = cipher_alg->init; + inst->alg.exit = cipher_alg->exit; - err = skcipher_register_instance(tmpl, inst); + err = lskcipher_register_instance(tmpl, inst); if (err) inst->free(inst); @@ -102,3 +223,4 @@ module_exit(crypto_ecb_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ECB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("ecb"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); -- cgit 1.2.3-korg From 705b52fef3c73655701d9c8868e744f1fa03e942 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:28 +0800 Subject: crypto: cbc - Convert from skcipher to lskcipher Replace the existing skcipher CBC template with an lskcipher version. 
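The heart of the conversion is that the skcipher walk machinery disappears: an lskcipher operates on flat buffers, so CBC chaining becomes a plain loop. Below is a simplified sketch of the segment path (the helper name is illustrative; the in-place variant and the final/partial-block handling follow the same pattern):

	static int cbc_encrypt_sketch(struct crypto_lskcipher *cipher,
				      const u8 *src, u8 *dst,
				      unsigned int nbytes, u8 *iv)
	{
		unsigned int bsize = crypto_lskcipher_blocksize(cipher);

		for (; nbytes >= bsize; src += bsize, dst += bsize, nbytes -= bsize) {
			crypto_xor(iv, src, bsize);	/* P_i xor C_{i-1} */
			crypto_lskcipher_encrypt(cipher, iv, dst, bsize, NULL);
			memcpy(iv, dst, bsize);		/* chain C_i forward */
		}

		return nbytes;				/* unprocessed tail */
	}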
Signed-off-by: Herbert Xu --- crypto/cbc.c | 163 ++++++++++++++++++++++------------------------------------- 1 file changed, 61 insertions(+), 102 deletions(-) (limited to 'crypto') diff --git a/crypto/cbc.c b/crypto/cbc.c index 6c03e96b945f66..28345b8d921c6a 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -5,8 +5,6 @@ * Copyright (c) 2006-2016 Herbert Xu */ -#include -#include #include #include #include @@ -14,99 +12,71 @@ #include #include -static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_encrypt_segment(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, unsigned nbytes, + u8 *iv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_encrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); - do { + for (; nbytes >= bsize; src += bsize, dst += bsize, nbytes -= bsize) { crypto_xor(iv, src, bsize); - fn(tfm, dst, iv); + crypto_lskcipher_encrypt(tfm, iv, dst, bsize, NULL); memcpy(iv, dst, bsize); - - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); + } return nbytes; } -static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_encrypt_inplace(struct crypto_lskcipher *tfm, + u8 *src, unsigned nbytes, u8 *oiv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_encrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); + u8 *iv = oiv; + + if (nbytes < bsize) + goto out; do { crypto_xor(src, iv, bsize); - fn(tfm, src, src); + crypto_lskcipher_encrypt(tfm, src, src, bsize, NULL); iv = src; src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); + memcpy(oiv, iv, bsize); +out: return nbytes; } -static int crypto_cbc_encrypt(struct skcipher_request *req) +static int crypto_cbc_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final) { - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct skcipher_walk walk; - int err; + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_lskcipher *cipher = *ctx; + int rem; - err = skcipher_walk_virt(&walk, req, false); + if (src == dst) + rem = crypto_cbc_encrypt_inplace(cipher, dst, len, iv); + else + rem = crypto_cbc_encrypt_segment(cipher, src, dst, len, iv); - while (walk.nbytes) { - if (walk.src.virt.addr == walk.dst.virt.addr) - err = crypto_cbc_encrypt_inplace(&walk, skcipher); - else - err = crypto_cbc_encrypt_segment(&walk, skcipher); - err = skcipher_walk_done(&walk, err); - } - - return err; + return rem && final ? 
-EINVAL : rem; } -static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_decrypt_segment(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, unsigned nbytes, + u8 *oiv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_decrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); + const u8 *iv = oiv; + + if (nbytes < bsize) + goto out; do { - fn(tfm, dst, src); + crypto_lskcipher_decrypt(tfm, src, dst, bsize, NULL); crypto_xor(dst, iv, bsize); iv = src; @@ -114,83 +84,72 @@ static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk, dst += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); + memcpy(oiv, iv, bsize); +out: return nbytes; } -static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_decrypt_inplace(struct crypto_lskcipher *tfm, + u8 *src, unsigned nbytes, u8 *iv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); u8 last_iv[MAX_CIPHER_BLOCKSIZE]; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_decrypt; + if (nbytes < bsize) + goto out; /* Start of the last block. */ src += nbytes - (nbytes & (bsize - 1)) - bsize; memcpy(last_iv, src, bsize); for (;;) { - fn(tfm, src, src); + crypto_lskcipher_decrypt(tfm, src, src, bsize, NULL); if ((nbytes -= bsize) < bsize) break; crypto_xor(src, src - bsize, bsize); src -= bsize; } - crypto_xor(src, walk->iv, bsize); - memcpy(walk->iv, last_iv, bsize); + crypto_xor(src, iv, bsize); + memcpy(iv, last_iv, bsize); +out: return nbytes; } -static int crypto_cbc_decrypt(struct skcipher_request *req) +static int crypto_cbc_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final) { - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct skcipher_walk walk; - int err; + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_lskcipher *cipher = *ctx; + int rem; - err = skcipher_walk_virt(&walk, req, false); + if (src == dst) + rem = crypto_cbc_decrypt_inplace(cipher, dst, len, iv); + else + rem = crypto_cbc_decrypt_segment(cipher, src, dst, len, iv); - while (walk.nbytes) { - if (walk.src.virt.addr == walk.dst.virt.addr) - err = crypto_cbc_decrypt_inplace(&walk, skcipher); - else - err = crypto_cbc_decrypt_segment(&walk, skcipher); - err = skcipher_walk_done(&walk, err); - } - - return err; + return rem && final ? 
-EINVAL : rem; } static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct skcipher_instance *inst; - struct crypto_alg *alg; + struct lskcipher_instance *inst; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb); + inst = lskcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); - alg = skcipher_ialg_simple(inst); - err = -EINVAL; - if (!is_power_of_2(alg->cra_blocksize)) + if (!is_power_of_2(inst->alg.co.base.cra_blocksize)) goto out_free_inst; inst->alg.encrypt = crypto_cbc_encrypt; inst->alg.decrypt = crypto_cbc_decrypt; - err = skcipher_register_instance(tmpl, inst); + err = lskcipher_register_instance(tmpl, inst); if (err) { out_free_inst: inst->free(inst); -- cgit 1.2.3-korg From 04597c8dd6c4b55e946fec50dc3b14a5d9d54501 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Thu, 21 Sep 2023 13:48:11 +0200 Subject: crypto: jitter - add RCT/APT support for different OSRs The oversampling rate (OSR) value specifies the heuristically implied entropy in the recorded data - H_submitter = 1/osr. A different entropy estimate implies a different APT/RCT cutoff value. This change adds support for OSRs 1 through 15. This OSR can be selected by the caller of the Jitter RNG. For this patch, the caller still uses one hard-coded OSR. A subsequent patch allows this value to be configured. In addition, the power-up self test is adjusted as follows: * It allows the caller to provide an oversampling rate that should be tested with - commonly it should be the same as used for the actual runtime operation. This therefore makes the power-up testing consistent with the runtime operation. * It now calls jent_measure_jitter (i.e. collects the full entropy that can possibly be harvested by the Jitter RNG) instead of only jent_condition_data (which only returns the entropy harvested from the conditioning component). This should now alleviate reports where the Jitter RNG initialization thinks there is too little entropy. * The power-up test now solely relies on the (enhanced) APT and RCT test that is used as a health test at runtime. The code allowing the different OSRs as well as the power-up test changes are present in the user space version of the Jitter RNG 3.4.1 and have thus already been in production use for some time.
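To illustrate the scaling (derived from the formulas in this patch, not extra kernel code): with H = 1/osr and the SP800-90B false-positive rates alpha = 2^-30 (intermittent) and 2^-60 (permanent), the RCT cutoff C = ceil(-log2(alpha)/H) becomes 30*osr and 60*osr respectively, while the APT cutoffs come from per-OSR lookup tables.

	/* Hypothetical helper; the kernel open-codes these products. */
	static unsigned int rct_cutoff(unsigned int osr, int permanent)
	{
		return (permanent ? 60 : 30) * osr;
	}
	/* e.g. osr = 3: intermittent failure at 90 stuck values, permanent at 180 */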
Reported-by: "Ospan, Abylay" Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy-kcapi.c | 4 +- crypto/jitterentropy.c | 233 ++++++++++++++++++++++--------------------- crypto/jitterentropy.h | 3 +- 3 files changed, 123 insertions(+), 117 deletions(-) (limited to 'crypto') diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 7d1463a1562acb..1de730f9468308 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -245,7 +245,7 @@ static int jent_kcapi_init(struct crypto_tfm *tfm) crypto_shash_init(sdesc); rng->sdesc = sdesc; - rng->entropy_collector = jent_entropy_collector_alloc(1, 0, sdesc); + rng->entropy_collector = jent_entropy_collector_alloc(0, 0, sdesc); if (!rng->entropy_collector) { ret = -ENOMEM; goto err; @@ -334,7 +334,7 @@ static int __init jent_mod_init(void) desc->tfm = tfm; crypto_shash_init(desc); - ret = jent_entropy_init(desc); + ret = jent_entropy_init(0, 0, desc); shash_desc_zero(desc); crypto_free_shash(tfm); if (ret) { diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index fe9c233ec76930..1b99ffaa8c349c 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -72,6 +72,8 @@ struct rand_data { __u64 prev_time; /* SENSITIVE Previous time stamp */ __u64 last_delta; /* SENSITIVE stuck test */ __s64 last_delta2; /* SENSITIVE stuck test */ + + unsigned int flags; /* Flags used to initialize */ unsigned int osr; /* Oversample rate */ #define JENT_MEMORY_BLOCKS 64 #define JENT_MEMORY_BLOCKSIZE 32 @@ -88,16 +90,9 @@ struct rand_data { /* Repetition Count Test */ unsigned int rct_count; /* Number of stuck values */ - /* Intermittent health test failure threshold of 2^-30 */ - /* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */ - /* However, our RCT implementation starts at 1, so we subtract 1 here. */ -#define JENT_RCT_CUTOFF (31 - 1) /* Taken from SP800-90B sec 4.4.1 */ -#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */ - /* Permanent health test failure threshold of 2^-60 */ - /* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */ - /* However, our RCT implementation starts at 1, so we subtract 1 here. */ -#define JENT_RCT_CUTOFF_PERMANENT (61 - 1) -#define JENT_APT_CUTOFF_PERMANENT 355 + /* Adaptive Proportion Test cutoff values */ + unsigned int apt_cutoff; /* Intermittent health test failure */ + unsigned int apt_cutoff_permanent; /* Permanent health test failure */ #define JENT_APT_WINDOW_SIZE 512 /* Data window size */ /* LSB of time stamp to process */ #define JENT_APT_LSB 16 @@ -122,6 +117,9 @@ struct rand_data { * zero). */ #define JENT_ESTUCK 8 /* Too many stuck results during init. */ #define JENT_EHEALTH 9 /* Health test failed during initialization */ +#define JENT_ERCT 10 /* RCT failed during initialization */ +#define JENT_EHASH 11 /* Hash self test failed */ +#define JENT_EMEM 12 /* Can't allocate memory for initialization */ /* * The output n bits can receive more than n bits of min entropy, of course, @@ -147,6 +145,48 @@ struct rand_data { * This test complies with SP800-90B section 4.4.2. ***************************************************************************/ +/* + * See the SP 800-90B comment #10b for the corrected cutoff for the SP 800-90B + * APT. + * http://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf + * In the syntax of R, this is C = 2 + qbinom(1 − 2^(−30), 511, 2^(-1/osr)).
+ * (The original formula wasn't correct because the first symbol must + * necessarily have been observed, so there is no chance of observing 0 of these + * symbols.) + * + * For the alpha < 2^-53, R cannot be used as it uses a float data type without + * arbitrary precision. A SageMath script is used to calculate those cutoff + * values. + * + * For any value above 14, this yields the maximal allowable value of 512 + * (by FIPS 140-2 IG 7.19 Resolution # 16, we cannot choose a cutoff value that + * renders the test unable to fail). + */ +static const unsigned int jent_apt_cutoff_lookup[15] = { + 325, 422, 459, 477, 488, 494, 499, 502, + 505, 507, 508, 509, 510, 511, 512 }; +static const unsigned int jent_apt_cutoff_permanent_lookup[15] = { + 355, 447, 479, 494, 502, 507, 510, 512, + 512, 512, 512, 512, 512, 512, 512 }; +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +static void jent_apt_init(struct rand_data *ec, unsigned int osr) +{ + /* + * Establish the apt_cutoff based on the presumed entropy rate of + * 1/osr. + */ + if (osr >= ARRAY_SIZE(jent_apt_cutoff_lookup)) { + ec->apt_cutoff = jent_apt_cutoff_lookup[ + ARRAY_SIZE(jent_apt_cutoff_lookup) - 1]; + ec->apt_cutoff_permanent = jent_apt_cutoff_permanent_lookup[ + ARRAY_SIZE(jent_apt_cutoff_permanent_lookup) - 1]; + } else { + ec->apt_cutoff = jent_apt_cutoff_lookup[osr - 1]; + ec->apt_cutoff_permanent = + jent_apt_cutoff_permanent_lookup[osr - 1]; + } +} /* * Reset the APT counter * @@ -187,12 +227,12 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) /* APT health test failure detection */ static int jent_apt_permanent_failure(struct rand_data *ec) { - return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0; + return (ec->apt_count >= ec->apt_cutoff_permanent) ? 1 : 0; } static int jent_apt_failure(struct rand_data *ec) { - return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0; + return (ec->apt_count >= ec->apt_cutoff) ? 1 : 0; } /*************************************************************************** @@ -275,15 +315,28 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) return 0; } -/* RCT health test failure detection */ +/* + * The cutoff value is based on the following consideration: + * alpha = 2^-30 or 2^-60 as recommended in SP800-90B. + * In addition, we require an entropy value H of 1/osr as this is the minimum + * entropy required to provide full entropy. + * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr deltas for + * inserting them into the entropy pool which should then have (close to) + * DATA_SIZE_BITS bits of entropy in the conditioned output. + * + * Note, ec->rct_count (which equals to value B in the pseudo code of SP800-90B + * section 4.4.1) starts with zero. Hence we need to subtract one from the + * cutoff value as calculated following SP800-90B. Thus + * C = ceil(-log_2(alpha)/H) = 30*osr or 60*osr. + */ static int jent_rct_permanent_failure(struct rand_data *ec) { - return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0; + return (ec->rct_count >= (60 * ec->osr)) ? 1 : 0; } static int jent_rct_failure(struct rand_data *ec) { - return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0; + return (ec->rct_count >= (30 * ec->osr)) ? 
1 : 0; } /* Report of health test failures */ @@ -448,7 +501,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) * * @return result of stuck test */ -static int jent_measure_jitter(struct rand_data *ec) +static int jent_measure_jitter(struct rand_data *ec, __u64 *ret_current_delta) { __u64 time = 0; __u64 current_delta = 0; @@ -472,6 +525,10 @@ static int jent_measure_jitter(struct rand_data *ec) if (jent_condition_data(ec, current_delta, stuck)) stuck = 1; + /* return the raw entropy value */ + if (ret_current_delta) + *ret_current_delta = current_delta; + return stuck; } @@ -489,11 +546,11 @@ static void jent_gen_entropy(struct rand_data *ec) safety_factor = JENT_ENTROPY_SAFETY_FACTOR; /* priming of the ->prev_time value */ - jent_measure_jitter(ec); + jent_measure_jitter(ec, NULL); while (!jent_health_failure(ec)) { /* If a stuck measurement is received, repeat measurement */ - if (jent_measure_jitter(ec)) + if (jent_measure_jitter(ec, NULL)) continue; /* @@ -554,7 +611,8 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, * Perform startup health tests and return permanent * error if it fails. */ - if (jent_entropy_init(ec->hash_state)) + if (jent_entropy_init(ec->osr, ec->flags, + ec->hash_state)) return -3; return -2; @@ -604,11 +662,15 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, /* verify and set the oversampling rate */ if (osr == 0) - osr = 1; /* minimum sampling rate is 1 */ + osr = 1; /* H_submitter = 1 / osr */ entropy_collector->osr = osr; + entropy_collector->flags = flags; entropy_collector->hash_state = hash_state; + /* Initialize the APT */ + jent_apt_init(entropy_collector, osr); + /* fill the data pad with non-zero values */ jent_gen_entropy(entropy_collector); @@ -622,20 +684,14 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector) jent_zfree(entropy_collector); } -int jent_entropy_init(void *hash_state) +int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state) { - int i; - __u64 delta_sum = 0; - __u64 old_delta = 0; - unsigned int nonstuck = 0; - int time_backwards = 0; - int count_mod = 0; - int count_stuck = 0; - struct rand_data ec = { 0 }; - - /* Required for RCT */ - ec.osr = 1; - ec.hash_state = hash_state; + struct rand_data *ec; + int i, time_backwards = 0, ret = 0; + + ec = jent_entropy_collector_alloc(osr, flags, hash_state); + if (!ec) + return JENT_EMEM; /* We could perform statistical tests here, but the problem is * that we only have a few loop counts to do testing. 
These @@ -664,31 +720,28 @@ int jent_entropy_init(void *hash_state) #define TESTLOOPCOUNT 1024 #define CLEARCACHE 100 for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { - __u64 time = 0; - __u64 time2 = 0; - __u64 delta = 0; - unsigned int lowdelta = 0; - int stuck; + __u64 start_time = 0, end_time = 0, delta = 0; /* Invoke core entropy collection logic */ - jent_get_nstime(&time); - ec.prev_time = time; - jent_condition_data(&ec, time, 0); - jent_get_nstime(&time2); + jent_measure_jitter(ec, &delta); + end_time = ec->prev_time; + start_time = ec->prev_time - delta; /* test whether timer works */ - if (!time || !time2) - return JENT_ENOTIME; - delta = jent_delta(time, time2); + if (!start_time || !end_time) { + ret = JENT_ENOTIME; + goto out; + } + /* * test whether timer is fine grained enough to provide * delta even when called shortly after each other -- this * implies that we also have a high resolution timer */ - if (!delta) - return JENT_ECOARSETIME; - - stuck = jent_stuck(&ec, delta); + if (!delta || (end_time == start_time)) { + ret = JENT_ECOARSETIME; + goto out; + } /* * up to here we did not modify any variable that will be @@ -700,49 +753,9 @@ int jent_entropy_init(void *hash_state) if (i < CLEARCACHE) continue; - if (stuck) - count_stuck++; - else { - nonstuck++; - - /* - * Ensure that the APT succeeded. - * - * With the check below that count_stuck must be less - * than 10% of the overall generated raw entropy values - * it is guaranteed that the APT is invoked at - * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times. - */ - if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) { - jent_apt_reset(&ec, - delta & JENT_APT_WORD_MASK); - } - } - - /* Validate health test result */ - if (jent_health_failure(&ec)) - return JENT_EHEALTH; - /* test whether we have an increasing timer */ - if (!(time2 > time)) + if (!(end_time > start_time)) time_backwards++; - - /* use 32 bit value to ensure compilation on 32 bit arches */ - lowdelta = time2 - time; - if (!(lowdelta % 100)) - count_mod++; - - /* - * ensure that we have a varying delta timer which is necessary - * for the calculation of entropy -- perform this check - * only after the first loop is executed as we need to prime - * the old_data value - */ - if (delta > old_delta) - delta_sum += (delta - old_delta); - else - delta_sum += (old_delta - delta); - old_delta = delta; } /* @@ -752,31 +765,23 @@ int jent_entropy_init(void *hash_state) * should not fail. The value of 3 should cover the NTP case being * performed during our test run. */ - if (time_backwards > 3) - return JENT_ENOMONOTONIC; - - /* - * Variations of deltas of time must on average be larger - * than 1 to ensure the entropy estimation - * implied with 1 is preserved - */ - if ((delta_sum) <= 1) - return JENT_EVARVAR; + if (time_backwards > 3) { + ret = JENT_ENOMONOTONIC; + goto out; + } - /* - * Ensure that we have variations in the time stamp below 10 for at - * least 10% of all checks -- on some platforms, the counter increments - * in multiples of 100, but not always - */ - if ((TESTLOOPCOUNT/10 * 9) < count_mod) - return JENT_ECOARSETIME; + /* Did we encounter a health test failure? */ + if (jent_rct_failure(ec)) { + ret = JENT_ERCT; + goto out; + } + if (jent_apt_failure(ec)) { + ret = JENT_EHEALTH; + goto out; + } - /* - * If we have more than 90% stuck results, then this Jitter RNG is - * likely to not work well. 
- */ - if ((TESTLOOPCOUNT/10 * 9) < count_stuck) - return JENT_ESTUCK; +out: + jent_entropy_collector_free(ec); - return 0; + return ret; } diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index 4c92176ea2b1dd..626c6228b7e2b3 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -9,7 +9,8 @@ extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len); struct rand_data; -extern int jent_entropy_init(void *hash_state); +extern int jent_entropy_init(unsigned int osr, unsigned int flags, + void *hash_state); extern int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len); -- cgit 1.2.3-korg From 59bcfd788552504606e3eb774ae68052379396b6 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Thu, 21 Sep 2023 13:48:33 +0200 Subject: crypto: jitter - Allow configuration of memory size The memory size consumed by the Jitter RNG is one contributing factor in the amount of entropy that is gathered. As the amount of entropy directly correlates with the distance of the memory from the CPU, the caches that are possibly present on a given system have an impact on the collected entropy. Thus, the kernel compile time should offer a means to configure the amount of memory used by the Jitter RNG. Although this option could be turned into a runtime option (e.g. a kernel command line option), it should remain a compile time option as otherwise administrators who may not have performed an entropy assessment may select a value that is inappropriate. The default value selected by the configuration is identical to the current Jitter RNG value. Thus, the patch should not lead to any change in the Jitter RNG behavior. To accommodate larger memory buffers, kvzalloc / kvfree is used. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/Kconfig | 43 +++++++++++++++++++++++++++++++++++++++++++ crypto/jitterentropy-kcapi.c | 11 +++++++++++ crypto/jitterentropy.c | 16 +++++++++------- crypto/jitterentropy.h | 2 ++ 4 files changed, 65 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 650b1b3620d818..00c827d9f0d2de 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1296,6 +1296,49 @@ config CRYPTO_JITTERENTROPY See https://www.chronox.de/jent.html +choice + prompt "CPU Jitter RNG Memory Size" + default CRYPTO_JITTERENTROPY_MEMSIZE_2 + depends on CRYPTO_JITTERENTROPY + help + The Jitter RNG measures the execution time of memory accesses. + Multiple consecutive memory accesses are performed. If the memory + size fits into a cache (e.g. L1), only the memory access timing + to that cache is measured. The closer the cache is to the CPU + the less variations are measured and thus the less entropy is + obtained. Thus, if the memory size fits into the L1 cache, the + obtained entropy is less than if the memory size fits within + L1 + L2, which in turn is less if the memory fits into + L1 + L2 + L3. Thus, by selecting a different memory size, + the entropy rate produced by the Jitter RNG can be modified.
+ + config CRYPTO_JITTERENTROPY_MEMSIZE_2 + bool "2048 Bytes (default)" + + config CRYPTO_JITTERENTROPY_MEMSIZE_128 + bool "128 kBytes" + + config CRYPTO_JITTERENTROPY_MEMSIZE_1024 + bool "1024 kBytes" + + config CRYPTO_JITTERENTROPY_MEMSIZE_8192 + bool "8192 kBytes" +endchoice + +config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS + int + default 64 if CRYPTO_JITTERENTROPY_MEMSIZE_2 + default 512 if CRYPTO_JITTERENTROPY_MEMSIZE_128 + default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024 + default 4096 if CRYPTO_JITTERENTROPY_MEMSIZE_8192 + +config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE + int + default 32 if CRYPTO_JITTERENTROPY_MEMSIZE_2 + default 256 if CRYPTO_JITTERENTROPY_MEMSIZE_128 + default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024 + default 2048 if CRYPTO_JITTERENTROPY_MEMSIZE_8192 + config CRYPTO_JITTERENTROPY_TESTINTERFACE bool "CPU Jitter RNG Test Interface" depends on CRYPTO_JITTERENTROPY diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 1de730f9468308..a8e7bbd28c6eb4 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -54,6 +54,17 @@ * Helper function ***************************************************************************/ +void *jent_kvzalloc(unsigned int len) +{ + return kvzalloc(len, GFP_KERNEL); +} + +void jent_kvzfree(void *ptr, unsigned int len) +{ + memzero_explicit(ptr, len); + kvfree(ptr); +} + void *jent_zalloc(unsigned int len) { return kzalloc(len, GFP_KERNEL); diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 1b99ffaa8c349c..18bbe2b89a94dc 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -75,10 +75,10 @@ struct rand_data { unsigned int flags; /* Flags used to initialize */ unsigned int osr; /* Oversample rate */ -#define JENT_MEMORY_BLOCKS 64 -#define JENT_MEMORY_BLOCKSIZE 32 #define JENT_MEMORY_ACCESSLOOPS 128 -#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE) +#define JENT_MEMORY_SIZE \ + (CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS * \ + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE) unsigned char *mem; /* Memory access location with size of * memblocks * memblocksize */ unsigned int memlocation; /* Pointer to byte in *mem */ @@ -650,13 +650,15 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, /* Allocate memory for adding variations based on memory * access */ - entropy_collector->mem = jent_zalloc(JENT_MEMORY_SIZE); + entropy_collector->mem = jent_kvzalloc(JENT_MEMORY_SIZE); if (!entropy_collector->mem) { jent_zfree(entropy_collector); return NULL; } - entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE; - entropy_collector->memblocks = JENT_MEMORY_BLOCKS; + entropy_collector->memblocksize = + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE; + entropy_collector->memblocks = + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS; entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS; } @@ -679,7 +681,7 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, void jent_entropy_collector_free(struct rand_data *entropy_collector) { - jent_zfree(entropy_collector->mem); + jent_kvzfree(entropy_collector->mem, JENT_MEMORY_SIZE); entropy_collector->mem = NULL; jent_zfree(entropy_collector); } diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index 626c6228b7e2b3..e31661ee00d379 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later +extern void *jent_kvzalloc(unsigned int len); +extern void jent_kvzfree(void *ptr, unsigned int len); extern void *jent_zalloc(unsigned 
int len); extern void jent_zfree(void *ptr); extern void jent_get_nstime(__u64 *out); -- cgit 1.2.3-korg From 0baa8fab334a4d7017235b72fa8a547433572109 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Thu, 21 Sep 2023 13:48:59 +0200 Subject: crypto: jitter - Allow configuration of oversampling rate The oversampling rate used by the Jitter RNG allows the configuration of the heuristically implied entropy in one timing measurement. This entropy rate is (1 / OSR) bits of entropy per time stamp. Considering that the Jitter RNG now support APT/RCT health tests for different OSRs, allow this value to be configured at compile time to support systems with limited amount of entropy in their timer. The allowed range of OSR values complies with the APT/RCT cutoff health test values which range from 1 through 15. The default value of the OSR selection support is left at 1 which is the current default. Thus, the addition of the configuration support does not alter the default Jitter RNG behavior. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/Kconfig | 17 +++++++++++++++++ crypto/jitterentropy-kcapi.c | 6 ++++-- 2 files changed, 21 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 00c827d9f0d2de..ed931ddea644d2 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1339,6 +1339,23 @@ config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024 default 2048 if CRYPTO_JITTERENTROPY_MEMSIZE_8192 +config CRYPTO_JITTERENTROPY_OSR + int "CPU Jitter RNG Oversampling Rate" + range 1 15 + default 1 + depends on CRYPTO_JITTERENTROPY + help + The Jitter RNG allows the specification of an oversampling rate (OSR). + The Jitter RNG operation requires a fixed amount of timing + measurements to produce one output block of random numbers. The + OSR value is multiplied with the amount of timing measurements to + generate one output block. Thus, the timing measurement is oversampled + by the OSR factor. The oversampling allows the Jitter RNG to operate + on hardware whose timers deliver limited amount of entropy (e.g. + the timer is coarse) by setting the OSR to a higher value. The + trade-off, however, is that the Jitter RNG now requires more time + to generate random numbers. 
+ config CRYPTO_JITTERENTROPY_TESTINTERFACE bool "CPU Jitter RNG Test Interface" depends on CRYPTO_JITTERENTROPY diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index a8e7bbd28c6eb4..0c6752221451bf 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -256,7 +256,9 @@ static int jent_kcapi_init(struct crypto_tfm *tfm) crypto_shash_init(sdesc); rng->sdesc = sdesc; - rng->entropy_collector = jent_entropy_collector_alloc(0, 0, sdesc); + rng->entropy_collector = + jent_entropy_collector_alloc(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, + sdesc); if (!rng->entropy_collector) { ret = -ENOMEM; goto err; @@ -345,7 +347,7 @@ static int __init jent_mod_init(void) desc->tfm = tfm; crypto_shash_init(desc); - ret = jent_entropy_init(0, 0, desc); + ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc); shash_desc_zero(desc); crypto_free_shash(tfm); if (ret) { -- cgit 1.2.3-korg From 5ec12f1c7bac891c3268dd6e441a3755ca2b46e0 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Sat, 23 Sep 2023 12:08:06 +0200 Subject: crypto: engine - Make crypto_engine_exit() return void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All callers ignore the return value, so simplify by not providing one. Note that crypto_engine_exit() is typically called in a device driver's remove path (or the error path in probe), where errors cannot be handled anyhow. Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 8 ++------ include/crypto/engine.h | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 108d9d55c509b5..e60a0eb628e8a0 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -552,20 +552,16 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); /** * crypto_engine_exit - free the resources of hardware engine when exit * @engine: the hardware engine need to be freed - * - * Return 0 for success. */ -int crypto_engine_exit(struct crypto_engine *engine) +void crypto_engine_exit(struct crypto_engine *engine) { int ret; ret = crypto_engine_stop(engine); if (ret) - return ret; + return; kthread_destroy_worker(engine->kworker); - - return 0; } EXPORT_SYMBOL_GPL(crypto_engine_exit); diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 2835069c5997ee..545dbefe3e13c6 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -78,7 +78,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool retry_support, int (*cbk_do_batch)(struct crypto_engine *engine), bool rt, int qlen); -int crypto_engine_exit(struct crypto_engine *engine); +void crypto_engine_exit(struct crypto_engine *engine); int crypto_engine_register_aead(struct aead_engine_alg *alg); void crypto_engine_unregister_aead(struct aead_engine_alg *alg); -- cgit 1.2.3-korg From a1e452026e6d7d16253b29b9d19d3c99a5066726 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Tue, 26 Sep 2023 11:46:41 +0200 Subject: X.509: Add missing IMPLICIT annotations to AKID ASN.1 module The ASN.1 module in RFC 5280 appendix A.1 uses EXPLICIT TAGS whereas the one in appendix A.2 uses IMPLICIT TAGS. The kernel's simplified asn1_compiler.c always uses EXPLICIT TAGS, hence definitions from appendix A.2 need to be annotated as IMPLICIT for the compiler to generate RFC-compliant code. In particular, GeneralName is defined in appendix A.2: GeneralName ::= CHOICE { otherName [0] OtherName, ... 
dNSName [2] IA5String, x400Address [3] ORAddress, directoryName [4] Name, ... } Because appendix A.2 uses IMPLICIT TAGS, the IA5String tag (0x16) of a dNSName is not rendered. Instead, the string directly succeeds the [2] tag (0x82). Likewise, the SEQUENCE tag (0x30) of an OtherName is not rendered. Instead, only the constituents of the SEQUENCE are rendered: An OID tag (0x06), a [0] tag (0xa0) and an ANY tag. That's three consecutive tags instead of a single encompassing tag. The situation is different for x400Address and directoryName choices: They reference ORAddress and Name, which are defined in appendix A.1 and therefore use EXPLICIT TAGS. The AKID ASN.1 module is missing several IMPLICIT annotations, hence isn't RFC-compliant. In the unlikely event that an AKID contains other elements beside a directoryName, users may see parse errors. Add the missing annotations but do not tag this commit for stable as I am not aware of any issue reports. Fixes are only eligible for stable if they're "obviously correct" and with ASN.1 there's no such thing. Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/x509_akid.asn1 | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1 index 1a33231a75a89d..c7818ff4e606f8 100644 --- a/crypto/asymmetric_keys/x509_akid.asn1 +++ b/crypto/asymmetric_keys/x509_akid.asn1 @@ -14,15 +14,15 @@ CertificateSerialNumber ::= INTEGER ({ x509_akid_note_serial }) GeneralNames ::= SEQUENCE OF GeneralName GeneralName ::= CHOICE { - otherName [0] ANY, - rfc822Name [1] IA5String, - dNSName [2] IA5String, + otherName [0] IMPLICIT OtherName, + rfc822Name [1] IMPLICIT IA5String, + dNSName [2] IMPLICIT IA5String, x400Address [3] ANY, directoryName [4] Name ({ x509_akid_note_name }), - ediPartyName [5] ANY, - uniformResourceIdentifier [6] IA5String, - iPAddress [7] OCTET STRING, - registeredID [8] OBJECT IDENTIFIER + ediPartyName [5] IMPLICIT EDIPartyName, + uniformResourceIdentifier [6] IMPLICIT IA5String, + iPAddress [7] IMPLICIT OCTET STRING, + registeredID [8] IMPLICIT OBJECT IDENTIFIER } Name ::= SEQUENCE OF RelativeDistinguishedName @@ -33,3 +33,13 @@ AttributeValueAssertion ::= SEQUENCE { attributeType OBJECT IDENTIFIER ({ x509_note_OID }), attributeValue ANY ({ x509_extract_name_segment }) } + +OtherName ::= SEQUENCE { + type-id OBJECT IDENTIFIER, + value [0] ANY + } + +EDIPartyName ::= SEQUENCE { + nameAssigner [0] ANY OPTIONAL, + partyName [1] ANY + } -- cgit 1.2.3-korg From 8468516f9f93a41dc65158b6428a1a1039c68f20 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Mon, 2 Oct 2023 00:57:15 +0100 Subject: crypto: pkcs7 - remove md4 md5 x.509 support Remove support for md4 and md5 hashes and signatures in the x.509 certificate parsers, pkcs7 signature parser, and authenticode parser. All of these are insecure or broken, and everyone migrated to alternative hash implementations long ago. Also remove the md2 & md3 OIDs, which already had no support. This is also likely the last user of md4 in the kernel, and thus crypto/md4.c and related tests in tcrypt & testmgr can likely be removed. Other users such as cifs smbfs ext modpost sumversions have their own internal implementation as needed.
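The parsers need no new logic for this: the removed OID cases simply fall through to the pre-existing default handling, e.g. in x509_note_sig_algo() (abridged sketch):

	switch (ctx->last_oid) {
	default:
		return -ENOPKG;	/* md2/md3/md4/md5 signatures land here now */
	case OID_sha1WithRSAEncryption:
		ctx->cert->sig->hash_algo = "sha1";
		goto rsa_pkcs1;
	/* ... */
	}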
Signed-off-by: Dimitri John Ledkov Reviewed-by: Jarkko Sakkinen Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 6 ------ crypto/asymmetric_keys/pkcs7_parser.c | 6 ------ crypto/asymmetric_keys/x509_cert_parser.c | 6 ------ include/linux/oid_registry.h | 8 -------- 4 files changed, 26 deletions(-) (limited to 'crypto') diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 839591ad21ac04..690405ebe77b31 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -75,12 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, oid = look_up_OID(value, vlen); switch (oid) { - case OID_md4: - ctx->digest_algo = "md4"; - break; - case OID_md5: - ctx->digest_algo = "md5"; - break; case OID_sha1: ctx->digest_algo = "sha1"; break; diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 277482bb177771..cf4caab9620ff1 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -227,12 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { - case OID_md4: - ctx->sinfo->sig->hash_algo = "md4"; - break; - case OID_md5: - ctx->sinfo->sig->hash_algo = "md5"; - break; case OID_sha1: ctx->sinfo->sig->hash_algo = "sha1"; break; diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 0a7049b470c181..2c30928621b7e9 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -195,15 +195,9 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, pr_debug("PubKey Algo: %u\n", ctx->last_oid); switch (ctx->last_oid) { - case OID_md2WithRSAEncryption: - case OID_md3WithRSAEncryption: default: return -ENOPKG; /* Unsupported combination */ - case OID_md4WithRSAEncryption: - ctx->cert->sig->hash_algo = "md4"; - goto rsa_pkcs1; - case OID_sha1WithRSAEncryption: ctx->cert->sig->hash_algo = "sha1"; goto rsa_pkcs1; diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index f86a08ba0207ee..4d04fa5d1eeceb 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -30,9 +30,6 @@ enum OID { /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ - OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */ - OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */ - OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */ OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */ OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */ OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */ @@ -49,11 +46,6 @@ enum OID { OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */ - /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */ - OID_md2, /* 1.2.840.113549.2.2 */ - OID_md4, /* 1.2.840.113549.2.4 */ - OID_md5, /* 1.2.840.113549.2.5 */ - OID_mskrb5, /* 1.2.840.48018.1.2.2 */ OID_krb5, /* 1.2.840.113554.1.2.2 */ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */ -- cgit 1.2.3-korg From 62a465c25e99b9a98259a6b7f5bb759f5296d501 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Aug 2023 17:56:25 +0800 Subject: crypto: deflate - Remove zlib-deflate Remove the implementation of zlib-deflate because it is completely unused in the kernel. 
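With the second algorithm gone, the scomp array-registration helpers are no longer needed either; module init reduces to the following (a sketch of the resulting code):

	static int __init deflate_mod_init(void)
	{
		int ret;

		ret = crypto_register_alg(&alg);
		if (ret)
			return ret;

		/* a single scomp now, no crypto_register_scomps() array call */
		ret = crypto_register_scomp(&scomp);
		if (ret)
			crypto_unregister_alg(&alg);

		return ret;
	}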
Signed-off-by: Herbert Xu Reviewed-by: Ard Biesheuvel --- crypto/deflate.c | 61 ++++++++++++++++---------------------------------------- 1 file changed, 17 insertions(+), 44 deletions(-) (limited to 'crypto') diff --git a/crypto/deflate.c b/crypto/deflate.c index b2a46f6dc961e7..6e31e0db0e8659 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -39,24 +39,20 @@ struct deflate_ctx { struct z_stream_s decomp_stream; }; -static int deflate_comp_init(struct deflate_ctx *ctx, int format) +static int deflate_comp_init(struct deflate_ctx *ctx) { int ret = 0; struct z_stream_s *stream = &ctx->comp_stream; stream->workspace = vzalloc(zlib_deflate_workspacesize( - MAX_WBITS, MAX_MEM_LEVEL)); + -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL)); if (!stream->workspace) { ret = -ENOMEM; goto out; } - if (format) - ret = zlib_deflateInit(stream, 3); - else - ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, - -DEFLATE_DEF_WINBITS, - DEFLATE_DEF_MEMLEVEL, - Z_DEFAULT_STRATEGY); + ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, + -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, + Z_DEFAULT_STRATEGY); if (ret != Z_OK) { ret = -EINVAL; goto out_free; @@ -68,7 +64,7 @@ out_free: goto out; } -static int deflate_decomp_init(struct deflate_ctx *ctx, int format) +static int deflate_decomp_init(struct deflate_ctx *ctx) { int ret = 0; struct z_stream_s *stream = &ctx->decomp_stream; @@ -78,10 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx, int format) ret = -ENOMEM; goto out; } - if (format) - ret = zlib_inflateInit(stream); - else - ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); + ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); if (ret != Z_OK) { ret = -EINVAL; goto out_free; @@ -105,21 +98,21 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx) vfree(ctx->decomp_stream.workspace); } -static int __deflate_init(void *ctx, int format) +static int __deflate_init(void *ctx) { int ret; - ret = deflate_comp_init(ctx, format); + ret = deflate_comp_init(ctx); if (ret) goto out; - ret = deflate_decomp_init(ctx, format); + ret = deflate_decomp_init(ctx); if (ret) deflate_comp_exit(ctx); out: return ret; } -static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format) +static void *deflate_alloc_ctx(struct crypto_scomp *tfm) { struct deflate_ctx *ctx; int ret; @@ -128,7 +121,7 @@ static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format) if (!ctx) return ERR_PTR(-ENOMEM); - ret = __deflate_init(ctx, format); + ret = __deflate_init(ctx); if (ret) { kfree(ctx); return ERR_PTR(ret); @@ -137,21 +130,11 @@ static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format) return ctx; } -static void *deflate_alloc_ctx(struct crypto_scomp *tfm) -{ - return gen_deflate_alloc_ctx(tfm, 0); -} - -static void *zlib_deflate_alloc_ctx(struct crypto_scomp *tfm) -{ - return gen_deflate_alloc_ctx(tfm, 1); -} - static int deflate_init(struct crypto_tfm *tfm) { struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); - return __deflate_init(ctx, 0); + return __deflate_init(ctx); } static void __deflate_exit(void *ctx) @@ -286,7 +269,7 @@ static struct crypto_alg alg = { .coa_decompress = deflate_decompress } } }; -static struct scomp_alg scomp[] = { { +static struct scomp_alg scomp = { .alloc_ctx = deflate_alloc_ctx, .free_ctx = deflate_free_ctx, .compress = deflate_scompress, @@ -296,17 +279,7 @@ static struct scomp_alg scomp[] = { { .cra_driver_name = "deflate-scomp", .cra_module = THIS_MODULE, } -}, { - .alloc_ctx = zlib_deflate_alloc_ctx, - .free_ctx = 
deflate_free_ctx, - .compress = deflate_scompress, - .decompress = deflate_sdecompress, - .base = { - .cra_name = "zlib-deflate", - .cra_driver_name = "zlib-deflate-scomp", - .cra_module = THIS_MODULE, - } -} }; +}; static int __init deflate_mod_init(void) { @@ -316,7 +289,7 @@ static int __init deflate_mod_init(void) if (ret) return ret; - ret = crypto_register_scomps(scomp, ARRAY_SIZE(scomp)); + ret = crypto_register_scomp(&scomp); if (ret) { crypto_unregister_alg(&alg); return ret; @@ -328,7 +301,7 @@ static int __init deflate_mod_init(void) static void __exit deflate_mod_fini(void) { crypto_unregister_alg(&alg); - crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp)); + crypto_unregister_scomp(&scomp); } subsys_initcall(deflate_mod_init); -- cgit 1.2.3-korg From 30febae71c6182e0762dc7744737012b4f8e6a6d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Aug 2023 17:57:06 +0800 Subject: crypto: testmgr - Remove zlib-deflate Remove zlib-deflate test vectors as it no longer exists in the kernel. Signed-off-by: Herbert Xu Reviewed-by: Ard Biesheuvel --- crypto/testmgr.c | 10 -------- crypto/testmgr.h | 75 -------------------------------------------------------- 2 files changed, 85 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index aed4a6bf47ad3d..bd4952e4ec6d38 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5772,16 +5772,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(xxhash64_tv_template) } - }, { - .alg = "zlib-deflate", - .test = alg_test_comp, - .fips_allowed = 1, - .suite = { - .comp = { - .comp = __VECS(zlib_deflate_comp_tv_template), - .decomp = __VECS(zlib_deflate_decomp_tv_template) - } - } }, { .alg = "zstd", .test = alg_test_comp, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 5ca7a412508fbf..0cd6e0600255aa 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -35754,81 +35754,6 @@ static const struct comp_testvec deflate_decomp_tv_template[] = { }, }; -static const struct comp_testvec zlib_deflate_comp_tv_template[] = { - { - .inlen = 70, - .outlen = 44, - .input = "Join us now and share the software " - "Join us now and share the software ", - .output = "\x78\x5e\xf3\xca\xcf\xcc\x53\x28" - "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc" - "\x4b\x51\x28\xce\x48\x2c\x4a\x55" - "\x28\xc9\x48\x55\x28\xce\x4f\x2b" - "\x29\x07\x71\xbc\x08\x2b\x01\x00" - "\x7c\x65\x19\x3d", - }, { - .inlen = 191, - .outlen = 129, - .input = "This document describes a compression method based on the DEFLATE" - "compression algorithm. 
This document defines the application of " - "the DEFLATE algorithm to the IP Payload Compression Protocol.", - .output = "\x78\x5e\x5d\xce\x41\x0a\xc3\x30" - "\x0c\x04\xc0\xaf\xec\x0b\xf2\x87" - "\xd2\xa6\x50\xe8\xc1\x07\x7f\x40" - "\xb1\x95\x5a\x60\x5b\xc6\x56\x0f" - "\xfd\x7d\x93\x1e\x42\xe8\x51\xec" - "\xee\x20\x9f\x64\x20\x6a\x78\x17" - "\xae\x86\xc8\x23\x74\x59\x78\x80" - "\x10\xb4\xb4\xce\x63\x88\x56\x14" - "\xb6\xa4\x11\x0b\x0d\x8e\xd8\x6e" - "\x4b\x8c\xdb\x7c\x7f\x5e\xfc\x7c" - "\xae\x51\x7e\x69\x17\x4b\x65\x02" - "\xfc\x1f\xbc\x4a\xdd\xd8\x7d\x48" - "\xad\x65\x09\x64\x3b\xac\xeb\xd9" - "\xc2\x01\xc0\xf4\x17\x3c\x1c\x1c" - "\x7d\xb2\x52\xc4\xf5\xf4\x8f\xeb" - "\x6a\x1a\x34\x4f\x5f\x2e\x32\x45" - "\x4e", - }, -}; - -static const struct comp_testvec zlib_deflate_decomp_tv_template[] = { - { - .inlen = 128, - .outlen = 191, - .input = "\x78\x9c\x5d\x8d\x31\x0e\xc2\x30" - "\x10\x04\xbf\xb2\x2f\xc8\x1f\x10" - "\x04\x09\x89\xc2\x85\x3f\x70\xb1" - "\x2f\xf8\x24\xdb\x67\xd9\x47\xc1" - "\xef\x49\x68\x12\x51\xae\x76\x67" - "\xd6\x27\x19\x88\x1a\xde\x85\xab" - "\x21\xf2\x08\x5d\x16\x1e\x20\x04" - "\x2d\xad\xf3\x18\xa2\x15\x85\x2d" - "\x69\xc4\x42\x83\x23\xb6\x6c\x89" - "\x71\x9b\xef\xcf\x8b\x9f\xcf\x33" - "\xca\x2f\xed\x62\xa9\x4c\x80\xff" - "\x13\xaf\x52\x37\xed\x0e\x52\x6b" - "\x59\x02\xd9\x4e\xe8\x7a\x76\x1d" - "\x02\x98\xfe\x8a\x87\x83\xa3\x4f" - "\x56\x8a\xb8\x9e\x8e\x5c\x57\xd3" - "\xa0\x79\xfa\x02\x2e\x32\x45\x4e", - .output = "This document describes a compression method based on the DEFLATE" - "compression algorithm. This document defines the application of " - "the DEFLATE algorithm to the IP Payload Compression Protocol.", - }, { - .inlen = 44, - .outlen = 70, - .input = "\x78\x9c\xf3\xca\xcf\xcc\x53\x28" - "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc" - "\x4b\x51\x28\xce\x48\x2c\x4a\x55" - "\x28\xc9\x48\x55\x28\xce\x4f\x2b" - "\x29\x07\x71\xbc\x08\x2b\x01\x00" - "\x7c\x65\x19\x3d", - .output = "Join us now and share the software " - "Join us now and share the software ", - }, -}; - /* * LZO test vectors (null-terminated strings). */ -- cgit 1.2.3-korg From 845346841b77af84c88f1b709c63c14a58a64dc4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:31:55 +0800 Subject: crypto: skcipher - Add dependency on ecb As lskcipher requires the ecb wrapper for the transition add an explicit dependency on it so that it is always present. This can be removed once all simple ciphers have been converted to lskcipher. Reported-by: Nathan Chancellor Fixes: 705b52fef3c7 ("crypto: cbc - Convert from skcipher to lskcipher") Signed-off-by: Herbert Xu Tested-by: Nathan Chancellor Signed-off-by: Herbert Xu --- crypto/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index ed931ddea644d2..bbf51d55724e3f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -85,6 +85,7 @@ config CRYPTO_SKCIPHER tristate select CRYPTO_SKCIPHER2 select CRYPTO_ALGAPI + select CRYPTO_ECB config CRYPTO_SKCIPHER2 tristate @@ -689,7 +690,7 @@ config CRYPTO_CTS config CRYPTO_ECB tristate "ECB (Electronic Codebook)" - select CRYPTO_SKCIPHER + select CRYPTO_SKCIPHER2 select CRYPTO_MANAGER help ECB (Electronic Codebook) mode (NIST SP800-38A) -- cgit 1.2.3-korg From 9a91792db10126fa291e22680d6cf5683d845a15 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:18 +0800 Subject: crypto: arc4 - Convert from skcipher to lskcipher Replace skcipher implementation with lskcipher. 
Signed-off-by: Herbert Xu --- crypto/arc4.c | 60 ++++++++++++++++++++++---------------------------------- crypto/testmgr.c | 2 +- 2 files changed, 24 insertions(+), 38 deletions(-) (limited to 'crypto') diff --git a/crypto/arc4.c b/crypto/arc4.c index 3254dcc3436889..eb3590dc92826c 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -7,7 +7,6 @@ * Jon Oberheide */ -#include #include #include #include @@ -15,33 +14,24 @@ #include #include -static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key, +static int crypto_arc4_setkey(struct crypto_lskcipher *tfm, const u8 *in_key, unsigned int key_len) { - struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm); return arc4_setkey(ctx, in_key, key_len); } -static int crypto_arc4_crypt(struct skcipher_request *req) +static int crypto_arc4_crypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned nbytes, u8 *iv, bool final) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - int err; + struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm); - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes > 0) { - arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } - - return err; + arc4_crypt(ctx, dst, src, nbytes); + return 0; } -static int crypto_arc4_init(struct crypto_skcipher *tfm) +static int crypto_arc4_init(struct crypto_lskcipher *tfm) { pr_warn_ratelimited("\"%s\" (%ld) uses obsolete ecb(arc4) skcipher\n", current->comm, (unsigned long)current->pid); @@ -49,33 +39,29 @@ static int crypto_arc4_init(struct crypto_skcipher *tfm) return 0; } -static struct skcipher_alg arc4_alg = { - /* - * For legacy reasons, this is named "ecb(arc4)", not "arc4". - * Nevertheless it's actually a stream cipher, not a block cipher. 
- */ - .base.cra_name = "ecb(arc4)", - .base.cra_driver_name = "ecb(arc4)-generic", - .base.cra_priority = 100, - .base.cra_blocksize = ARC4_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct arc4_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - .setkey = crypto_arc4_setkey, - .encrypt = crypto_arc4_crypt, - .decrypt = crypto_arc4_crypt, - .init = crypto_arc4_init, +static struct lskcipher_alg arc4_alg = { + .co.base.cra_name = "arc4", + .co.base.cra_driver_name = "arc4-generic", + .co.base.cra_priority = 100, + .co.base.cra_blocksize = ARC4_BLOCK_SIZE, + .co.base.cra_ctxsize = sizeof(struct arc4_ctx), + .co.base.cra_module = THIS_MODULE, + .co.min_keysize = ARC4_MIN_KEY_SIZE, + .co.max_keysize = ARC4_MAX_KEY_SIZE, + .setkey = crypto_arc4_setkey, + .encrypt = crypto_arc4_crypt, + .decrypt = crypto_arc4_crypt, + .init = crypto_arc4_init, }; static int __init arc4_init(void) { - return crypto_register_skcipher(&arc4_alg); + return crypto_register_lskcipher(&arc4_alg); } static void __exit arc4_exit(void) { - crypto_unregister_skcipher(&arc4_alg); + crypto_unregister_lskcipher(&arc4_alg); } subsys_initcall(arc4_init); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index bd4952e4ec6d38..54135c7610f063 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4963,7 +4963,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "ecb(arc4)", - .generic_driver = "ecb(arc4)-generic", + .generic_driver = "arc4-generic", .test = alg_test_skcipher, .suite = { .cipher = __VECS(arc4_tv_template) -- cgit 1.2.3-korg From 4822ed7e8524cd59c39e5ad27ae944f528164a0f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:20 +0800 Subject: crypto: essiv - Handle lskcipher spawns Add code to handle an underlying lskcipher object when grabbing an skcipher spawn.
Fixes: 31865c4c4db2 ("crypto: skcipher - Add lskcipher") Signed-off-by: Herbert Xu --- crypto/essiv.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'crypto') diff --git a/crypto/essiv.c b/crypto/essiv.c index f7d4ef4837e541..e63fc6442e3201 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -442,6 +442,7 @@ out: static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) { + struct skcipher_alg_common *skcipher_alg = NULL; struct crypto_attr_type *algt; const char *inner_cipher_name; const char *shash_name; @@ -450,7 +451,6 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_instance *inst; struct crypto_alg *base, *block_base; struct essiv_instance_ctx *ictx; - struct skcipher_alg *skcipher_alg = NULL; struct aead_alg *aead_alg = NULL; struct crypto_alg *_hash_alg; struct shash_alg *hash_alg; @@ -475,7 +475,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) mask = crypto_algt_inherited_mask(algt); switch (type) { - case CRYPTO_ALG_TYPE_SKCIPHER: + case CRYPTO_ALG_TYPE_LSKCIPHER: skcipher_inst = kzalloc(sizeof(*skcipher_inst) + sizeof(*ictx), GFP_KERNEL); if (!skcipher_inst) @@ -489,9 +489,10 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) inner_cipher_name, 0, mask); if (err) goto out_free_inst; - skcipher_alg = crypto_spawn_skcipher_alg(&ictx->u.skcipher_spawn); + skcipher_alg = crypto_spawn_skcipher_alg_common( + &ictx->u.skcipher_spawn); block_base = &skcipher_alg->base; - ivsize = crypto_skcipher_alg_ivsize(skcipher_alg); + ivsize = skcipher_alg->ivsize; break; case CRYPTO_ALG_TYPE_AEAD: @@ -574,18 +575,17 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) base->cra_alignmask = block_base->cra_alignmask; base->cra_priority = block_base->cra_priority; - if (type == CRYPTO_ALG_TYPE_SKCIPHER) { + if (type == CRYPTO_ALG_TYPE_LSKCIPHER) { skcipher_inst->alg.setkey = essiv_skcipher_setkey; skcipher_inst->alg.encrypt = essiv_skcipher_encrypt; skcipher_inst->alg.decrypt = essiv_skcipher_decrypt; skcipher_inst->alg.init = essiv_skcipher_init_tfm; skcipher_inst->alg.exit = essiv_skcipher_exit_tfm; - skcipher_inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(skcipher_alg); - skcipher_inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(skcipher_alg); + skcipher_inst->alg.min_keysize = skcipher_alg->min_keysize; + skcipher_inst->alg.max_keysize = skcipher_alg->max_keysize; skcipher_inst->alg.ivsize = ivsize; - skcipher_inst->alg.chunksize = crypto_skcipher_alg_chunksize(skcipher_alg); - skcipher_inst->alg.walksize = crypto_skcipher_alg_walksize(skcipher_alg); + skcipher_inst->alg.chunksize = skcipher_alg->chunksize; skcipher_inst->free = essiv_skcipher_free_instance; @@ -616,7 +616,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) out_free_hash: crypto_mod_put(_hash_alg); out_drop_skcipher: - if (type == CRYPTO_ALG_TYPE_SKCIPHER) + if (type == CRYPTO_ALG_TYPE_LSKCIPHER) crypto_drop_skcipher(&ictx->u.skcipher_spawn); else crypto_drop_aead(&ictx->u.aead_spawn); -- cgit 1.2.3-korg From 7d6899a5ec66d702923ef1ab1433747e69087ba7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:21 +0800 Subject: crypto: cryptd - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
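This and the analogous conversions below (adiantum, authenc, authencesn, ccm, chacha20poly1305, ctr, cts, gcm, hctr2, lrw and xts) all reduce to the same idiom, shown once here as a condensed sketch using only identifiers that appear in these diffs; the later patches in this series repeat it per template:

/* Grab the spawn as before, but fetch only the fields shared by
 * skcipher and lskcipher algorithms, i.e. go through
 * struct skcipher_alg_common instead of struct skcipher_alg. */
struct skcipher_alg_common *alg;

err = crypto_grab_skcipher(&ctx->spawn,
			   skcipher_crypto_instance(inst),
			   crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
	goto err_free_inst;

alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);

inst->alg.ivsize      = alg->ivsize;		/* common fields only */
inst->alg.chunksize   = alg->chunksize;
inst->alg.min_keysize = alg->min_keysize;
inst->alg.max_keysize = alg->max_keysize;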
Signed-off-by: Herbert Xu --- crypto/cryptd.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 194a92d677b9a2..31d022d47f7a06 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -377,7 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, { struct skcipherd_instance_ctx *ctx; struct skcipher_instance *inst; - struct skcipher_alg *alg; + struct skcipher_alg_common *alg; u32 type; u32 mask; int err; @@ -396,17 +396,17 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(&ctx->spawn); + alg = crypto_spawn_skcipher_alg_common(&ctx->spawn); err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); if (err) goto err_free_inst; inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); - inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); + inst->alg.ivsize = alg->ivsize; + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize; + inst->alg.max_keysize = alg->max_keysize; inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx); -- cgit 1.2.3-korg From 3c45b5780400185de9e53e9432591c140ca59b4c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:22 +0800 Subject: crypto: adiantum - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/adiantum.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index c33ba22a66389c..51703746d91e24 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -468,7 +468,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst) * Check for a supported set of inner algorithms. * See the comment at the beginning of this file. */ -static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, +static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg, struct crypto_alg *blockcipher_alg, struct shash_alg *hash_alg) { @@ -494,7 +494,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) const char *nhpoly1305_name; struct skcipher_instance *inst; struct adiantum_instance_ctx *ictx; - struct skcipher_alg *streamcipher_alg; + struct skcipher_alg_common *streamcipher_alg; struct crypto_alg *blockcipher_alg; struct shash_alg *hash_alg; int err; @@ -514,7 +514,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; - streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); + streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn); /* Block cipher, e.g. 
"aes" */ err = crypto_grab_cipher(&ictx->blockcipher_spawn, @@ -578,8 +578,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.decrypt = adiantum_decrypt; inst->alg.init = adiantum_init_tfm; inst->alg.exit = adiantum_exit_tfm; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg); + inst->alg.min_keysize = streamcipher_alg->min_keysize; + inst->alg.max_keysize = streamcipher_alg->max_keysize; inst->alg.ivsize = TWEAK_SIZE; inst->free = adiantum_free_instance; -- cgit 1.2.3-korg From cae3304330b52091e132544b307d59cc873649ad Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:23 +0800 Subject: crypto: authenc - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/authenc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index 3326c7343e8673..fa896ab143bdf5 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -373,9 +373,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; + struct skcipher_alg_common *enc; struct hash_alg_common *auth; struct crypto_alg *auth_base; - struct skcipher_alg *enc; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); @@ -398,7 +398,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl, crypto_attr_alg_name(tb[2]), 0, mask); if (err) goto err_free_inst; - enc = crypto_spawn_skcipher_alg(&ctx->enc); + enc = crypto_spawn_skcipher_alg_common(&ctx->enc); ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, auth_base->cra_alignmask + 1); @@ -422,8 +422,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl, enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); - inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; inst->alg.maxauthsize = auth->digestsize; inst->alg.init = crypto_authenc_init_tfm; -- cgit 1.2.3-korg From 24a285cea82961824d9709025dea69dbc84e01a0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:24 +0800 Subject: crypto: authencesn - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
Signed-off-by: Herbert Xu --- crypto/authencesn.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 91424e791d5c77..60e9568f023f6e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,9 +390,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; + struct skcipher_alg_common *enc; struct hash_alg_common *auth; struct crypto_alg *auth_base; - struct skcipher_alg *enc; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); @@ -415,7 +415,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, crypto_attr_alg_name(tb[2]), 0, mask); if (err) goto err_free_inst; - enc = crypto_spawn_skcipher_alg(&ctx->enc); + enc = crypto_spawn_skcipher_alg_common(&ctx->enc); err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, @@ -435,8 +435,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); - inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; inst->alg.maxauthsize = auth->digestsize; inst->alg.init = crypto_authenc_esn_init_tfm; -- cgit 1.2.3-korg From 60fa9a39aeddff38704faa82c7c44a29123887e3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:25 +0800 Subject: crypto: ccm - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/ccm.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/ccm.c b/crypto/ccm.c index a9453129c51cb7..7af89a5b745c44 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -447,10 +447,10 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { + struct skcipher_alg_common *ctr; u32 mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; - struct skcipher_alg *ctr; struct hash_alg_common *mac; int err; @@ -478,13 +478,12 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ctr_name, 0, mask); if (err) goto err_free_inst; - ctr = crypto_spawn_skcipher_alg(&ictx->ctr); + ctr = crypto_spawn_skcipher_alg_common(&ictx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */ err = -EINVAL; if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || - crypto_skcipher_alg_ivsize(ctr) != 16 || - ctr->base.cra_blocksize != 1) + ctr->ivsize != 16 || ctr->base.cra_blocksize != 1) goto err_free_inst; /* ctr and cbcmac must use the same underlying block cipher. 
*/ @@ -507,7 +506,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, inst->alg.base.cra_alignmask = mac->base.cra_alignmask | ctr->base.cra_alignmask; inst->alg.ivsize = 16; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); + inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx); inst->alg.init = crypto_ccm_init_tfm; -- cgit 1.2.3-korg From c9e4b76ff4a706afc30a7c10c89cc075df487485 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:26 +0800 Subject: crypto: chacha20poly1305 - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 3a905c5d8f5351..0e2e208d98f942 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -558,7 +558,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; - struct skcipher_alg *chacha; + struct skcipher_alg_common *chacha; struct hash_alg_common *poly; int err; @@ -579,7 +579,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; - chacha = crypto_spawn_skcipher_alg(&ctx->chacha); + chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha); err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst), crypto_attr_alg_name(tb[2]), 0, mask); @@ -591,7 +591,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (poly->digestsize != POLY1305_DIGEST_SIZE) goto err_free_inst; /* Need 16-byte IV size, including Initial Block Counter value */ - if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE) + if (chacha->ivsize != CHACHA_IV_SIZE) goto err_free_inst; /* Not a stream cipher? */ if (chacha->base.cra_blocksize != 1) @@ -615,7 +615,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; inst->alg.ivsize = ivsize; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha); + inst->alg.chunksize = chacha->chunksize; inst->alg.maxauthsize = POLY1305_DIGEST_SIZE; inst->alg.init = chachapoly_init; inst->alg.exit = chachapoly_exit; -- cgit 1.2.3-korg From 36b6fbefc06cbfe7b5f17c32445ecaf082c24449 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:27 +0800 Subject: crypto: ctr - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
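For a concrete picture of the sizes being adjusted below: an rfc3686 key is the inner CTR key followed by a 4-byte nonce (CTR_RFC3686_NONCE_SIZE, per RFC 3686), so e.g. rfc3686(ctr(aes)) advertises key sizes of 20 to 36 bytes for AES's 16 to 32. A hedged sketch of the corresponding key split, condensed from the existing crypto_rfc3686_setkey() (the function name here is illustrative):

static int rfc3686_setkey_sketch(struct crypto_skcipher *child, u8 *nonce,
				 const u8 *key, unsigned int keylen)
{
	/* the last 4 bytes of the template key are the nonce */
	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	keylen -= CTR_RFC3686_NONCE_SIZE;
	memcpy(nonce, key + keylen, CTR_RFC3686_NONCE_SIZE);

	/* the remainder keys the inner ctr(...) skcipher */
	return crypto_skcipher_setkey(child, key, keylen);
}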
Signed-off-by: Herbert Xu --- crypto/ctr.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/ctr.c b/crypto/ctr.c index 23c698b2201304..1420496062d57d 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -258,8 +258,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; u32 mask; int err; @@ -278,11 +278,11 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); /* We only support 16-byte blocks. */ err = -EINVAL; - if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE) + if (alg->ivsize != CTR_RFC3686_BLOCK_SIZE) goto err_free_inst; /* Not a stream cipher? */ @@ -303,11 +303,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.ivsize = CTR_RFC3686_IV_SIZE; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + - CTR_RFC3686_NONCE_SIZE; - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + - CTR_RFC3686_NONCE_SIZE; + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize + CTR_RFC3686_NONCE_SIZE; + inst->alg.max_keysize = alg->max_keysize + CTR_RFC3686_NONCE_SIZE; inst->alg.setkey = crypto_rfc3686_setkey; inst->alg.encrypt = crypto_rfc3686_crypt; -- cgit 1.2.3-korg From 7202e65b1eeeb54e0ec634c63d102b8cf670ea3b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:28 +0800 Subject: crypto: cts - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
Signed-off-by: Herbert Xu --- crypto/cts.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/cts.c b/crypto/cts.c index 8f604f6554b1c3..f5b42156b6c724 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -324,8 +324,8 @@ static void crypto_cts_free(struct skcipher_instance *inst) static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; struct skcipher_instance *inst; - struct skcipher_alg *alg; u32 mask; int err; @@ -344,10 +344,10 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); err = -EINVAL; - if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize) + if (alg->ivsize != alg->base.cra_blocksize) goto err_free_inst; if (strncmp(alg->base.cra_name, "cbc(", 4)) @@ -363,9 +363,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.ivsize = alg->base.cra_blocksize; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize; + inst->alg.max_keysize = alg->max_keysize; inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx); -- cgit 1.2.3-korg From 712c22aa59672b7f5abdb81d8c585040e9e071f9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:29 +0800 Subject: crypto: gcm - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/gcm.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/gcm.c b/crypto/gcm.c index 4ba624450c3ffb..91ce6e0e2afc14 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -576,10 +576,10 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { + struct skcipher_alg_common *ctr; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; - struct skcipher_alg *ctr; struct hash_alg_common *ghash; int err; @@ -607,13 +607,12 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ctr_name, 0, mask); if (err) goto err_free_inst; - ctr = crypto_spawn_skcipher_alg(&ctx->ctr); + ctr = crypto_spawn_skcipher_alg_common(&ctx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. 
*/ err = -EINVAL; if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || - crypto_skcipher_alg_ivsize(ctr) != 16 || - ctr->base.cra_blocksize != 1) + ctr->ivsize != 16 || ctr->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; @@ -634,7 +633,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ctr->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.ivsize = GCM_AES_IV_SIZE; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); + inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; inst->alg.init = crypto_gcm_init_tfm; inst->alg.exit = crypto_gcm_exit_tfm; -- cgit 1.2.3-korg From c4c6bb6e7905993126b8d00c641fa9037a198a9a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:30 +0800 Subject: crypto: hctr2 - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/hctr2.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/hctr2.c b/crypto/hctr2.c index 6f4c1884d0e96c..653fde727f0fa9 100644 --- a/crypto/hctr2.c +++ b/crypto/hctr2.c @@ -406,10 +406,10 @@ static int hctr2_create_common(struct crypto_template *tmpl, const char *xctr_name, const char *polyval_name) { + struct skcipher_alg_common *xctr_alg; u32 mask; struct skcipher_instance *inst; struct hctr2_instance_ctx *ictx; - struct skcipher_alg *xctr_alg; struct crypto_alg *blockcipher_alg; struct shash_alg *polyval_alg; char blockcipher_name[CRYPTO_MAX_ALG_NAME]; @@ -431,7 +431,7 @@ static int hctr2_create_common(struct crypto_template *tmpl, xctr_name, 0, mask); if (err) goto err_free_inst; - xctr_alg = crypto_spawn_skcipher_alg(&ictx->xctr_spawn); + xctr_alg = crypto_spawn_skcipher_alg_common(&ictx->xctr_spawn); err = -EINVAL; if (strncmp(xctr_alg->base.cra_name, "xctr(", 5)) @@ -500,8 +500,8 @@ static int hctr2_create_common(struct crypto_template *tmpl, inst->alg.decrypt = hctr2_decrypt; inst->alg.init = hctr2_init_tfm; inst->alg.exit = hctr2_exit_tfm; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(xctr_alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(xctr_alg); + inst->alg.min_keysize = xctr_alg->min_keysize; + inst->alg.max_keysize = xctr_alg->max_keysize; inst->alg.ivsize = TWEAK_SIZE; inst->free = hctr2_free_instance; -- cgit 1.2.3-korg From 1ec0a8aba573a6918e65dd99a3e19af1025d1408 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:31 +0800 Subject: crypto: lrw - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
Signed-off-by: Herbert Xu --- crypto/lrw.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/lrw.c b/crypto/lrw.c index 59260aefed2807..e216fbf2b78667 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -299,8 +299,8 @@ static void lrw_free_instance(struct skcipher_instance *inst) static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; struct skcipher_instance *inst; - struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; @@ -336,13 +336,13 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alg = crypto_skcipher_spawn_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); err = -EINVAL; if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) goto err_free_inst; - if (crypto_skcipher_alg_ivsize(alg)) + if (alg->ivsize) goto err_free_inst; err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", @@ -382,10 +382,8 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) (__alignof__(be128) - 1); inst->alg.ivsize = LRW_BLOCK_SIZE; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + - LRW_BLOCK_SIZE; - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + - LRW_BLOCK_SIZE; + inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE; + inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); -- cgit 1.2.3-korg From bf028cfe8a505b5330be7df628ac19ffad48f932 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:32 +0800 Subject: crypto: xts - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
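The doubled limits in the diff below reflect the XTS key layout: the supplied key is a data key concatenated with an equally sized tweak key. A hedged sketch of the split performed at setkey time (illustrative function name; the real code additionally runs xts_verify_key() on the full key first):

static int xts_setkey_sketch(struct crypto_skcipher *child,
			     struct crypto_cipher *tweak,
			     const u8 *key, unsigned int keylen)
{
	/* first half keys the data cipher, second half the tweak
	 * cipher; hence xts(aes) spans 32..64-byte keys */
	int err = crypto_skcipher_setkey(child, key, keylen / 2);

	return err ?: crypto_cipher_setkey(tweak, key + keylen / 2,
					   keylen / 2);
}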
Signed-off-by: Herbert Xu --- crypto/xts.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/xts.c b/crypto/xts.c index 548b302c6c6a00..9237b143336729 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -338,9 +338,9 @@ static void xts_free_instance(struct skcipher_instance *inst) static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) { + struct skcipher_alg_common *alg; struct skcipher_instance *inst; struct xts_instance_ctx *ctx; - struct skcipher_alg *alg; const char *cipher_name; u32 mask; int err; @@ -375,13 +375,13 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alg = crypto_skcipher_spawn_alg(&ctx->spawn); + alg = crypto_spawn_skcipher_alg_common(&ctx->spawn); err = -EINVAL; if (alg->base.cra_blocksize != XTS_BLOCK_SIZE) goto err_free_inst; - if (crypto_skcipher_alg_ivsize(alg)) + if (alg->ivsize) goto err_free_inst; err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts", @@ -421,8 +421,8 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) (__alignof__(u64) - 1); inst->alg.ivsize = XTS_BLOCK_SIZE; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; + inst->alg.min_keysize = alg->min_keysize * 2; + inst->alg.max_keysize = alg->max_keysize * 2; inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx); -- cgit 1.2.3-korg From 8405ec8e3c02df8b3720874c3e2169fef4553868 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Sat, 7 Oct 2023 09:10:43 +0200 Subject: crypto: jitter - reuse allocated entropy collector In case a health test error occurs during runtime, the power-up health tests are rerun to verify that the noise source is still good and that the reported health test error was an outlier. For performing this power-up health test, the already existing entropy collector instance is used instead of allocating a new one. This change has the following implications: * The noise that is collected as part of the newly run health tests is inserted into the entropy collector and thus stirs the existing data present in there further. The entropy collected during the health test is therefore not wasted. This is also allowed by SP800-90B. * The power-on health test is not affected by the state of the entropy collector, because it resets the APT / RCT state. The remainder of the state is unrelated to the health test as it is only applied to newly obtained time stamps. This change also fixes a reported bug where an allocation was made while an atomic lock was held (the lock is taken in jent_kcapi_random(), which calls jent_read_entropy(), which in turn can call jent_entropy_init()).
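The new fourth argument gives jent_entropy_init() two calling conventions, both visible in the diff below; in sketch form:

/* Power-up self test: no collector exists yet, so pass NULL and
 * let the function allocate (and later free) a throwaway one: */
ret = jent_entropy_init(osr, flags, hash_state, NULL);

/* Runtime health-test rerun: hand in the live collector; the first
 * three arguments are unused on this path and only the APT/RCT
 * state of the collector is reset: */
ret = jent_entropy_init(0, 0, NULL, ec);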
Fixes: 04597c8dd6c4 ("jitter - add RCT/APT support for different OSRs") Reported-by: Dan Carpenter Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy-kcapi.c | 2 +- crypto/jitterentropy.c | 36 ++++++++++++++++++++++++++---------- crypto/jitterentropy.h | 2 +- 3 files changed, 28 insertions(+), 12 deletions(-) (limited to 'crypto') diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 0c6752221451bf..76edbf8af0ac78 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -347,7 +347,7 @@ static int __init jent_mod_init(void) desc->tfm = tfm; crypto_shash_init(desc); - ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc); + ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc, NULL); shash_desc_zero(desc); crypto_free_shash(tfm); if (ret) { diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 18bbe2b89a94dc..09c9db90c15426 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -611,8 +611,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, * Perform startup health tests and return permanent * error if it fails. */ - if (jent_entropy_init(ec->osr, ec->flags, - ec->hash_state)) + if (jent_entropy_init(0, 0, NULL, ec)) return -3; return -2; @@ -686,14 +685,30 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector) jent_zfree(entropy_collector); } -int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state) +int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, + struct rand_data *p_ec) { - struct rand_data *ec; - int i, time_backwards = 0, ret = 0; - - ec = jent_entropy_collector_alloc(osr, flags, hash_state); - if (!ec) - return JENT_EMEM; + /* + * If the caller provides an allocated ec, reuse it, which implies that + * the health test entropy data is used to further stir the available + * entropy pool. + */ + struct rand_data *ec = p_ec; + int i, time_backwards = 0, ret = 0, ec_free = 0; + + if (!ec) { + ec = jent_entropy_collector_alloc(osr, flags, hash_state); + if (!ec) + return JENT_EMEM; + ec_free = 1; + } else { + /* Reset the APT */ + jent_apt_reset(ec, 0); + /* Ensure that a new APT base is obtained */ + ec->apt_base_set = 0; + /* Reset the RCT */ + ec->rct_count = 0; + } /* We could perform statistical tests here, but the problem is * that we only have a few loop counts to do testing.
These @@ -783,7 +798,8 @@ int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state) } out: - jent_entropy_collector_free(ec); + if (ec_free) + jent_entropy_collector_free(ec); return ret; } diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index e31661ee00d379..aa4728675ae245 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -12,7 +12,7 @@ int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len); struct rand_data; extern int jent_entropy_init(unsigned int osr, unsigned int flags, - void *hash_state); + void *hash_state, struct rand_data *p_ec); extern int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len); -- cgit 1.2.3-korg From bb40d32689d73c46de39a0529d551f523f21dc9b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 8 Oct 2023 19:31:16 -0700 Subject: crypto: xts - use 'spawn' for underlying single-block cipher Since commit adad556efcdd ("crypto: api - Fix built-in testing dependency failures"), the following warning appears when booting an x86_64 kernel that is configured with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y and CONFIG_CRYPTO_AES_NI_INTEL=y, even when CONFIG_CRYPTO_XTS=y and CONFIG_CRYPTO_AES=y: alg: skcipher: skipping comparison tests for xts-aes-aesni because xts(ecb(aes-generic)) is unavailable This is caused by an issue in the xts template where it allocates an "aes" single-block cipher without declaring a dependency on it via the crypto_spawn mechanism. This issue was exposed by the above commit because it reversed the order that the algorithms are tested in. Specifically, when "xts(ecb(aes-generic))" is instantiated and tested during the comparison tests for "xts-aes-aesni", the "xts" template allocates an "aes" crypto_cipher for encrypting tweaks. This resolves to "aes-aesni". (Getting "aes-aesni" instead of "aes-generic" here is a bit weird, but it's apparently intended.) Due to the above-mentioned commit, the testing of "aes-aesni", and the finalization of its registration, now happens at this point instead of before. At the end of that, crypto_remove_spawns() unregisters all algorithm instances that depend on a lower-priority "aes" implementation such as "aes-generic" but that do not depend on "aes-aesni". However, because "xts" does not use the crypto_spawn mechanism for its "aes", its dependency on "aes-aesni" is not recognized by crypto_remove_spawns(). Thus, crypto_remove_spawns() unexpectedly unregisters "xts(ecb(aes-generic))". Fix this issue by making the "xts" template use the crypto_spawn mechanism for its "aes" dependency, like what other templates do. Note, this fix could be applied as far back as commit f1c131b45410 ("crypto: xts - Convert to skcipher"). However, the issue only got exposed by the much more recent changes to how the crypto API runs the self-tests, so there should be no need to backport this to very old kernels. Also, an alternative fix would be to flip the list iteration order in crypto_start_tests() to restore the original testing order. I'm thinking we should do that too, since the original order seems more natural, but it shouldn't be relied on for correctness. 
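In sketch form, the fix routes the tweak cipher through the standard spawn lifecycle so that the dependency becomes visible to crypto_remove_spawns() (all identifiers as in the diff below):

/* at instance creation: declare the dependency */
err = crypto_grab_cipher(&ctx->tweak_spawn,
			 skcipher_crypto_instance(inst), name, 0, mask);

/* at tfm init: instantiate via the spawn, not by name lookup */
tweak = crypto_spawn_cipher(&ictx->tweak_spawn);

/* at instance teardown: release the reference */
crypto_drop_cipher(&ictx->tweak_spawn);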
Fixes: adad556efcdd ("crypto: api - Fix built-in testing dependency failures") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/xts.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/xts.c b/crypto/xts.c index 9237b143336729..672e1a3f0b0c93 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -28,7 +28,7 @@ struct xts_tfm_ctx { struct xts_instance_ctx { struct crypto_skcipher_spawn spawn; - char name[CRYPTO_MAX_ALG_NAME]; + struct crypto_cipher_spawn tweak_spawn; }; struct xts_request_ctx { @@ -306,7 +306,7 @@ static int xts_init_tfm(struct crypto_skcipher *tfm) ctx->child = child; - tweak = crypto_alloc_cipher(ictx->name, 0, 0); + tweak = crypto_spawn_cipher(&ictx->tweak_spawn); if (IS_ERR(tweak)) { crypto_free_skcipher(ctx->child); return PTR_ERR(tweak); @@ -333,12 +333,14 @@ static void xts_free_instance(struct skcipher_instance *inst) struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); crypto_drop_skcipher(&ictx->spawn); + crypto_drop_cipher(&ictx->tweak_spawn); kfree(inst); } static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_alg_common *alg; + char name[CRYPTO_MAX_ALG_NAME]; struct skcipher_instance *inst; struct xts_instance_ctx *ctx; const char *cipher_name; @@ -363,13 +365,13 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) cipher_name, 0, mask); if (err == -ENOENT) { err = -ENAMETOOLONG; - if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", + if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), - ctx->name, 0, mask); + name, 0, mask); } if (err) @@ -398,23 +400,28 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) if (!strncmp(cipher_name, "ecb(", 4)) { int len; - len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); + len = strscpy(name, cipher_name + 4, sizeof(name)); if (len < 2) goto err_free_inst; - if (ctx->name[len - 1] != ')') + if (name[len - 1] != ')') goto err_free_inst; - ctx->name[len - 1] = 0; + name[len - 1] = 0; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) { + "xts(%s)", name) >= CRYPTO_MAX_ALG_NAME) { err = -ENAMETOOLONG; goto err_free_inst; } } else goto err_free_inst; + err = crypto_grab_cipher(&ctx->tweak_spawn, + skcipher_crypto_instance(inst), name, 0, mask); + if (err) + goto err_free_inst; + inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | -- cgit 1.2.3-korg From 313a4074d78fc9b90c93c9298e9f90d86a144231 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 9 Oct 2023 00:32:13 -0700 Subject: crypto: shash - optimize the default digest and finup For an shash algorithm that doesn't implement ->digest, currently crypto_shash_digest() with aligned input makes 5 indirect calls: 1 to shash_digest_unaligned(), 1 to ->init, 2 to ->update ('alignmask + 1' bytes, then the rest), then 1 to ->final. This is true even if the algorithm implements ->finup. This is caused by an unnecessary fallback to code meant to handle unaligned inputs. In fact, crypto_shash_digest() already does the needed alignment check earlier. Therefore, optimize the number of indirect calls for aligned inputs to 3 when the algorithm implements ->finup. 
It remains at 5 when the algorithm implements neither ->finup nor ->digest. Similarly, for an shash algorithm that doesn't implement ->finup, currently crypto_shash_finup() with aligned input makes 4 indirect calls: 1 to shash_finup_unaligned(), 2 to ->update, and 1 to ->final. Optimize this to 3 calls. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 1fadb6b59bdcc1..d99dc2f94c65f5 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -191,6 +191,15 @@ static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, shash_final_unaligned(desc, out); } +static int shash_default_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct shash_alg *shash = crypto_shash_alg(desc->tfm); + + return shash->update(desc, data, len) ?: + shash->final(desc, out); +} + int crypto_shash_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -224,6 +233,15 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, shash_final_unaligned(desc, out); } +static int shash_default_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct shash_alg *shash = crypto_shash_alg(desc->tfm); + + return shash->init(desc) ?: + shash->finup(desc, data, len, out); +} + int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -656,9 +674,9 @@ static int shash_prepare_alg(struct shash_alg *alg) base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; if (!alg->finup) - alg->finup = shash_finup_unaligned; + alg->finup = shash_default_finup; if (!alg->digest) - alg->digest = shash_digest_unaligned; + alg->digest = shash_default_digest; if (!alg->export) { alg->export = shash_default_export; alg->import = shash_default_import; -- cgit 1.2.3-korg From 2e02c25ac93463d52bd7c1010cd647c944f179e1 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 9 Oct 2023 00:32:14 -0700 Subject: crypto: shash - fold shash_digest_unaligned() into crypto_shash_digest() Fold shash_digest_unaligned() into its only remaining caller. Also, avoid a redundant check of CRYPTO_TFM_NEED_KEY by replacing the call to crypto_shash_init() with shash->init(desc). Finally, replace shash_update_unaligned() + shash_final_unaligned() with shash_finup_unaligned() which does exactly that. 
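Taken together, the two patches mean that an shash driver which supplies ->finup but not ->digest now resolves an aligned crypto_shash_digest() as ->digest (the installed default), then ->init, then ->finup: three indirect calls instead of five. A hedged sketch of such a driver (all toy_* names and sizes are hypothetical):

static struct shash_alg toy_alg = {
	.digestsize	= 32,
	.descsize	= 64,
	.init		= toy_init,
	.update		= toy_update,
	.final		= toy_final,
	.finup		= toy_finup,	/* enables the 3-call digest path */
	/* .digest left unset: shash_prepare_alg() installs
	 * shash_default_digest(), which chains ->init ?: ->finup */
	.base = {
		.cra_name	 = "toy",
		.cra_driver_name = "toy-generic",
		.cra_blocksize	 = 64,
		.cra_module	 = THIS_MODULE,
	},
};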
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index d99dc2f94c65f5..15fee57cca8efd 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -225,14 +225,6 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_finup); -static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return crypto_shash_init(desc) ?: - shash_update_unaligned(desc, data, len) ?: - shash_final_unaligned(desc, out); -} - static int shash_default_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -260,7 +252,8 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) err = -ENOKEY; else if (((unsigned long)data | (unsigned long)out) & alignmask) - err = shash_digest_unaligned(desc, data, len, out); + err = shash->init(desc) ?: + shash_finup_unaligned(desc, data, len, out); else err = shash->digest(desc, data, len, out); -- cgit 1.2.3-korg From dadf5e56c967a095213f664135784bf770eb96ab Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 9 Oct 2023 22:59:43 -0700 Subject: crypto: adiantum - add fast path for single-page messages When the source scatterlist is a single page, optimize the first hash step of adiantum to use crypto_shash_digest() instead of init/update/final, and use the same local kmap for both hashing the bulk part and loading the narrow part of the source data. Likewise, when the destination scatterlist is a single page, optimize the second hash step of adiantum to use crypto_shash_digest() instead of init/update/final, and use the same local kmap for both hashing the bulk part and storing the narrow part of the destination data. In some cases these optimizations improve performance significantly. Note: ideally, for optimal performance each architecture should implement the full "adiantum(xchacha12,aes)" algorithm and fully optimize the contiguous buffer case to use no indirect calls. That's not something I've gotten around to doing, though. This commit just makes a relatively small change that provides some benefit with the existing template-based approach. 
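Both fast paths hinge on the same test, condensed here from the diff below: a scatterlist with a single entry whose data fits within one page can be mapped once and hashed with a single crypto_shash_digest() call, the one mapping covering both the bulk input and the 16-byte narrow part at its tail:

if (sg_nents(sg) == 1 && sg->offset + req->cryptlen <= PAGE_SIZE) {
	/* one kmap serves the hash and the narrow-part copy */
	void *virt = kmap_local_page(sg_page(sg)) + sg->offset;

	err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
				  (u8 *)&digest);
	kunmap_local(virt);
} else {
	/* any other layout takes the miter-based slow path */
	err = adiantum_hash_message(req, sg, sg_nents(sg), &digest);
}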
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 65 ++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 18 deletions(-) (limited to 'crypto') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 51703746d91e24..aa0f8b3b499adc 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -245,10 +245,9 @@ static void adiantum_hash_header(struct skcipher_request *req) /* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */ static int adiantum_hash_message(struct skcipher_request *req, - struct scatterlist *sgl, le128 *digest) + struct scatterlist *sgl, unsigned int nents, + le128 *digest) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; struct shash_desc *hash_desc = &rctx->u.hash_desc; @@ -256,14 +255,11 @@ static int adiantum_hash_message(struct skcipher_request *req, unsigned int i, n; int err; - hash_desc->tfm = tctx->hash; - err = crypto_shash_init(hash_desc); if (err) return err; - sg_miter_start(&miter, sgl, sg_nents(sgl), - SG_MITER_FROM_SG | SG_MITER_ATOMIC); + sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC); for (i = 0; i < bulk_len; i += n) { sg_miter_next(&miter); n = min_t(unsigned int, miter.length, bulk_len - i); @@ -285,6 +281,8 @@ static int adiantum_finish(struct skcipher_request *req) const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + struct scatterlist *dst = req->dst; + const unsigned int dst_nents = sg_nents(dst); le128 digest; int err; @@ -298,13 +296,30 @@ static int adiantum_finish(struct skcipher_request *req) * enc: C_R = C_M - H_{K_H}(T, C_L) * dec: P_R = P_M - H_{K_H}(T, P_L) */ - err = adiantum_hash_message(req, req->dst, &digest); - if (err) - return err; - le128_add(&digest, &digest, &rctx->header_hash); - le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); - scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst, - bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1); + rctx->u.hash_desc.tfm = tctx->hash; + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash); + if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) { + /* Fast path for single-page destination */ + void *virt = kmap_local_page(sg_page(dst)) + dst->offset; + + err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len, + (u8 *)&digest); + if (err) { + kunmap_local(virt); + return err; + } + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); + memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128)); + kunmap_local(virt); + } else { + /* Slow path that works for any destination scatterlist */ + err = adiantum_hash_message(req, dst, dst_nents, &digest); + if (err) + return err; + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); + scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst, + bulk_len, sizeof(le128), 1); + } return 0; } @@ -324,6 +339,8 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc) const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + struct scatterlist *src = req->src; + const unsigned int src_nents = sg_nents(src); 
unsigned int stream_len; le128 digest; int err; @@ -339,12 +356,24 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc) * dec: C_M = C_R + H_{K_H}(T, C_L) */ adiantum_hash_header(req); - err = adiantum_hash_message(req, req->src, &digest); + rctx->u.hash_desc.tfm = tctx->hash; + if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) { + /* Fast path for single-page source */ + void *virt = kmap_local_page(sg_page(src)) + src->offset; + + err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len, + (u8 *)&digest); + memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128)); + kunmap_local(virt); + } else { + /* Slow path that works for any source scatterlist */ + err = adiantum_hash_message(req, src, src_nents, &digest); + scatterwalk_map_and_copy(&rctx->rbuf.bignum, src, + bulk_len, sizeof(le128), 0); + } if (err) return err; - le128_add(&digest, &digest, &rctx->header_hash); - scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src, - bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0); + le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash); le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); /* If encrypting, encrypt P_M with the block cipher to get C_M */ -- cgit 1.2.3-korg From 16ab7cb5825fc3425c16ad2c6e53d827f382d7c6 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Tue, 10 Oct 2023 22:22:38 +0100 Subject: crypto: pkcs7 - remove sha1 support Removes support for sha1-signed kernel modules and for importing sha1-signed x.509 certificates. rsa-pkcs1pad keeps sha1 padding support, which seems to be used by the virtio driver. sha1 remains available, as there are many drivers and subsystems using it. Note that only hmac(sha1) with secret keys remains cryptographically secure. In the kernel there are filesystems, IMA and tpm/pcr that appear to be using sha1. Maybe they can all start to be slowly upgraded to something else, e.g. blake3, ParallelHash or SHAKE256, as needed.
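With these OID cases removed, a sha1-based signature now falls through to the parsers' pre-existing default branches and is rejected; sketched here for pkcs7_sig_note_digest_algo() (the exact printk wording is an assumption; the -ENOPKG return is the existing default behaviour):

switch (ctx->last_oid) {
case OID_sha256:
	ctx->sinfo->sig->hash_algo = "sha256";
	break;
/* ... sha384, sha512 and sha224 cases unchanged ... */
default:
	/* OID_sha1 now lands here and the blob is rejected */
	printk("Unsupported digest algo: %u\n", ctx->last_oid);
	return -ENOPKG;
}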
Signed-off-by: Dimitri John Ledkov Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 3 -- crypto/asymmetric_keys/pkcs7_parser.c | 4 -- crypto/asymmetric_keys/public_key.c | 3 +- crypto/asymmetric_keys/signature.c | 2 +- crypto/asymmetric_keys/x509_cert_parser.c | 8 ---- crypto/testmgr.h | 80 ------------------------------- include/linux/oid_registry.h | 4 -- kernel/module/Kconfig | 5 -- 8 files changed, 2 insertions(+), 107 deletions(-) (limited to 'crypto') diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 690405ebe77b31..6416bded0e073f 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -75,9 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, oid = look_up_OID(value, vlen); switch (oid) { - case OID_sha1: - ctx->digest_algo = "sha1"; - break; case OID_sha256: ctx->digest_algo = "sha256"; break; diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index cf4caab9620ff1..ab647cb4d76689 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -227,9 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { - case OID_sha1: - ctx->sinfo->sig->hash_algo = "sha1"; - break; case OID_sha256: ctx->sinfo->sig->hash_algo = "sha256"; break; @@ -272,7 +269,6 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; - case OID_id_ecdsa_with_sha1: case OID_id_ecdsa_with_sha224: case OID_id_ecdsa_with_sha256: case OID_id_ecdsa_with_sha384: diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index abeecb8329b3d5..5bf0452c17af21 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -116,8 +116,7 @@ software_key_determine_akcipher(const struct public_key *pkey, */ if (!hash_algo) return -EINVAL; - if (strcmp(hash_algo, "sha1") != 0 && - strcmp(hash_algo, "sha224") != 0 && + if (strcmp(hash_algo, "sha224") != 0 && strcmp(hash_algo, "sha256") != 0 && strcmp(hash_algo, "sha384") != 0 && strcmp(hash_algo, "sha512") != 0) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 2deff81f8af50b..398983be77e8bc 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(decrypt_blob); * Sign the specified data blob using the private key specified by params->key. * The signature is wrapped in an encoding if params->encoding is specified * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be - * passed through params->hash_algo (eg. "sha1"). + * passed through params->hash_algo (eg. "sha512"). * * Returns the length of the data placed in the signature buffer or an error. 
*/ diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 2c30928621b7e9..68ef1ffbbef6b8 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -198,10 +198,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, default: return -ENOPKG; /* Unsupported combination */ - case OID_sha1WithRSAEncryption: - ctx->cert->sig->hash_algo = "sha1"; - goto rsa_pkcs1; - case OID_sha256WithRSAEncryption: ctx->cert->sig->hash_algo = "sha256"; goto rsa_pkcs1; @@ -218,10 +214,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, ctx->cert->sig->hash_algo = "sha224"; goto rsa_pkcs1; - case OID_id_ecdsa_with_sha1: - ctx->cert->sig->hash_algo = "sha1"; - goto ecdsa; - case OID_id_ecdsa_with_sha224: ctx->cert->sig->hash_algo = "sha224"; goto ecdsa; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 0cd6e0600255aa..d7e98397549b5b 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -653,30 +653,6 @@ static const struct akcipher_testvec rsa_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { { .key = - "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1" - "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68" - "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08" - "\x98", - .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, - .m = - "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae" - "\x63\x85\xe7\x82", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91" - "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10" - "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86" - "\x80\x6f\xa5\x79\x77\xda\xd0", - .c_size = 55, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { - .key = "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd" "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75" "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee" @@ -780,32 +756,6 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { { .key = - "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41" - "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9" - "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc" - "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8" - "\xaf", - .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, - .m = - "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c" - "\x0b\xde\x6a\x42", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7" - "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a" - "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d" - "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7" - "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad", - .c_size = 72, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { - .key = "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9" "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0" 
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88" @@ -916,36 +866,6 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { { - .key = /* secp384r1(sha1) */ - "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1" - "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f" - "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42" - "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e" - "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e" - "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3" - "\xf1", - .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, - .m = - "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22" - "\x3a\x69\xc1\x93", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07" - "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1" - "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e" - "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0" - "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88" - "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26" - "\x79\x12\x2a\xb7\xc5\x15\x92\xc5", - .c_size = 104, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { .key = /* secp384r1(sha224) */ "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0" "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18" diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 4d04fa5d1eeceb..8b79e55cfcecbc 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -17,12 +17,10 @@ * build_OID_registry.pl to generate the data for look_up_OID(). */ enum OID { - OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */ OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */ - OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */ OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */ OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */ @@ -30,7 +28,6 @@ enum OID { /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ - OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */ OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */ OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */ OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */ @@ -67,7 +64,6 @@ enum OID { OID_PKU2U, /* 1.3.5.1.5.2.7 */ OID_Scram, /* 1.3.6.1.5.5.14 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ - OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig index 33a2e991f60814..19a53d5e77362c 100644 --- a/kernel/module/Kconfig +++ b/kernel/module/Kconfig @@ -236,10 +236,6 @@ choice possible to load a signed module containing the algorithm to check the signature on that module. 
-config MODULE_SIG_SHA1 - bool "Sign modules with SHA-1" - select CRYPTO_SHA1 - config MODULE_SIG_SHA224 bool "Sign modules with SHA-224" select CRYPTO_SHA256 @@ -261,7 +257,6 @@ endchoice config MODULE_SIG_HASH string depends on MODULE_SIG || IMA_APPRAISE_MODSIG - default "sha1" if MODULE_SIG_SHA1 default "sha224" if MODULE_SIG_SHA224 default "sha256" if MODULE_SIG_SHA256 default "sha384" if MODULE_SIG_SHA384 -- cgit 1.2.3-korg From c1d760a47163bec1ecd5c82638c8c234fcbd549e Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Tue, 10 Oct 2023 22:25:29 +0100 Subject: crypto: mscode_parser - remove sha224 authenticode support It is possible to stand up one's own certificates and sign PE-COFF binaries using SHA-224. However, it never became popular or needed, since it has similar costs to SHA-256. The Windows Authenticode infrastructure never had support for SHA-224, and all secureboot keys used for linux vmlinuz have always been using at least SHA-256. Given that the point of mscode_parser is to support interoperability with typical de-facto hashes, remove support for SHA-224 to avoid the possibility of creating interoperability issues with rhboot/shim, grub, and non-linux systems trying to sign or verify vmlinux. SHA-224 itself is not removed from the kernel, as it is truncated SHA-256. If requested I can write patches to remove SHA-224 support across all of the drivers. Signed-off-by: Dimitri John Ledkov Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'crypto') diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 6416bded0e073f..855cbc46a9c368 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -84,9 +84,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, case OID_sha512: ctx->digest_algo = "sha512"; break; - case OID_sha224: - ctx->digest_algo = "sha224"; - break; case OID__NR: sprint_oid(value, vlen, buffer, sizeof(buffer)); -- cgit 1.2.3-korg From 87d6621c07d230d9168a021f2760062f2262e8b9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 12 Oct 2023 13:11:25 +0800 Subject: crypto: lskcipher - Return EINVAL when ecb_name fails sanity checks Set the error value to -EINVAL instead of zero when the underlying name (within "ecb()") fails basic sanity checks. Fixes: 8aee5d4ebd11 ("crypto: lskcipher - Add compatibility wrapper around ECB") Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202310111323.ZjK7bzjw-lkp@intel.com/ Signed-off-by: Herbert Xu --- crypto/lskcipher.c | 1 + 1 file changed, 1 insertion(+) (limited to 'crypto') diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index 9be3c04bc62a39..cb6170ebcaa36c 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -583,6 +583,7 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple( if (ecb_name[0]) { int len; + err = -EINVAL; len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4], sizeof(ecb_name)); if (len < 2) -- cgit 1.2.3-korg From 7ec0a09d4e84396b8c3c799b0add4399f5fdb7a6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 12 Oct 2023 22:56:13 -0700 Subject: crypto: skcipher - fix weak key check for lskciphers When an algorithm of the new "lskcipher" type is exposed through the "skcipher" API, calls to crypto_skcipher_setkey() don't pass on the CRYPTO_TFM_REQ_FORBID_WEAK_KEYS flag to the lskcipher. This causes self-test failures for ecb(des), as weak keys are not rejected anymore.
Fix this. Fixes: 31865c4c4db2 ("crypto: skcipher - Add lskcipher") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/lskcipher.c | 8 -------- crypto/skcipher.c | 8 +++++++- crypto/skcipher.h | 2 -- 3 files changed, 7 insertions(+), 11 deletions(-) (limited to 'crypto') diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index cb6170ebcaa36c..9edc897309510b 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -194,14 +194,6 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, } EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt); -int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm); - - return crypto_lskcipher_setkey(*ctx, key, keylen); -} - static int crypto_lskcipher_crypt_sg(struct skcipher_request *req, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, diff --git a/crypto/skcipher.c b/crypto/skcipher.c index b9496dc8a609f6..ac8b8c04265429 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -621,7 +621,13 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, int err; if (cipher->co.base.cra_type != &crypto_skcipher_type) { - err = crypto_lskcipher_setkey_sg(tfm, key, keylen); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm); + + crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK); + crypto_lskcipher_set_flags(*ctx, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_lskcipher_setkey(*ctx, key, keylen); goto out; } diff --git a/crypto/skcipher.h b/crypto/skcipher.h index 6f1295f0fef24f..16c9484360dab0 100644 --- a/crypto/skcipher.h +++ b/crypto/skcipher.h @@ -20,8 +20,6 @@ static inline struct crypto_istat_cipher *skcipher_get_stat_common( #endif } -int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen); int crypto_lskcipher_encrypt_sg(struct skcipher_request *req); int crypto_lskcipher_decrypt_sg(struct skcipher_request *req); int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm); -- cgit 1.2.3-korg From 04a93202ed7c3b451bf22d3ff4bcd379df27f299 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 16 Oct 2023 13:21:44 +0800 Subject: certs: Break circular dependency when selftest is modular The modular build fails because the self-test code depends on pkcs7 which in turn depends on x509 which contains the self-test. Split the self-test out into its own module to break the cycle. Fixes: 3cde3174eb91 ("certs: Add FIPS selftests") Signed-off-by: Herbert Xu Reviewed-by: Jarkko Sakkinen Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/Kconfig | 3 ++- crypto/asymmetric_keys/Makefile | 3 ++- crypto/asymmetric_keys/selftest.c | 13 ++++++++++--- crypto/asymmetric_keys/x509_parser.h | 9 --------- crypto/asymmetric_keys/x509_public_key.c | 8 +------- 5 files changed, 15 insertions(+), 21 deletions(-) (limited to 'crypto') diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 1ef3b46d6f6e5c..59ec726b7c770e 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -76,7 +76,7 @@ config SIGNED_PE_FILE_VERIFICATION signed PE binary. config FIPS_SIGNATURE_SELFTEST - bool "Run FIPS selftests on the X.509+PKCS7 signature verification" + tristate "Run FIPS selftests on the X.509+PKCS7 signature verification" help This option causes some selftests to be run on the signature verification code, using some built in data. 
This is required @@ -84,5 +84,6 @@ config FIPS_SIGNATURE_SELFTEST depends on KEYS depends on ASYMMETRIC_KEY_TYPE depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER + depends on X509_CERTIFICATE_PARSER endif # ASYMMETRIC_KEY_TYPE diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile index 0d1fa1b692c6b2..1a273d6df3ebf4 100644 --- a/crypto/asymmetric_keys/Makefile +++ b/crypto/asymmetric_keys/Makefile @@ -22,7 +22,8 @@ x509_key_parser-y := \ x509_cert_parser.o \ x509_loader.o \ x509_public_key.o -x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o +obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o +x509_selftest-y += selftest.o $(obj)/x509_cert_parser.o: \ $(obj)/x509.asn1.h \ diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c index fa0bf7f2428495..c50da7ef90ae99 100644 --- a/crypto/asymmetric_keys/selftest.c +++ b/crypto/asymmetric_keys/selftest.c @@ -4,10 +4,11 @@ * Written by David Howells (dhowells@redhat.com) */ -#include <linux/kernel.h> +#include <crypto/pkcs7.h> #include <linux/cred.h> +#include <linux/kernel.h> #include <linux/key.h> -#include <crypto/pkcs7.h> +#include <linux/module.h> #include "x509_parser.h" struct certs_test { @@ -175,7 +176,7 @@ static const struct certs_test certs_tests[] __initconst = { TEST(certs_selftest_1_data, certs_selftest_1_pkcs7), }; -int __init fips_signature_selftest(void) +static int __init fips_signature_selftest(void) { struct key *keyring; int ret, i; @@ -222,3 +223,9 @@ int __init fips_signature_selftest(void) key_put(keyring); return 0; } + +late_initcall(fips_signature_selftest); + +MODULE_DESCRIPTION("X.509 self tests"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index a299c9c56f409e..97a886cbe01c3d 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -40,15 +40,6 @@ struct x509_certificate { bool blacklisted; }; -/* - * selftest.c - */ -#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST -extern int __init fips_signature_selftest(void); -#else -static inline int fips_signature_selftest(void) { return 0; } -#endif - /* * x509_cert_parser.c */ diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 7c71db3ac23d48..6a4f00be22fc10 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -262,15 +262,9 @@ static struct asymmetric_key_parser x509_key_parser = { /* * Module stuff */ -extern int __init certs_selftest(void); static int __init x509_key_init(void) { - int ret; - - ret = register_asymmetric_key_parser(&x509_key_parser); - if (ret < 0) - return ret; - return fips_signature_selftest(); + return register_asymmetric_key_parser(&x509_key_parser); } static void __exit x509_key_exit(void) -- cgit 1.2.3-korg From f5fb88e5301ba7b8cb85063d6b6b3bd378907e25 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 16 Oct 2023 13:57:30 +0800 Subject: crypto: rsa - Add module alias for pkcs1pad Add a module alias for pkcs1pad so that it can be auto-loaded by modprobe.
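For illustration only (not part of this patch): with the alias in place, a caller can instantiate the template without loading rsa-pkcs1pad.ko by hand, because the crypto API's template lookup asks modprobe for "crypto-pkcs1pad", which the new alias satisfies. A minimal sketch using the standard akcipher API, with error handling condensed:

	/* Instantiating the template auto-loads the pkcs1pad module
	 * via MODULE_ALIAS_CRYPTO("pkcs1pad") if it is not yet present.
	 */
	struct crypto_akcipher *tfm;

	tfm = crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... use tfm, then: */
	crypto_free_akcipher(tfm);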
Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index d2e5e104f8cfe3..49756c6ea7a1ec 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -687,3 +687,5 @@ struct crypto_template rsa_pkcs1pad_tmpl = { .create = pkcs1pad_create, .module = THIS_MODULE, }; + +MODULE_ALIAS_CRYPTO("pkcs1pad"); -- cgit 1.2.3-korg From 08debaa5cb31da50725a8cb2f06d3f617a9caa98 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 15:34:55 -0700 Subject: crypto: shash - eliminate indirect call for default import and export Most shash algorithms don't have custom ->import and ->export functions, resulting in the memcpy() based default being used. Yet, crypto_shash_import() and crypto_shash_export() still make an indirect call, which is expensive. Therefore, change how the default import and export are called to make it so that crypto_shash_import() and crypto_shash_export() don't do an indirect call in this case. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 41 +++++++++++++++++++++++++++++++++-------- include/crypto/hash.h | 15 ++------------- 2 files changed, 35 insertions(+), 21 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 15fee57cca8efd..52420c41db44ae 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -277,17 +277,34 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); -static int shash_default_export(struct shash_desc *desc, void *out) +int crypto_shash_export(struct shash_desc *desc, void *out) { - memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + + if (shash->export) + return shash->export(desc, out); + + memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm)); return 0; } +EXPORT_SYMBOL_GPL(crypto_shash_export); -static int shash_default_import(struct shash_desc *desc, const void *in) +int crypto_shash_import(struct shash_desc *desc, const void *in) { - memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + if (shash->import) + return shash->import(desc, in); + + memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); return 0; } +EXPORT_SYMBOL_GPL(crypto_shash_import); static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) @@ -666,15 +683,23 @@ static int shash_prepare_alg(struct shash_alg *alg) base->cra_type = &crypto_shash_type; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + /* + * Handle missing optional functions. For each one we can either + * install a default here, or we can leave the pointer as NULL and check + * the pointer for NULL in crypto_shash_*(), avoiding an indirect call + * when the default behavior is desired. For ->finup and ->digest we + * install defaults, since for optimal performance algorithms should + * implement these anyway. On the other hand, for ->import and + * ->export the common case and best performance comes from the simple + * memcpy of the shash_desc_ctx, so when those pointers are NULL we + * leave them NULL and provide the memcpy with no indirect call. 
+ */ if (!alg->finup) alg->finup = shash_default_finup; if (!alg->digest) alg->digest = shash_default_digest; - if (!alg->export) { - alg->export = shash_default_export; - alg->import = shash_default_import; + if (!alg->export) alg->halg.statesize = alg->descsize; - } if (!alg->setkey) alg->setkey = shash_no_setkey; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index f7c2a22cd776da..52e57e93b2f591 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -952,10 +952,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, * Context: Any context. * Return: 0 if the export creation was successful; < 0 if an error occurred */ -static inline int crypto_shash_export(struct shash_desc *desc, void *out) -{ - return crypto_shash_alg(desc->tfm)->export(desc, out); -} +int crypto_shash_export(struct shash_desc *desc, void *out); /** * crypto_shash_import() - import operational state @@ -969,15 +966,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) * Context: Any context. * Return: 0 if the import was successful; < 0 if an error occurred */ -static inline int crypto_shash_import(struct shash_desc *desc, const void *in) -{ - struct crypto_shash *tfm = desc->tfm; - - if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return crypto_shash_alg(tfm)->import(desc, in); -} +int crypto_shash_import(struct shash_desc *desc, const void *in); /** * crypto_shash_init() - (re)initialize message digest -- cgit 1.2.3-korg From 21415bfe8b5543c41b64b19674e5fcc2c942623e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:32 -0700 Subject: crypto: cbcmac - remove unnecessary alignment logic The cbcmac template is aligning a field in its desc context to the alignmask of its underlying 'cipher', at runtime. This is almost entirely pointless, since cbcmac is already using the cipher API functions that handle alignment themselves, and few ciphers set a nonzero alignmask anyway. Also, even without runtime alignment, an alignment of at least 4 bytes can be guaranteed. Thus, at best this code is optimizing for the rare case of ciphers that set an alignmask >= 7, at the cost of hurting the common cases. Therefore, remove the manual alignment code from cbcmac. 
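To make the change concrete, here is a condensed before/after sketch of the desc context handling, extracted from the patch below rather than new code:

	/* Before: the digest block sat at a runtime-computed, alignmask-
	 * dependent offset at the end of the desc context.
	 */
	struct cbcmac_desc_ctx {
		unsigned int len;
	};
	u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs;

	/* After: a flexible array member places the digest block right
	 * after 'len' with natural alignment and no pointer arithmetic.
	 */
	struct cbcmac_desc_ctx {
		unsigned int len;
		u8 dg[];
	};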
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ccm.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'crypto') diff --git a/crypto/ccm.c b/crypto/ccm.c index 7af89a5b745c44..dd7aed63efc93b 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -56,6 +56,7 @@ struct cbcmac_tfm_ctx { struct cbcmac_desc_ctx { unsigned int len; + u8 dg[]; }; static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( @@ -785,10 +786,9 @@ static int crypto_cbcmac_digest_init(struct shash_desc *pdesc) { struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_digestsize(pdesc->tfm); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs; ctx->len = 0; - memset(dg, 0, bs); + memset(ctx->dg, 0, bs); return 0; } @@ -801,18 +801,17 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p, struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; while (len > 0) { unsigned int l = min(len, bs - ctx->len); - crypto_xor(dg + ctx->len, p, l); + crypto_xor(&ctx->dg[ctx->len], p, l); ctx->len +=l; len -= l; p += l; if (ctx->len == bs) { - crypto_cipher_encrypt_one(tfm, dg, dg); + crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg); ctx->len = 0; } } @@ -827,12 +826,11 @@ static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out) struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; if (ctx->len) - crypto_cipher_encrypt_one(tfm, dg, dg); + crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg); - memcpy(out, dg, bs); + memcpy(out, ctx->dg, bs); return 0; } @@ -889,8 +887,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = 1; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = ALIGN(sizeof(struct cbcmac_desc_ctx), - alg->cra_alignmask + 1) + + inst->alg.descsize = sizeof(struct cbcmac_desc_ctx) + alg->cra_blocksize; inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx); -- cgit 1.2.3-korg From f9dc9f2e4072de356614d95940c9d7f448a4e334 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:33 -0700 Subject: crypto: cmac - remove unnecessary alignment logic The cmac template is setting its alignmask to that of its underlying 'cipher'. Yet, it doesn't care itself about how its inputs and outputs are aligned, which is ostensibly the point of the alignmask. Instead, cmac actually just uses its alignmask itself to runtime-align certain fields in its tfm and desc contexts appropriately for its underlying cipher. That is almost entirely pointless too, though, since cmac is already using the cipher API functions that handle alignment themselves, and few ciphers set a nonzero alignmask anyway. Also, even without runtime alignment, an alignment of at least 4 bytes can be guaranteed. Thus, at best this code is optimizing for the rare case of ciphers that set an alignmask >= 7, at the cost of hurting the common cases. Therefore, this patch removes the manual alignment code from cmac and makes it stop setting an alignmask. 
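A worked example of what this buys (an editorial illustration; it assumes crypto_tfm_ctx_alignment() == 8, which holds on most architectures): for a 16-byte block cipher, the old formula

	descsize = ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment())
		   + (alignmask & ~(crypto_tfm_ctx_alignment() - 1))
		   + 16 * 2;

reserves (alignmask & ~7) extra padding bytes purely for the runtime PTR_ALIGN, e.g. 56 bytes for a hypothetical cipher with cra_alignmask = 63, while the new

	descsize = sizeof(struct cmac_desc_ctx) + 16 * 2;

reserves nothing extra, since the flexible 'odds' array is already naturally aligned within the context.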
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/cmac.c | 39 +++++++++++---------------------------- 1 file changed, 11 insertions(+), 28 deletions(-) (limited to 'crypto') diff --git a/crypto/cmac.c b/crypto/cmac.c index fce6b0f58e88e7..c7aa3665b076e4 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -28,7 +28,7 @@ */ struct cmac_tfm_ctx { struct crypto_cipher *child; - u8 ctx[]; + __be64 consts[]; }; /* @@ -44,17 +44,15 @@ struct cmac_tfm_ctx { */ struct cmac_desc_ctx { unsigned int len; - u8 ctx[]; + u8 odds[]; }; static int crypto_cmac_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); unsigned int bs = crypto_shash_blocksize(parent); - __be64 *consts = PTR_ALIGN((void *)ctx->ctx, - (alignmask | (__alignof__(__be64) - 1)) + 1); + __be64 *consts = ctx->consts; u64 _const[2]; int i, err = 0; u8 msb_mask, gfmask; @@ -104,10 +102,9 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent, static int crypto_cmac_digest_init(struct shash_desc *pdesc) { - unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs; + u8 *prev = &ctx->odds[bs]; ctx->len = 0; memset(prev, 0, bs); @@ -119,12 +116,11 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); + u8 *odds = ctx->odds; u8 *prev = odds + bs; /* checking the data can fill the block */ @@ -165,14 +161,11 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *consts = PTR_ALIGN((void *)tctx->ctx, - (alignmask | (__alignof__(__be64) - 1)) + 1); - u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); + u8 *odds = ctx->odds; u8 *prev = odds + bs; unsigned int offset = 0; @@ -191,7 +184,7 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) } crypto_xor(prev, odds, bs); - crypto_xor(prev, consts + offset, bs); + crypto_xor(prev, (const u8 *)tctx->consts + offset, bs); crypto_cipher_encrypt_one(tfm, out, prev); @@ -241,7 +234,6 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; - unsigned long alignmask; u32 mask; int err; @@ -273,23 +265,14 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alignmask = alg->cra_alignmask; - inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) + + alg->cra_blocksize * 2; 
inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = - ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment()) - + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)) - + alg->cra_blocksize * 2; - - inst->alg.base.cra_ctxsize = - ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment()) - + ((alignmask | (__alignof__(__be64) - 1)) & - ~(crypto_tfm_ctx_alignment() - 1)) - + alg->cra_blocksize * 2; - + inst->alg.descsize = sizeof(struct cmac_desc_ctx) + + alg->cra_blocksize * 2; inst->alg.init = crypto_cmac_digest_init; inst->alg.update = crypto_cmac_digest_update; inst->alg.final = crypto_cmac_digest_final; -- cgit 1.2.3-korg From 25c74a39e0f637a44982c3820a583755aedc9811 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:34 -0700 Subject: crypto: hmac - remove unnecessary alignment logic The hmac template is setting its alignmask to that of its underlying unkeyed hash algorithm, and it is aligning the ipad and opad fields in its tfm context to that alignment. However, hmac does not actually need any sort of alignment itself, which makes this pointless except to keep the pads aligned to what the underlying algorithm prefers. But very few shash algorithms actually set an alignmask, and it is being removed from those remaining ones; also, after setkey, the pads are only passed to crypto_shash_import and crypto_shash_export which ignore the alignmask. Therefore, make the hmac template stop setting an alignmask and simply use natural alignment for ipad and opad. Note, this change also moves the pads from the beginning of the tfm context to the end, which makes much more sense; the variable-length fields should be at the end. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/hmac.c | 56 ++++++++++++++++++++++---------------------------------- 1 file changed, 22 insertions(+), 34 deletions(-) (limited to 'crypto') diff --git a/crypto/hmac.c b/crypto/hmac.c index ea93f4c55f251b..7cec25ff988915 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -24,31 +24,20 @@ struct hmac_ctx { struct crypto_shash *hash; + /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ + u8 pads[]; }; -static inline void *align_ptr(void *p, unsigned int align) -{ - return (void *)ALIGN((unsigned long)p, align); -} - -static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) -{ - return align_ptr(crypto_shash_ctx_aligned(tfm) + - crypto_shash_statesize(tfm) * 2, - crypto_tfm_ctx_alignment()); -} - static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); - char *ipad = crypto_shash_ctx_aligned(parent); - char *opad = ipad + ss; - struct hmac_ctx *ctx = align_ptr(opad + ss, - crypto_tfm_ctx_alignment()); - struct crypto_shash *hash = ctx->hash; + struct hmac_ctx *tctx = crypto_shash_ctx(parent); + struct crypto_shash *hash = tctx->hash; + u8 *ipad = &tctx->pads[0]; + u8 *opad = &tctx->pads[ss]; SHASH_DESC_ON_STACK(shash, hash); unsigned int i; @@ -94,16 +83,18 @@ static int hmac_export(struct shash_desc *pdesc, void *out) static int hmac_import(struct shash_desc *pdesc, const void *in) { struct shash_desc *desc = shash_desc_ctx(pdesc); - struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); - desc->tfm = ctx->hash; + desc->tfm = tctx->hash; return crypto_shash_import(desc, in); } static int hmac_init(struct shash_desc *pdesc) { - return 
hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); + + return hmac_import(pdesc, &tctx->pads[0]); } static int hmac_update(struct shash_desc *pdesc, @@ -119,7 +110,8 @@ static int hmac_final(struct shash_desc *pdesc, u8 *out) struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); - char *opad = crypto_shash_ctx_aligned(parent) + ss; + const struct hmac_ctx *tctx = crypto_shash_ctx(parent); + const u8 *opad = &tctx->pads[ss]; struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_final(desc, out) ?: @@ -134,7 +126,8 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data, struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); - char *opad = crypto_shash_ctx_aligned(parent) + ss; + const struct hmac_ctx *tctx = crypto_shash_ctx(parent); + const u8 *opad = &tctx->pads[ss]; struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_finup(desc, data, nbytes, out) ?: @@ -147,7 +140,7 @@ static int hmac_init_tfm(struct crypto_shash *parent) struct crypto_shash *hash; struct shash_instance *inst = shash_alg_instance(parent); struct crypto_shash_spawn *spawn = shash_instance_ctx(inst); - struct hmac_ctx *ctx = hmac_ctx(parent); + struct hmac_ctx *tctx = crypto_shash_ctx(parent); hash = crypto_spawn_shash(spawn); if (IS_ERR(hash)) @@ -156,14 +149,14 @@ static int hmac_init_tfm(struct crypto_shash *parent) parent->descsize = sizeof(struct shash_desc) + crypto_shash_descsize(hash); - ctx->hash = hash; + tctx->hash = hash; return 0; } static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src) { - struct hmac_ctx *sctx = hmac_ctx(src); - struct hmac_ctx *dctx = hmac_ctx(dst); + struct hmac_ctx *sctx = crypto_shash_ctx(src); + struct hmac_ctx *dctx = crypto_shash_ctx(dst); struct crypto_shash *hash; hash = crypto_clone_shash(sctx->hash); @@ -176,9 +169,9 @@ static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src) static void hmac_exit_tfm(struct crypto_shash *parent) { - struct hmac_ctx *ctx = hmac_ctx(parent); + struct hmac_ctx *tctx = crypto_shash_ctx(parent); - crypto_free_shash(ctx->hash); + crypto_free_shash(tctx->hash); } static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) @@ -225,15 +218,10 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; + inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + (ss * 2); - ss = ALIGN(ss, alg->cra_alignmask + 1); inst->alg.digestsize = ds; inst->alg.statesize = ss; - - inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + - ALIGN(ss * 2, crypto_tfm_ctx_alignment()); - inst->alg.init = hmac_init; inst->alg.update = hmac_update; inst->alg.final = hmac_final; -- cgit 1.2.3-korg From 1fb90689bc7ced529152fec406faddd0bcbf99f1 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:35 -0700 Subject: crypto: vmac - don't set alignmask The vmac template is setting its alignmask to that of its underlying 'cipher'. This doesn't actually accomplish anything useful, though, so stop doing it. (vmac_update() does have an alignment bug, where it assumes u64 alignment when it shouldn't, but that bug exists both before and after this patch.) 
This is a prerequisite for removing support for nonzero alignmasks from shash. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/vmac.c | 1 - 1 file changed, 1 deletion(-) (limited to 'crypto') diff --git a/crypto/vmac.c b/crypto/vmac.c index 4633b2dda1e0a5..0a1d8efa6c1a6f 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -649,7 +649,6 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx); inst->alg.base.cra_init = vmac_init_tfm; -- cgit 1.2.3-korg From a2b1118052c41ca92cbc2366e77b2f0ff3b054ba Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:36 -0700 Subject: crypto: xcbc - remove unnecessary alignment logic The xcbc template is setting its alignmask to that of its underlying 'cipher'. Yet, it doesn't care itself about how its inputs and outputs are aligned, which is ostensibly the point of the alignmask. Instead, xcbc actually just uses its alignmask itself to runtime-align certain fields in its tfm and desc contexts appropriately for its underlying cipher. That is almost entirely pointless too, though, since xcbc is already using the cipher API functions that handle alignment themselves, and few ciphers set a nonzero alignmask anyway. Also, even without runtime alignment, an alignment of at least 4 bytes can be guaranteed. Thus, at best this code is optimizing for the rare case of ciphers that set an alignmask >= 7, at the cost of hurting the common cases. Therefore, this patch removes the manual alignment code from xcbc and makes it stop setting an alignmask. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/xcbc.c | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) (limited to 'crypto') diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 6074c5c1da492e..a9e8ee9c1949cb 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -27,7 +27,7 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, */ struct xcbc_tfm_ctx { struct crypto_cipher *child; - u8 ctx[]; + u8 consts[]; }; /* @@ -43,7 +43,7 @@ struct xcbc_tfm_ctx { */ struct xcbc_desc_ctx { unsigned int len; - u8 ctx[]; + u8 odds[]; }; #define XCBC_BLOCKSIZE 16 @@ -51,9 +51,8 @@ struct xcbc_desc_ctx { static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); - u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *consts = ctx->consts; int err = 0; u8 key1[XCBC_BLOCKSIZE]; int bs = sizeof(key1); @@ -71,10 +70,9 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, static int crypto_xcbc_digest_init(struct shash_desc *pdesc) { - unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; + u8 *prev = &ctx->odds[bs]; ctx->len = 0; memset(prev, 0, bs); @@ -86,12 +84,11 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct 
crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *odds = ctx->odds; u8 *prev = odds + bs; /* checking the data can fill the block */ @@ -132,13 +129,11 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); - u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *odds = ctx->odds; u8 *prev = odds + bs; unsigned int offset = 0; @@ -157,7 +152,7 @@ static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) } crypto_xor(prev, odds, bs); - crypto_xor(prev, consts + offset, bs); + crypto_xor(prev, &tctx->consts[offset], bs); crypto_cipher_encrypt_one(tfm, out, prev); @@ -191,7 +186,6 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; - unsigned long alignmask; u32 mask; int err; @@ -218,21 +212,15 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alignmask = alg->cra_alignmask | 3; - inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) + + alg->cra_blocksize * 2; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), - crypto_tfm_ctx_alignment()) + - (alignmask & - ~(crypto_tfm_ctx_alignment() - 1)) + + inst->alg.descsize = sizeof(struct xcbc_desc_ctx) + alg->cra_blocksize * 2; - inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), - alignmask + 1) + - alg->cra_blocksize * 2; inst->alg.base.cra_init = xcbc_init_tfm; inst->alg.base.cra_exit = xcbc_exit_tfm; -- cgit 1.2.3-korg From 345bfa3c10ced43281877ce68ae7b3bf360afc76 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:37 -0700 Subject: crypto: shash - remove support for nonzero alignmask Currently, the shash API checks the alignment of all message, key, and digest buffers against the algorithm's declared alignmask, and for any unaligned buffers it falls back to manually aligned temporary buffers. This is virtually useless, however. In the case of the message buffer, cryptographic hash functions internally operate on fixed-size blocks, so implementations end up needing to deal with byte-aligned data anyway because the length(s) passed to ->update might not be divisible by the block size. Word-alignment of the message can theoretically be helpful for CRCs, like what was being done in crc32c-sparc64. But in practice it's better for the algorithms to use unaligned accesses or align the message themselves. A similar argument applies to the key and digest. In any case, no shash algorithms actually set a nonzero alignmask anymore. Therefore, remove support for it from shash. The benefit is that all the code to handle "misaligned" buffers in the shash API goes away, reducing the overhead of the shash API. 
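The alternative the message points to, for the rare algorithm (e.g. a CRC) that prefers word-sized loads, is to handle misalignment inside the algorithm with the unaligned access helpers instead of relying on the API. A minimal sketch; crc_step() and crc_step_byte() are hypothetical per-word and per-byte steps, not kernel functions:

	#include <asm/unaligned.h>

	static u32 crc_update(u32 crc, const u8 *p, unsigned int len)
	{
		/* get_unaligned_le32() is safe for any byte alignment of p
		 * and compiles to a plain load where unaligned access is
		 * cheap.
		 */
		while (len >= 4) {
			crc = crc_step(crc, get_unaligned_le32(p));
			p += 4;
			len -= 4;
		}
		while (len--)
			crc = crc_step_byte(crc, *p++);
		return crc;
	}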
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 128 ++++----------------------------------------------------- 1 file changed, 8 insertions(+), 120 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 52420c41db44ae..409b33f9c97cc1 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -10,15 +10,12 @@ #include #include #include -#include #include #include #include #include "hash.h" -#define MAX_SHASH_ALIGNMASK 63 - static const struct crypto_type crypto_shash_type; static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) @@ -38,27 +35,6 @@ int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(shash_no_setkey); -static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); - unsigned long absize; - u8 *buffer, *alignbuffer; - int err; - - absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); - buffer = kmalloc(absize, GFP_ATOMIC); - if (!buffer) - return -ENOMEM; - - alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - memcpy(alignbuffer, key, keylen); - err = shash->setkey(tfm, alignbuffer, keylen); - kfree_sensitive(buffer); - return err; -} - static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg) { if (crypto_shash_alg_needs_key(alg)) @@ -69,14 +45,9 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); int err; - if ((unsigned long)key & alignmask) - err = shash_setkey_unaligned(tfm, key, keylen); - else - err = shash->setkey(tfm, key, keylen); - + err = shash->setkey(tfm, key, keylen); if (unlikely(err)) { shash_set_needkey(tfm, shash); return err; @@ -87,110 +58,35 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_shash_setkey); -static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); - unsigned int unaligned_len = alignmask + 1 - - ((unsigned long)data & alignmask); - /* - * We cannot count on __aligned() working for large values: - * https://patchwork.kernel.org/patch/9507697/ - */ - u8 ubuf[MAX_SHASH_ALIGNMASK * 2]; - u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); - int err; - - if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf))) - return -EINVAL; - - if (unaligned_len > len) - unaligned_len = len; - - memcpy(buf, data, unaligned_len); - err = shash->update(desc, buf, unaligned_len); - memset(buf, 0, unaligned_len); - - return err ?: - shash->update(desc, data + unaligned_len, len - unaligned_len); -} - int crypto_shash_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); + struct shash_alg *shash = crypto_shash_alg(desc->tfm); int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) atomic64_add(len, &shash_get_stat(shash)->hash_tlen); - if ((unsigned long)data & alignmask) - err = shash_update_unaligned(desc, data, len); - else - err = shash->update(desc, data, len); + err = shash->update(desc, data, len); return crypto_shash_errstat(shash, err); } 
EXPORT_SYMBOL_GPL(crypto_shash_update); -static int shash_final_unaligned(struct shash_desc *desc, u8 *out) -{ - struct crypto_shash *tfm = desc->tfm; - unsigned long alignmask = crypto_shash_alignmask(tfm); - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned int ds = crypto_shash_digestsize(tfm); - /* - * We cannot count on __aligned() working for large values: - * https://patchwork.kernel.org/patch/9507697/ - */ - u8 ubuf[MAX_SHASH_ALIGNMASK + HASH_MAX_DIGESTSIZE]; - u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); - int err; - - if (WARN_ON(buf + ds > ubuf + sizeof(ubuf))) - return -EINVAL; - - err = shash->final(desc, buf); - if (err) - goto out; - - memcpy(out, buf, ds); - -out: - memset(buf, 0, ds); - return err; -} - int crypto_shash_final(struct shash_desc *desc, u8 *out) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); + struct shash_alg *shash = crypto_shash_alg(desc->tfm); int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) atomic64_inc(&shash_get_stat(shash)->hash_cnt); - if ((unsigned long)out & alignmask) - err = shash_final_unaligned(desc, out); - else - err = shash->final(desc, out); + err = shash->final(desc, out); return crypto_shash_errstat(shash, err); } EXPORT_SYMBOL_GPL(crypto_shash_final); -static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return shash_update_unaligned(desc, data, len) ?: - shash_final_unaligned(desc, out); -} - static int shash_default_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -205,7 +101,6 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, { struct crypto_shash *tfm = desc->tfm; struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { @@ -215,11 +110,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, atomic64_add(len, &istat->hash_tlen); } - if (((unsigned long)data | (unsigned long)out) & alignmask) - err = shash_finup_unaligned(desc, data, len, out); - else - err = shash->finup(desc, data, len, out); - + err = shash->finup(desc, data, len, out); return crypto_shash_errstat(shash, err); } @@ -239,7 +130,6 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, { struct crypto_shash *tfm = desc->tfm; struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { @@ -251,9 +141,6 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) err = -ENOKEY; - else if (((unsigned long)data | (unsigned long)out) & alignmask) - err = shash->init(desc) ?: - shash_finup_unaligned(desc, data, len, out); else err = shash->digest(desc, data, len, out); @@ -670,7 +557,8 @@ static int shash_prepare_alg(struct shash_alg *alg) if (alg->descsize > HASH_MAX_DESCSIZE) return -EINVAL; - if (base->cra_alignmask > MAX_SHASH_ALIGNMASK) + /* alignmask is not useful for shash, so it is not supported. 
*/ + if (base->cra_alignmask) return -EINVAL; if ((alg->export && !alg->import) || (alg->import && !alg->export)) -- cgit 1.2.3-korg From eed577b9a9220dc9f3968b54d055a3884d219897 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:39 -0700 Subject: crypto: drbg - stop checking crypto_shash_alignmask Now that the shash algorithm type does not support nonzero alignmasks, crypto_shash_alignmask() always returns 0 and will be removed. In preparation for this, stop checking crypto_shash_alignmask() in drbg. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/drbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/drbg.c b/crypto/drbg.c index ff4ebbc68efab1..e01f8c7769d036 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1698,7 +1698,7 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg) sdesc->shash.tfm = tfm; drbg->priv_data = sdesc; - return crypto_shash_alignmask(tfm); + return 0; } static int drbg_fini_hash_kernel(struct drbg_state *drbg) -- cgit 1.2.3-korg From 2125c11efd83a5aeb9bf04bcbc83b49e8d7e2afb Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:40 -0700 Subject: crypto: testmgr - stop checking crypto_shash_alignmask Now that the shash algorithm type does not support nonzero alignmasks, crypto_shash_alignmask() always returns 0 and will be removed. In preparation for this, stop checking crypto_shash_alignmask() in testmgr. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 54135c7610f063..48a0929c7a158d 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1275,7 +1275,6 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, u8 *hashstate) { struct crypto_shash *tfm = desc->tfm; - const unsigned int alignmask = crypto_shash_alignmask(tfm); const unsigned int digestsize = crypto_shash_digestsize(tfm); const unsigned int statesize = crypto_shash_statesize(tfm); const char *driver = crypto_shash_driver_name(tfm); @@ -1287,7 +1286,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, /* Set the key, if specified */ if (vec->ksize) { err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize, - cfg, alignmask); + cfg, 0); if (err) { if (err == vec->setkey_error) return 0; @@ -1304,7 +1303,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, } /* Build the scatterlist for the source data */ - err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); + err = build_hash_sglist(tsgl, vec, cfg, 0, divs); if (err) { pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", driver, vec_name, cfg->name); -- cgit 1.2.3-korg From 321dfe9777a88cdafe676bb03bab07af26c6cfd8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:41 -0700 Subject: crypto: adiantum - stop using alignmask of shash_alg Now that the shash algorithm type does not support nonzero alignmasks, shash_alg::base.cra_alignmask is always 0, so OR-ing it into another value is a no-op. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index aa0f8b3b499adc..9ff3376f9ed35f 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -590,8 +590,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); - inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | - hash_alg->base.cra_alignmask; + inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask; /* * The block cipher is only invoked once per message, so for long * messages (e.g. sectors for disk encryption) its performance doesn't -- cgit 1.2.3-korg From f6f1514cf72e5d9c2b7c2bc53c43f482b6183d29 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:42 -0700 Subject: crypto: hctr2 - stop using alignmask of shash_alg Now that the shash algorithm type does not support nonzero alignmasks, shash_alg::base.cra_alignmask is always 0, so OR-ing it into another value is a no-op. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/hctr2.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/hctr2.c b/crypto/hctr2.c index 653fde727f0fa9..87e7547ad18623 100644 --- a/crypto/hctr2.c +++ b/crypto/hctr2.c @@ -485,8 +485,7 @@ static int hctr2_create_common(struct crypto_template *tmpl, inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) + polyval_alg->statesize * 2; - inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask | - polyval_alg->base.cra_alignmask; + inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask; /* * The hash function is called twice, so it is weighted higher than the * xctr and blockcipher. -- cgit 1.2.3-korg From cf27d9475f37fb69b5bc293e6e6d6c1d03cf7cc6 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Thu, 19 Oct 2023 09:40:42 +0200 Subject: crypto: jitter - use permanent health test storage The health test result in the current code is only given for the currently processed raw time stamp. This implies that, to react to a health test error, the result must be checked after each raw time stamp is processed. To avoid this constant checking requirement, any health test error is now recorded and stored, to be analyzed at a later time if needed. This change ensures that the power-up test catches any health test error. Without this patch, the power-up health test result is not enforced. The introduced changes are already in use with the user space version of the Jitter RNG. Fixes: 04597c8dd6c4 ("jitter - add RCT/APT support for different OSRs") Reported-by: Joachim Vandersmissen Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy.c | 125 +++++++++++++++++++++++++++++-------------------- 1 file changed, 74 insertions(+), 51 deletions(-) (limited to 'crypto') diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 09c9db90c15426..26a9048bc893d9 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -100,6 +100,8 @@ struct rand_data { unsigned int apt_observations; /* Number of collected observations */ unsigned int apt_count; /* APT counter */ unsigned int apt_base; /* APT base reference */ + unsigned int health_failure; /* Record health failure */ + unsigned int apt_base_set:1; /* APT base reference set?
*/ }; @@ -121,6 +123,13 @@ struct rand_data { #define JENT_EHASH 11 /* Hash self test failed */ #define JENT_EMEM 12 /* Can't allocate memory for initialization */ +#define JENT_RCT_FAILURE 1 /* Failure in RCT health test. */ +#define JENT_APT_FAILURE 2 /* Failure in APT health test. */ +#define JENT_PERMANENT_FAILURE_SHIFT 16 +#define JENT_PERMANENT_FAILURE(x) (x << JENT_PERMANENT_FAILURE_SHIFT) +#define JENT_RCT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_RCT_FAILURE) +#define JENT_APT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_APT_FAILURE) + /* * The output n bits can receive more than n bits of min entropy, of course, * but the fixed output of the conditioning function can only asymptotically @@ -215,26 +224,22 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) return; } - if (delta_masked == ec->apt_base) + if (delta_masked == ec->apt_base) { ec->apt_count++; + /* Note, ec->apt_count starts with one. */ + if (ec->apt_count >= ec->apt_cutoff_permanent) + ec->health_failure |= JENT_APT_FAILURE_PERMANENT; + else if (ec->apt_count >= ec->apt_cutoff) + ec->health_failure |= JENT_APT_FAILURE; + } + ec->apt_observations++; if (ec->apt_observations >= JENT_APT_WINDOW_SIZE) jent_apt_reset(ec, delta_masked); } -/* APT health test failure detection */ -static int jent_apt_permanent_failure(struct rand_data *ec) -{ - return (ec->apt_count >= ec->apt_cutoff_permanent) ? 1 : 0; -} - -static int jent_apt_failure(struct rand_data *ec) -{ - return (ec->apt_count >= ec->apt_cutoff) ? 1 : 0; -} - /*************************************************************************** * Stuck Test and its use as Repetition Count Test * @@ -261,6 +266,30 @@ static void jent_rct_insert(struct rand_data *ec, int stuck) { if (stuck) { ec->rct_count++; + + /* + * The cutoff value is based on the following consideration: + * alpha = 2^-30 or 2^-60 as recommended in SP800-90B. + * In addition, we require an entropy value H of 1/osr as this + * is the minimum entropy required to provide full entropy. + * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr + * deltas for inserting them into the entropy pool which should + * then have (close to) DATA_SIZE_BITS bits of entropy in the + * conditioned output. + * + * Note, ec->rct_count (which equals to value B in the pseudo + * code of SP800-90B section 4.4.1) starts with zero. Hence + * we need to subtract one from the cutoff value as calculated + * following SP800-90B. Thus C = ceil(-log_2(alpha)/H) = 30*osr + * or 60*osr. + */ + if ((unsigned int)ec->rct_count >= (60 * ec->osr)) { + ec->rct_count = -1; + ec->health_failure |= JENT_RCT_FAILURE_PERMANENT; + } else if ((unsigned int)ec->rct_count >= (30 * ec->osr)) { + ec->rct_count = -1; + ec->health_failure |= JENT_RCT_FAILURE; + } } else { /* Reset RCT */ ec->rct_count = 0; @@ -316,38 +345,24 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) } /* - * The cutoff value is based on the following consideration: - * alpha = 2^-30 or 2^-60 as recommended in SP800-90B. - * In addition, we require an entropy value H of 1/osr as this is the minimum - * entropy required to provide full entropy. - * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr deltas for - * inserting them into the entropy pool which should then have (close to) - * DATA_SIZE_BITS bits of entropy in the conditioned output. - * - * Note, ec->rct_count (which equals to value B in the pseudo code of SP800-90B - * section 4.4.1) starts with zero. 
Hence we need to subtract one from the - * cutoff value as calculated following SP800-90B. Thus - * C = ceil(-log_2(alpha)/H) = 30*osr or 60*osr. + * Report any health test failures + * + * @ec [in] Reference to entropy collector + * + * @return a bitmask indicating which tests failed + * 0 No health test failure + * 1 RCT failure + * 2 APT failure + * 1<<JENT_PERMANENT_FAILURE_SHIFT RCT permanent failure + * 2<<JENT_PERMANENT_FAILURE_SHIFT APT permanent failure */ -static int jent_rct_permanent_failure(struct rand_data *ec) +static unsigned int jent_health_failure(struct rand_data *ec) { - return (ec->rct_count >= (60 * ec->osr)) ? 1 : 0; -} + /* Test is only enabled in FIPS mode */ + if (!fips_enabled) + return 0; -static int jent_rct_failure(struct rand_data *ec) -{ - return (ec->rct_count >= (30 * ec->osr)) ? 1 : 0; -} - -/* Report of health test failures */ -static int jent_health_failure(struct rand_data *ec) -{ - return jent_rct_failure(ec) | jent_apt_failure(ec); -} - -static int jent_permanent_health_failure(struct rand_data *ec) -{ - return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec); + return ec->health_failure; } /*************************************************************************** @@ -594,11 +609,12 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, return -1; while (len > 0) { - unsigned int tocopy; + unsigned int tocopy, health_test_result; jent_gen_entropy(ec); - if (jent_permanent_health_failure(ec)) { + health_test_result = jent_health_failure(ec); + if (health_test_result > JENT_PERMANENT_FAILURE_SHIFT) { /* * At this point, the Jitter RNG instance is considered * as a failed instance. There is no rerun of the * startup test any more, because the caller * is assumed to not further use this instance. */ return -3; - } else if (jent_health_failure(ec)) { + } else if (health_test_result) { /* * Perform startup health tests and return permanent * error if it fails. */ - if (jent_entropy_init(0, 0, NULL, ec)) + if (jent_entropy_init(0, 0, NULL, ec)) { + /* Mark the permanent error */ + ec->health_failure &= + JENT_RCT_FAILURE_PERMANENT | + JENT_APT_FAILURE_PERMANENT; return -3; + } return -2; } @@ -695,6 +716,7 @@ int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, */ struct rand_data *ec = p_ec; int i, time_backwards = 0, ret = 0, ec_free = 0; + unsigned int health_test_result; if (!ec) { ec = jent_entropy_collector_alloc(osr, flags, hash_state); @@ -708,6 +730,9 @@ int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, ec->apt_base_set = 0; /* Reset the RCT */ ec->rct_count = 0; + /* Reset intermittent, leave permanent health test result */ + ec->health_failure &= (~JENT_RCT_FAILURE); + ec->health_failure &= (~JENT_APT_FAILURE); } /* We could perform statistical tests here, but the problem is @@ -788,12 +813,10 @@ int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, } /* Did we encounter a health test failure? */ - if (jent_rct_failure(ec)) { - ret = JENT_ERCT; - goto out; - } - if (jent_apt_failure(ec)) { - ret = JENT_EHEALTH; + health_test_result = jent_health_failure(ec); + if (health_test_result) { + ret = (health_test_result & JENT_RCT_FAILURE) ? JENT_ERCT : + JENT_EHEALTH; goto out; } -- cgit 1.2.3-korg From 201c0da4d0298cfcd06a4548f94b90e3817b2e1b Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 21 Oct 2023 13:23:44 +0200 Subject: treewide: Add SPDX identifier to IETF ASN.1 modules Per section 4.c.
of the IETF Trust Legal Provisions, "Code Components" in IETF Documents are licensed on the terms of the BSD-3-Clause license: https://trustee.ietf.org/documents/trust-legal-provisions/tlp-5/ The term "Code Components" specifically includes ASN.1 modules: https://trustee.ietf.org/documents/trust-legal-provisions/code-components-list-3/ Add an SPDX identifier as well as a copyright notice pursuant to section 6.d. of the Trust Legal Provisions to all ASN.1 modules in the tree which are derived from IETF Documents. Section 4.d. of the Trust Legal Provisions requests that each Code Component identify the RFC from which it is taken, so link that RFC in every ASN.1 module. Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/pkcs7.asn1 | 7 +++++++ crypto/asymmetric_keys/pkcs8.asn1 | 6 ++++++ crypto/asymmetric_keys/x509.asn1 | 7 +++++++ crypto/asymmetric_keys/x509_akid.asn1 | 5 +++++ crypto/rsaprivkey.asn1 | 7 +++++++ crypto/rsapubkey.asn1 | 7 +++++++ fs/smb/server/ksmbd_spnego_negtokeninit.asn1 | 8 ++++++++ fs/smb/server/ksmbd_spnego_negtokentarg.asn1 | 7 +++++++ net/ipv4/netfilter/nf_nat_snmp_basic.asn1 | 8 ++++++++ 9 files changed, 62 insertions(+) (limited to 'crypto') diff --git a/crypto/asymmetric_keys/pkcs7.asn1 b/crypto/asymmetric_keys/pkcs7.asn1 index 1eca740b816ace..28e1f4a41c1442 100644 --- a/crypto/asymmetric_keys/pkcs7.asn1 +++ b/crypto/asymmetric_keys/pkcs7.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2009 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5652#section-3 + PKCS7ContentInfo ::= SEQUENCE { contentType ContentType ({ pkcs7_check_content_type }), content [0] EXPLICIT SignedData OPTIONAL diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1 index 702c41a3c7137a..a2a8af2633d80e 100644 --- a/crypto/asymmetric_keys/pkcs8.asn1 +++ b/crypto/asymmetric_keys/pkcs8.asn1 @@ -1,3 +1,9 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2010 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5958#section-2 -- -- This is the unencrypted variant -- diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1 index 92d59c32f96a8e..feb9573cacce07 100644 --- a/crypto/asymmetric_keys/x509.asn1 +++ b/crypto/asymmetric_keys/x509.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2008 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5280#section-4 + Certificate ::= SEQUENCE { tbsCertificate TBSCertificate ({ x509_note_tbs_certificate }), signatureAlgorithm AlgorithmIdentifier, diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1 index c7818ff4e606f8..0f8355cf190780 100644 --- a/crypto/asymmetric_keys/x509_akid.asn1 +++ b/crypto/asymmetric_keys/x509_akid.asn1 @@ -1,3 +1,8 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2008 IETF Trust and the persons identified as authors +-- of the code +-- -- X.509 AuthorityKeyIdentifier -- rfc5280 section 4.2.1.1 diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1 index 4ce06758e8af75..76865124a9c716 100644 --- a/crypto/rsaprivkey.asn1 +++ b/crypto/rsaprivkey.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2016 IETF Trust and the persons identified as authors +-- of the code +-- +-- 
https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.2 + RsaPrivKey ::= SEQUENCE { version INTEGER, n INTEGER ({ rsa_get_n }), diff --git a/crypto/rsapubkey.asn1 b/crypto/rsapubkey.asn1 index 725498e461d25f..0d32b1ca6270f7 100644 --- a/crypto/rsapubkey.asn1 +++ b/crypto/rsapubkey.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2016 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.1 + RsaPubKey ::= SEQUENCE { n INTEGER ({ rsa_get_n }), e INTEGER ({ rsa_get_e }) diff --git a/fs/smb/server/ksmbd_spnego_negtokeninit.asn1 b/fs/smb/server/ksmbd_spnego_negtokeninit.asn1 index 0065f191b54b7a..001513806fc0d8 100644 --- a/fs/smb/server/ksmbd_spnego_negtokeninit.asn1 +++ b/fs/smb/server/ksmbd_spnego_negtokeninit.asn1 @@ -1,3 +1,11 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 1998, 2000 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc2478#section-3.2.1 +-- https://www.rfc-editor.org/rfc/rfc2743#section-3.1 + GSSAPI ::= [APPLICATION 0] IMPLICIT SEQUENCE { thisMech diff --git a/fs/smb/server/ksmbd_spnego_negtokentarg.asn1 b/fs/smb/server/ksmbd_spnego_negtokentarg.asn1 index 1151933e7b9c58..797e485d57f1ed 100644 --- a/fs/smb/server/ksmbd_spnego_negtokentarg.asn1 +++ b/fs/smb/server/ksmbd_spnego_negtokentarg.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 1998 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc2478#section-3.2.1 + GSSAPI ::= CHOICE { negTokenInit diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.asn1 b/net/ipv4/netfilter/nf_nat_snmp_basic.asn1 index 24b73268f362f0..dc2cc579416095 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.asn1 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.asn1 @@ -1,3 +1,11 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 1990, 2002 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc1157#section-4 +-- https://www.rfc-editor.org/rfc/rfc3416#section-3 + Message ::= SEQUENCE { version -- cgit 1.2.3-korg From c626910f3f1bbce6ad18bc613d895d2a089ed95e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:44 -0700 Subject: crypto: ahash - remove support for nonzero alignmask Currently, the ahash API checks the alignment of all key and result buffers against the algorithm's declared alignmask, and for any unaligned buffers it falls back to manually aligned temporary buffers. This is virtually useless, however. First, since it does not apply to the message, its effect is much more limited than e.g. is the case for the alignmask for "skcipher". Second, the key and result buffers are given as virtual addresses and cannot (in general) be DMA'ed into, so drivers end up having to copy to/from them in software anyway. As a result it's easy to use memcpy() or the unaligned access helpers. The crypto_hash_walk_*() helper functions do use the alignmask to align the message. But with one exception those are only used for shash algorithms being exposed via the ahash API, not for native ahashes, and aligning the message is not required in this case, especially now that alignmask support has been removed from shash. The exception is the n2_core driver, which doesn't set an alignmask. In any case, no ahash algorithms actually set a nonzero alignmask anymore. Therefore, remove support for it from ahash. 
The benefit is that all the code to handle "misaligned" buffers in the ahash API goes away, reducing the overhead of the ahash API. This follows the same change that was made to shash. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- Documentation/crypto/devel-algos.rst | 4 +- crypto/ahash.c | 117 +++-------------------------------- crypto/shash.c | 8 +-- include/crypto/internal/hash.h | 4 +- include/linux/crypto.h | 27 ++++---- 5 files changed, 28 insertions(+), 132 deletions(-) (limited to 'crypto') diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst index 3506899ef83e39..9b7782f4f6e0a8 100644 --- a/Documentation/crypto/devel-algos.rst +++ b/Documentation/crypto/devel-algos.rst @@ -235,6 +235,4 @@ Specifics Of Asynchronous HASH Transformation Some of the drivers will want to use the Generic ScatterWalk in case the implementation needs to be fed separate chunks of the scatterlist which -contains the input data. The buffer containing the resulting hash will -always be properly aligned to .cra_alignmask so there is no need to -worry about this. +contains the input data. diff --git a/crypto/ahash.c b/crypto/ahash.c index 213bb3e9f2451c..744fd3b8ea2588 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -35,21 +35,12 @@ struct ahash_request_priv { static int hash_walk_next(struct crypto_hash_walk *walk) { - unsigned int alignmask = walk->alignmask; unsigned int offset = walk->offset; unsigned int nbytes = min(walk->entrylen, ((unsigned int)(PAGE_SIZE)) - offset); walk->data = kmap_local_page(walk->pg); walk->data += offset; - - if (offset & alignmask) { - unsigned int unaligned = alignmask + 1 - (offset & alignmask); - - if (nbytes > unaligned) - nbytes = unaligned; - } - walk->entrylen -= nbytes; return nbytes; } @@ -73,23 +64,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk) int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { - unsigned int alignmask = walk->alignmask; - walk->data -= walk->offset; - if (walk->entrylen && (walk->offset & alignmask) && !err) { - unsigned int nbytes; - - walk->offset = ALIGN(walk->offset, alignmask + 1); - nbytes = min(walk->entrylen, - (unsigned int)(PAGE_SIZE - walk->offset)); - if (nbytes) { - walk->entrylen -= nbytes; - walk->data += walk->offset; - return nbytes; - } - } - kunmap_local(walk->data); crypto_yield(walk->flags); @@ -121,7 +97,6 @@ int crypto_hash_walk_first(struct ahash_request *req, return 0; } - walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); walk->sg = req->src; walk->flags = req->base.flags; @@ -129,26 +104,6 @@ int crypto_hash_walk_first(struct ahash_request *req, } EXPORT_SYMBOL_GPL(crypto_hash_walk_first); -static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - unsigned long alignmask = crypto_ahash_alignmask(tfm); - int ret; - u8 *buffer, *alignbuffer; - unsigned long absize; - - absize = keylen + alignmask; - buffer = kmalloc(absize, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - memcpy(alignbuffer, key, keylen); - ret = tfm->setkey(tfm, alignbuffer, keylen); - kfree_sensitive(buffer); - return ret; -} - static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -167,13 +122,7 @@ static void ahash_set_needkey(struct crypto_ahash *tfm) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - unsigned long alignmask = crypto_ahash_alignmask(tfm); - int 
err; - - if ((unsigned long)key & alignmask) - err = ahash_setkey_unaligned(tfm, key, keylen); - else - err = tfm->setkey(tfm, key, keylen); + int err = tfm->setkey(tfm, key, keylen); if (unlikely(err)) { ahash_set_needkey(tfm); @@ -189,7 +138,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, bool has_state) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned long alignmask = crypto_ahash_alignmask(tfm); unsigned int ds = crypto_ahash_digestsize(tfm); struct ahash_request *subreq; unsigned int subreq_size; @@ -203,7 +151,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment()); subreq_size += reqsize; subreq_size += ds; - subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); flags = ahash_request_flags(req); gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; @@ -215,7 +162,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, ahash_request_set_callback(subreq, flags, cplt, req); result = (u8 *)(subreq + 1) + reqsize; - result = PTR_ALIGN(result, alignmask + 1); ahash_request_set_crypt(subreq, req->src, result, req->nbytes); @@ -251,56 +197,6 @@ static void ahash_restore_req(struct ahash_request *req, int err) kfree_sensitive(subreq); } -static void ahash_op_unaligned_done(void *data, int err) -{ - struct ahash_request *areq = data; - - if (err == -EINPROGRESS) - goto out; - - /* First copy req->result into req->priv.result */ - ahash_restore_req(areq, err); - -out: - /* Complete the ORIGINAL request. */ - ahash_request_complete(areq, err); -} - -static int ahash_op_unaligned(struct ahash_request *req, - int (*op)(struct ahash_request *), - bool has_state) -{ - int err; - - err = ahash_save_req(req, ahash_op_unaligned_done, has_state); - if (err) - return err; - - err = op(req->priv); - if (err == -EINPROGRESS || err == -EBUSY) - return err; - - ahash_restore_req(req, err); - - return err; -} - -static int crypto_ahash_op(struct ahash_request *req, - int (*op)(struct ahash_request *), - bool has_state) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned long alignmask = crypto_ahash_alignmask(tfm); - int err; - - if ((unsigned long)req->result & alignmask) - err = ahash_op_unaligned(req, op, has_state); - else - err = op(req); - - return crypto_hash_errstat(crypto_hash_alg_common(tfm), err); -} - int crypto_ahash_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -309,7 +205,7 @@ int crypto_ahash_final(struct ahash_request *req) if (IS_ENABLED(CONFIG_CRYPTO_STATS)) atomic64_inc(&hash_get_stat(alg)->hash_cnt); - return crypto_ahash_op(req, tfm->final, true); + return crypto_hash_errstat(alg, tfm->final(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_final); @@ -325,7 +221,7 @@ int crypto_ahash_finup(struct ahash_request *req) atomic64_add(req->nbytes, &istat->hash_tlen); } - return crypto_ahash_op(req, tfm->finup, true); + return crypto_hash_errstat(alg, tfm->finup(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); @@ -333,6 +229,7 @@ int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { struct crypto_istat_hash *istat = hash_get_stat(alg); @@ -342,9 +239,11 @@ int crypto_ahash_digest(struct ahash_request *req) } if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return crypto_hash_errstat(alg, 
-ENOKEY); + err = -ENOKEY; + else + err = tfm->digest(req); - return crypto_ahash_op(req, tfm->digest, false); + return crypto_hash_errstat(alg, err); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); diff --git a/crypto/shash.c b/crypto/shash.c index 409b33f9c97cc1..359702c2cd02bf 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -541,6 +541,10 @@ int hash_prepare_alg(struct hash_alg_common *alg) if (alg->digestsize > HASH_MAX_DIGESTSIZE) return -EINVAL; + /* alignmask is not useful for hashes, so it is not supported. */ + if (base->cra_alignmask) + return -EINVAL; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) @@ -557,10 +561,6 @@ static int shash_prepare_alg(struct shash_alg *alg) if (alg->descsize > HASH_MAX_DESCSIZE) return -EINVAL; - /* alignmask is not useful for shash, so it is not supported. */ - if (base->cra_alignmask) - return -EINVAL; - if ((alg->export && !alg->import) || (alg->import && !alg->export)) return -EINVAL; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 8d0cd0c591a092..59c707e4dea467 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -18,15 +18,13 @@ struct crypto_hash_walk { char *data; unsigned int offset; - unsigned int alignmask; + unsigned int flags; struct page *pg; unsigned int entrylen; unsigned int total; struct scatterlist *sg; - - unsigned int flags; }; struct ahash_instance { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index f3c3a3b27facd8..b164da5e129e82 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -110,7 +110,6 @@ * crypto_aead_walksize() (with the remainder going at the end), no chunk * can cross a page boundary or a scatterlist element boundary. * ahash: - * - The result buffer must be aligned to the algorithm's alignmask. * - crypto_ahash_finup() must not be used unless the algorithm implements * ->finup() natively. */ @@ -278,18 +277,20 @@ struct compress_alg { * @cra_ctxsize: Size of the operational context of the transformation. This * value informs the kernel crypto API about the memory size * needed to be allocated for the transformation context. - * @cra_alignmask: Alignment mask for the input and output data buffer. The data - * buffer containing the input data for the algorithm must be - * aligned to this alignment mask. The data buffer for the - * output data must be aligned to this alignment mask. Note that - * the Crypto API will do the re-alignment in software, but - * only under special conditions and there is a performance hit. - * The re-alignment happens at these occasions for different - * @cra_u types: cipher -- For both input data and output data - * buffer; ahash -- For output hash destination buf; shash -- - * For output hash destination buf. - * This is needed on hardware which is flawed by design and - * cannot pick data from arbitrary addresses. + * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is + * 1 less than the alignment, in bytes, that the algorithm + * implementation requires for input and output buffers. When + * the crypto API is invoked with buffers that are not aligned + * to this alignment, the crypto API automatically utilizes + * appropriately aligned temporary buffers to comply with what + * the algorithm needs. (For scatterlists this happens only if + * the algorithm uses the skcipher_walk helper functions.) This + * misalignment handling carries a performance penalty, so it is + * preferred that algorithms do not set a nonzero alignmask. 
+ * Also, crypto API users may wish to allocate buffers aligned + * to the alignmask of the algorithm being used, in order to + * avoid the API having to realign them. Note: the alignmask is + * not supported for hash algorithms and is always 0 for them. * @cra_priority: Priority of this transformation implementation. In case * multiple transformations with same @cra_name are available to * the Crypto API, the kernel will use the one with highest -- cgit 1.2.3-korg From 58e4bb5f16e7b643f7ee5d30cd355cefb44b2da9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:45 -0700 Subject: crypto: authenc - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify the code in authenc accordingly. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/authenc.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index fa896ab143bdf5..3aaf3ab4e360fa 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -141,9 +141,6 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) u8 *hash = areq_ctx->tail; int err; - hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), - crypto_ahash_alignmask(auth) + 1); - ahash_request_set_tfm(ahreq, auth); ahash_request_set_crypt(ahreq, req->dst, hash, req->assoclen + req->cryptlen); @@ -286,9 +283,6 @@ static int crypto_authenc_decrypt(struct aead_request *req) u8 *hash = areq_ctx->tail; int err; - hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), - crypto_ahash_alignmask(auth) + 1); - ahash_request_set_tfm(ahreq, auth); ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen - authsize); @@ -400,8 +394,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl, goto err_free_inst; enc = crypto_spawn_skcipher_alg_common(&ctx->enc); - ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, - auth_base->cra_alignmask + 1); + ctx->reqoff = 2 * auth->digestsize; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, @@ -418,8 +411,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl, inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; - inst->alg.base.cra_alignmask = auth_base->cra_alignmask | - enc->base.cra_alignmask; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); inst->alg.ivsize = enc->ivsize; -- cgit 1.2.3-korg From 03be4e45074e2a9cb5e06bf527141716262574e6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:46 -0700 Subject: crypto: authencesn - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify the code in authenc accordingly. 
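To see why dropping these ALIGN()/PTR_ALIGN() round-ups is safe once the hash
alignmask is guaranteed to be 0, here is a standalone sketch of the same
arithmetic (plain C, not kernel code; align_up() mirrors the kernel's ALIGN()
macro): rounding up to a multiple of alignmask + 1 == 1 is the identity, so the
hash pointer is simply areq_ctx->tail with no explicit alignment.

	#include <assert.h>
	#include <stdint.h>

	/* Same round-up arithmetic as the kernel's ALIGN()/PTR_ALIGN() */
	static uintptr_t align_up(uintptr_t p, uintptr_t align)
	{
		return (p + align - 1) & ~(align - 1);
	}

	int main(void)
	{
		uintptr_t p = 0x1003;

		/* alignmask == 0 gives an alignment of 0 + 1 == 1: a no-op */
		assert(align_up(p, 0 + 1) == p);
		/* a nonzero mask such as 0x3 would still round up */
		assert(align_up(p, 0x3 + 1) == 0x1004);
		return 0;
	}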
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/authencesn.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) (limited to 'crypto') diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 60e9568f023f6e..2cc933e2f79011 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -87,11 +87,8 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req, unsigned int flags) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); - struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); - struct crypto_ahash *auth = ctx->auth; - u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *hash = areq_ctx->tail; unsigned int authsize = crypto_aead_authsize(authenc_esn); unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; @@ -122,8 +119,7 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct crypto_ahash *auth = ctx->auth; - u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *hash = areq_ctx->tail; struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int authsize = crypto_aead_authsize(authenc_esn); unsigned int assoclen = req->assoclen; @@ -224,8 +220,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, struct skcipher_request *skreq = (void *)(areq_ctx->tail + ctx->reqoff); struct crypto_ahash *auth = ctx->auth; - u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *ohash = areq_ctx->tail; unsigned int cryptlen = req->cryptlen - authsize; unsigned int assoclen = req->assoclen; struct scatterlist *dst = req->dst; @@ -272,8 +267,7 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req) struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int authsize = crypto_aead_authsize(authenc_esn); struct crypto_ahash *auth = ctx->auth; - u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *ohash = areq_ctx->tail; unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; u8 *ihash = ohash + crypto_ahash_digestsize(auth); @@ -344,8 +338,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) ctx->enc = enc; ctx->null = null; - ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth), - crypto_ahash_alignmask(auth) + 1); + ctx->reqoff = 2 * crypto_ahash_digestsize(auth); crypto_aead_set_reqsize( tfm, @@ -431,8 +424,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; - inst->alg.base.cra_alignmask = auth_base->cra_alignmask | - enc->base.cra_alignmask; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); inst->alg.ivsize = enc->ivsize; -- cgit 1.2.3-korg From 93f367a9a41ab539e905b1ea91254868e4cf8faa Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:47 -0700 Subject: crypto: testmgr - stop checking crypto_ahash_alignmask Now that the alignmask for ahash and shash algorithms is always 0, crypto_ahash_alignmask() always returns 0 and will be removed. 
In preparation for this, stop checking crypto_ahash_alignmask() in testmgr. As a result of this change, test_sg_division::offset_relative_to_alignmask and testvec_config::key_offset_relative_to_alignmask no longer have any effect on ahash (or shash) algorithms. Therefore, also stop setting these flags in default_hash_testvec_configs[]. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 48a0929c7a158d..335449a27f7575 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -408,17 +408,15 @@ static const struct testvec_config default_hash_testvec_configs[] = { .finalization_type = FINALIZATION_TYPE_FINAL, .key_offset = 1, }, { - .name = "digest buffer aligned only to alignmask", + .name = "digest misaligned buffer", .src_divs = { { .proportion_of_total = 10000, .offset = 1, - .offset_relative_to_alignmask = true, }, }, .finalization_type = FINALIZATION_TYPE_DIGEST, .key_offset = 1, - .key_offset_relative_to_alignmask = true, }, { .name = "init+update+update+final two even splits", .src_divs = { @@ -1458,7 +1456,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, u8 *hashstate) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - const unsigned int alignmask = crypto_ahash_alignmask(tfm); const unsigned int digestsize = crypto_ahash_digestsize(tfm); const unsigned int statesize = crypto_ahash_statesize(tfm); const char *driver = crypto_ahash_driver_name(tfm); @@ -1474,7 +1471,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, /* Set the key, if specified */ if (vec->ksize) { err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize, - cfg, alignmask); + cfg, 0); if (err) { if (err == vec->setkey_error) return 0; @@ -1491,7 +1488,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, } /* Build the scatterlist for the source data */ - err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); + err = build_hash_sglist(tsgl, vec, cfg, 0, divs); if (err) { pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", driver, vec_name, cfg->name); -- cgit 1.2.3-korg From 36cfc05715a7ca0c989a355f173cbe6ec469c1f9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:50 -0700 Subject: crypto: ccm - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify crypto_ccm_create_common() accordingly. 
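The dropped OR follows from how alignmasks compose; a standalone sketch (plain
C, not kernel code): valid masks have the form 2^n - 1, so OR-ing the
constituent algorithms' masks yields the larger, i.e. stricter, one, and OR-ing
with the hash side's now-guaranteed 0 leaves the ctr mask unchanged.

	#include <assert.h>

	int main(void)
	{
		unsigned int mac_mask = 0x0;	/* hash alignmask is now always 0 */
		unsigned int ctr_mask = 0xf;	/* e.g. a 16-byte requirement */

		/* OR of two (2^n - 1) masks is the larger of the two */
		assert((0x3 | 0xf) == 0xf);
		/* with the hash side fixed at 0, only the ctr mask remains */
		assert((mac_mask | ctr_mask) == ctr_mask);
		return 0;
	}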
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ccm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/ccm.c b/crypto/ccm.c index dd7aed63efc93b..36f0acec32e196 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -504,8 +504,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = mac->base.cra_alignmask | - ctr->base.cra_alignmask; + inst->alg.base.cra_alignmask = ctr->base.cra_alignmask; inst->alg.ivsize = 16; inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; -- cgit 1.2.3-korg From 381a796a187a3c1ab477e7d1e5ea0c6aea1e9404 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:51 -0700 Subject: crypto: chacha20poly1305 - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify chachapoly_create() accordingly. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 0e2e208d98f942..9e4651330852b5 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -610,8 +610,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = chacha->base.cra_alignmask | - poly->base.cra_alignmask; + inst->alg.base.cra_alignmask = chacha->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; inst->alg.ivsize = ivsize; -- cgit 1.2.3-korg From 33fe2fb763a84152a297e97b144bbda19fde67e4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:52 -0700 Subject: crypto: gcm - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify crypto_gcm_create_common() accordingly. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/gcm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/gcm.c b/crypto/gcm.c index 91ce6e0e2afc14..84f7c23d14e483 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -629,8 +629,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = ghash->base.cra_alignmask | - ctr->base.cra_alignmask; + inst->alg.base.cra_alignmask = ctr->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.ivsize = GCM_AES_IV_SIZE; inst->alg.chunksize = ctr->chunksize; -- cgit 1.2.3-korg From c2435e81a6930fed5aeb0aecc219b467a3045f5d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:54 -0700 Subject: crypto: ahash - remove struct ahash_request_priv struct ahash_request_priv is unused, so remove it. 
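For background on the shape of the struct being deleted (a generic sketch with
illustrative names, not the kernel definition): request wrappers of this kind
end in a flexible array member so that the header and per-request scratch space
come from a single allocation; once no code allocates one, the definition is
dead weight.

	#include <stdlib.h>

	struct req_priv {
		void (*complete)(void *data, int err);	/* saved completion callback */
		void *data;
		unsigned char ubuf[];	/* scratch area, sized at allocation time */
	};

	static struct req_priv *req_priv_alloc(size_t scratch)
	{
		/* one allocation covers the header and the trailing buffer */
		return malloc(sizeof(struct req_priv) + scratch);
	}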
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 744fd3b8ea2588..556c9501009363 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -25,14 +25,6 @@ static const struct crypto_type crypto_ahash_type; -struct ahash_request_priv { - crypto_completion_t complete; - void *data; - u8 *result; - u32 flags; - void *ubuf[] CRYPTO_MINALIGN_ATTR; -}; - static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; -- cgit 1.2.3-korg From 4d707a47517674434a7adb3a11ca9e1c9d4a1adf Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:55 -0700 Subject: crypto: ahash - improve file comment Improve the file comment for crypto/ahash.c. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 556c9501009363..1ad402f4dac6c4 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -2,8 +2,12 @@ /* * Asynchronous Cryptographic Hash operations. * - * This is the asynchronous version of hash.c with notification of - * completion via a callback. + * This is the implementation of the ahash (asynchronous hash) API. It differs + * from shash (synchronous hash) in that ahash supports asynchronous operations, + * and it hashes data from scatterlists instead of virtually addressed buffers. + * + * The ahash API provides access to both ahash and shash algorithms. The shash + * API only provides access to shash algorithms. * * Copyright (c) 2008 Loc Ho */ -- cgit 1.2.3-korg From ecf889b70b6c0a174965a902a381f967bfd06914 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:58 -0700 Subject: crypto: hash - move "ahash wrapping shash" functions to ahash.c The functions that are involved in implementing the ahash API on top of an shash algorithm belong better in ahash.c, not in shash.c where they currently are. Move them. 
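From the caller's side, the moved helpers sit behind the ordinary ahash entry
points; the following is a hedged sketch of a one-shot digest through that API
(standard kernel interfaces, but error handling trimmed and the "sha256" name
assumed to be available). When the resolved implementation is an shash, the
digest is serviced by exactly the shash_ahash_*() functions relocated here.

	#include <crypto/hash.h>
	#include <linux/scatterlist.h>

	static int sha256_oneshot(struct scatterlist *sg, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);
		ahash_request_set_crypt(req, sg, out, len);

		/* resolves -EINPROGRESS/-EBUSY completions from async drivers */
		err = crypto_wait_req(crypto_ahash_digest(req), &wait);

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return err;
	}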
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 186 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/hash.h | 4 +- crypto/shash.c | 189 +-------------------------------------------------------- 3 files changed, 188 insertions(+), 191 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 1ad402f4dac6c4..74be1eb26c1aac 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -29,6 +29,192 @@ static const struct crypto_type crypto_ahash_type; +static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(tfm); + + return crypto_shash_setkey(*ctx, key, keylen); +} + +static int shash_async_init(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + + return crypto_shash_init(desc); +} + +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) +{ + struct crypto_hash_walk walk; + int nbytes; + + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; + nbytes = crypto_hash_walk_done(&walk, nbytes)) + nbytes = crypto_shash_update(desc, walk.data, nbytes); + + return nbytes; +} +EXPORT_SYMBOL_GPL(shash_ahash_update); + +static int shash_async_update(struct ahash_request *req) +{ + return shash_ahash_update(req, ahash_request_ctx(req)); +} + +static int shash_async_final(struct ahash_request *req) +{ + return crypto_shash_final(ahash_request_ctx(req), req->result); +} + +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) +{ + struct crypto_hash_walk walk; + int nbytes; + + nbytes = crypto_hash_walk_first(req, &walk); + if (!nbytes) + return crypto_shash_final(desc, req->result); + + do { + nbytes = crypto_hash_walk_last(&walk) ? 
+ crypto_shash_finup(desc, walk.data, nbytes, + req->result) : + crypto_shash_update(desc, walk.data, nbytes); + nbytes = crypto_hash_walk_done(&walk, nbytes); + } while (nbytes > 0); + + return nbytes; +} +EXPORT_SYMBOL_GPL(shash_ahash_finup); + +static int shash_async_finup(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + + return shash_ahash_finup(req, desc); +} + +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) +{ + unsigned int nbytes = req->nbytes; + struct scatterlist *sg; + unsigned int offset; + int err; + + if (nbytes && + (sg = req->src, offset = sg->offset, + nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { + void *data; + + data = kmap_local_page(sg_page(sg)); + err = crypto_shash_digest(desc, data + offset, nbytes, + req->result); + kunmap_local(data); + } else + err = crypto_shash_init(desc) ?: + shash_ahash_finup(req, desc); + + return err; +} +EXPORT_SYMBOL_GPL(shash_ahash_digest); + +static int shash_async_digest(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + + return shash_ahash_digest(req, desc); +} + +static int shash_async_export(struct ahash_request *req, void *out) +{ + return crypto_shash_export(ahash_request_ctx(req), out); +} + +static int shash_async_import(struct ahash_request *req, const void *in) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + + return crypto_shash_import(desc, in); +} + +static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) +{ + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(*ctx); +} + +static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) +{ + struct crypto_alg *calg = tfm->__crt_alg; + struct shash_alg *alg = __crypto_shash_alg(calg); + struct crypto_ahash *crt = __crypto_ahash_cast(tfm); + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); + struct crypto_shash *shash; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + shash = crypto_create_tfm(calg, &crypto_shash_type); + if (IS_ERR(shash)) { + crypto_mod_put(calg); + return PTR_ERR(shash); + } + + *ctx = shash; + tfm->exit = crypto_exit_shash_ops_async; + + crt->init = shash_async_init; + crt->update = shash_async_update; + crt->final = shash_async_final; + crt->finup = shash_async_finup; + crt->digest = shash_async_digest; + if (crypto_shash_alg_has_setkey(alg)) + crt->setkey = shash_async_setkey; + + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); + + crt->export = shash_async_export; + crt->import = shash_async_import; + + crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); + + return 0; +} + +static struct crypto_ahash * +crypto_clone_shash_ops_async(struct crypto_ahash *nhash, + struct crypto_ahash *hash) +{ + struct crypto_shash **nctx = crypto_ahash_ctx(nhash); + struct crypto_shash **ctx = crypto_ahash_ctx(hash); + struct crypto_shash *shash; + + shash = crypto_clone_shash(*ctx); + if (IS_ERR(shash)) { + crypto_free_ahash(nhash); + return ERR_CAST(shash); + } + + *nctx = shash; + + return nhash; +} + static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; diff --git a/crypto/hash.h b/crypto/hash.h index 7e6c1a948692fb..de2ee2f4ae3044 
100644 --- a/crypto/hash.h +++ b/crypto/hash.h @@ -31,9 +31,7 @@ static inline int crypto_hash_report_stat(struct sk_buff *skb, return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } -int crypto_init_shash_ops_async(struct crypto_tfm *tfm); -struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash, - struct crypto_ahash *hash); +extern const struct crypto_type crypto_shash_type; int hash_prepare_alg(struct hash_alg_common *alg); diff --git a/crypto/shash.c b/crypto/shash.c index 359702c2cd02bf..28092ed8415a72 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -16,8 +16,6 @@ #include "hash.h" -static const struct crypto_type crypto_shash_type; - static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) { return hash_get_stat(&alg->halg); @@ -193,191 +191,6 @@ int crypto_shash_import(struct shash_desc *desc, const void *in) } EXPORT_SYMBOL_GPL(crypto_shash_import); -static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(tfm); - - return crypto_shash_setkey(*ctx, key, keylen); -} - -static int shash_async_init(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return crypto_shash_init(desc); -} - -int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) -{ - struct crypto_hash_walk walk; - int nbytes; - - for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; - nbytes = crypto_hash_walk_done(&walk, nbytes)) - nbytes = crypto_shash_update(desc, walk.data, nbytes); - - return nbytes; -} -EXPORT_SYMBOL_GPL(shash_ahash_update); - -static int shash_async_update(struct ahash_request *req) -{ - return shash_ahash_update(req, ahash_request_ctx(req)); -} - -static int shash_async_final(struct ahash_request *req) -{ - return crypto_shash_final(ahash_request_ctx(req), req->result); -} - -int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) -{ - struct crypto_hash_walk walk; - int nbytes; - - nbytes = crypto_hash_walk_first(req, &walk); - if (!nbytes) - return crypto_shash_final(desc, req->result); - - do { - nbytes = crypto_hash_walk_last(&walk) ? 
- crypto_shash_finup(desc, walk.data, nbytes, - req->result) : - crypto_shash_update(desc, walk.data, nbytes); - nbytes = crypto_hash_walk_done(&walk, nbytes); - } while (nbytes > 0); - - return nbytes; -} -EXPORT_SYMBOL_GPL(shash_ahash_finup); - -static int shash_async_finup(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_finup(req, desc); -} - -int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) -{ - unsigned int nbytes = req->nbytes; - struct scatterlist *sg; - unsigned int offset; - int err; - - if (nbytes && - (sg = req->src, offset = sg->offset, - nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { - void *data; - - data = kmap_local_page(sg_page(sg)); - err = crypto_shash_digest(desc, data + offset, nbytes, - req->result); - kunmap_local(data); - } else - err = crypto_shash_init(desc) ?: - shash_ahash_finup(req, desc); - - return err; -} -EXPORT_SYMBOL_GPL(shash_ahash_digest); - -static int shash_async_digest(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_digest(req, desc); -} - -static int shash_async_export(struct ahash_request *req, void *out) -{ - return crypto_shash_export(ahash_request_ctx(req), out); -} - -static int shash_async_import(struct ahash_request *req, const void *in) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return crypto_shash_import(desc, in); -} - -static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) -{ - struct crypto_shash **ctx = crypto_tfm_ctx(tfm); - - crypto_free_shash(*ctx); -} - -int crypto_init_shash_ops_async(struct crypto_tfm *tfm) -{ - struct crypto_alg *calg = tfm->__crt_alg; - struct shash_alg *alg = __crypto_shash_alg(calg); - struct crypto_ahash *crt = __crypto_ahash_cast(tfm); - struct crypto_shash **ctx = crypto_tfm_ctx(tfm); - struct crypto_shash *shash; - - if (!crypto_mod_get(calg)) - return -EAGAIN; - - shash = crypto_create_tfm(calg, &crypto_shash_type); - if (IS_ERR(shash)) { - crypto_mod_put(calg); - return PTR_ERR(shash); - } - - *ctx = shash; - tfm->exit = crypto_exit_shash_ops_async; - - crt->init = shash_async_init; - crt->update = shash_async_update; - crt->final = shash_async_final; - crt->finup = shash_async_finup; - crt->digest = shash_async_digest; - if (crypto_shash_alg_has_setkey(alg)) - crt->setkey = shash_async_setkey; - - crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & - CRYPTO_TFM_NEED_KEY); - - crt->export = shash_async_export; - crt->import = shash_async_import; - - crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); - - return 0; -} - -struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash, - struct crypto_ahash *hash) -{ - struct crypto_shash **nctx = crypto_ahash_ctx(nhash); - struct crypto_shash **ctx = crypto_ahash_ctx(hash); - struct crypto_shash *shash; - - shash = crypto_clone_shash(*ctx); - if (IS_ERR(shash)) { - crypto_free_ahash(nhash); - return ERR_CAST(shash); - } - - *nctx = shash; - - return nhash; -} - static void crypto_shash_exit_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); @@ -456,7 +269,7 @@ static int __maybe_unused crypto_shash_report_stat( 
return crypto_hash_report_stat(skb, alg, "shash"); } -static const struct crypto_type crypto_shash_type = { +const struct crypto_type crypto_shash_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_shash_init_tfm, .free = crypto_shash_free_instance, -- cgit 1.2.3-korg From 85b84327b3f0df32be19e01257fb375972be115c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:59 -0700 Subject: crypto: ahash - check for shash type instead of not ahash type Since the previous patch made crypto_shash_type visible to ahash.c, change checks for '->cra_type != &crypto_ahash_type' to '->cra_type == &crypto_shash_type'. This makes more sense and avoids having to forward-declare crypto_ahash_type. The result is still the same, since the type is either shash or ahash here. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 74be1eb26c1aac..96fec0ca202af9 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -27,8 +27,6 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e -static const struct crypto_type crypto_ahash_type; - static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -511,7 +509,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) crypto_ahash_set_statesize(hash, alg->halg.statesize); - if (tfm->__crt_alg->cra_type != &crypto_ahash_type) + if (tfm->__crt_alg->cra_type == &crypto_shash_type) return crypto_init_shash_ops_async(tfm); hash->init = alg->init; @@ -535,7 +533,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) { - if (alg->cra_type != &crypto_ahash_type) + if (alg->cra_type == &crypto_shash_type) return sizeof(struct crypto_shash *); return crypto_alg_extsize(alg); @@ -760,7 +758,7 @@ bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; - if (alg->cra_type != &crypto_ahash_type) + if (alg->cra_type == &crypto_shash_type) return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); return __crypto_ahash_alg(alg)->setkey != NULL; -- cgit 1.2.3-korg From 2f1f34c1bf7b309b296bc04321a09e6b5dba0673 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:11:00 -0700 Subject: crypto: ahash - optimize performance when wrapping shash The "ahash" API provides access to both CPU-based and hardware offload- based implementations of hash algorithms. Typically the former are implemented as "shash" algorithms under the hood, while the latter are implemented as "ahash" algorithms. The "ahash" API provides access to both. Various kernel subsystems use the ahash API because they want to support hashing hardware offload without using a separate API for it. Yet, the common case is that a crypto accelerator is not actually being used, and ahash is just wrapping a CPU-based shash algorithm. This patch optimizes the ahash API for that common case by eliminating the extra indirect call for each ahash operation on top of shash. It also fixes the double-counting of crypto stats in this scenario (though CONFIG_CRYPTO_STATS should *not* be enabled by anyone interested in performance anyway...), and it eliminates redundant checking of CRYPTO_TFM_NEED_KEY. As a bonus, it also shrinks struct crypto_ahash. 
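The shape of the optimization, reduced to a standalone sketch (generic C with
illustrative names, not the kernel code): a boolean set once at tfm
initialization lets the common path take one well-predicted branch plus a
direct call, while the indirect call through per-algorithm function pointers is
kept only for native ahash implementations.

	#include <stdbool.h>

	#define likely(x)	__builtin_expect(!!(x), 1)

	struct req { int nbytes; };
	struct alg { int (*update)(struct req *); };
	struct tfm { bool using_shash; const struct alg *alg; };

	/* stand-in for the shash fast path, e.g. shash_ahash_update() */
	static int shash_update(struct req *r)
	{
		return r->nbytes;
	}

	static int tfm_update(struct tfm *t, struct req *r)
	{
		if (likely(t->using_shash))
			return shash_update(r);	/* direct call on the common path */
		return t->alg->update(r);	/* indirect call only when needed */
	}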
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 285 +++++++++++++++++++++++++------------------------- crypto/hash.h | 10 ++ crypto/shash.c | 8 +- include/crypto/hash.h | 68 +----------- 4 files changed, 167 insertions(+), 204 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 96fec0ca202af9..deee55f939dc8c 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -27,22 +27,38 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e -static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) +static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg) { - struct crypto_shash **ctx = crypto_ahash_ctx(tfm); + return hash_get_stat(&alg->halg); +} + +static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err) +{ + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; - return crypto_shash_setkey(*ctx, key, keylen); + if (err && err != -EINPROGRESS && err != -EBUSY) + atomic64_inc(&ahash_get_stat(alg)->err_cnt); + + return err; } -static int shash_async_init(struct ahash_request *req) +/* + * For an ahash tfm that is using an shash algorithm (instead of an ahash + * algorithm), this returns the underlying shash tfm. + */ +static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm) { - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); + return *(struct crypto_shash **)crypto_ahash_ctx(tfm); +} - desc->tfm = *ctx; +static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req, + struct crypto_ahash *tfm) +{ + struct shash_desc *desc = ahash_request_ctx(req); - return crypto_shash_init(desc); + desc->tfm = ahash_to_shash(tfm); + return desc; } int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) @@ -58,16 +74,6 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) } EXPORT_SYMBOL_GPL(shash_ahash_update); -static int shash_async_update(struct ahash_request *req) -{ - return shash_ahash_update(req, ahash_request_ctx(req)); -} - -static int shash_async_final(struct ahash_request *req) -{ - return crypto_shash_final(ahash_request_ctx(req), req->result); -} - int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) { struct crypto_hash_walk walk; @@ -89,16 +95,6 @@ int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) } EXPORT_SYMBOL_GPL(shash_ahash_finup); -static int shash_async_finup(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_finup(req, desc); -} - int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) { unsigned int nbytes = req->nbytes; @@ -123,42 +119,16 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) } EXPORT_SYMBOL_GPL(shash_ahash_digest); -static int shash_async_digest(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_digest(req, desc); -} - -static int shash_async_export(struct ahash_request *req, void *out) -{ - return crypto_shash_export(ahash_request_ctx(req), out); -} - -static int shash_async_import(struct ahash_request *req, const void *in) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc 
*desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return crypto_shash_import(desc, in); -} - -static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) +static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm) { struct crypto_shash **ctx = crypto_tfm_ctx(tfm); crypto_free_shash(*ctx); } -static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) +static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; - struct shash_alg *alg = __crypto_shash_alg(calg); struct crypto_ahash *crt = __crypto_ahash_cast(tfm); struct crypto_shash **ctx = crypto_tfm_ctx(tfm); struct crypto_shash *shash; @@ -172,47 +142,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) return PTR_ERR(shash); } + crt->using_shash = true; *ctx = shash; - tfm->exit = crypto_exit_shash_ops_async; - - crt->init = shash_async_init; - crt->update = shash_async_update; - crt->final = shash_async_final; - crt->finup = shash_async_finup; - crt->digest = shash_async_digest; - if (crypto_shash_alg_has_setkey(alg)) - crt->setkey = shash_async_setkey; + tfm->exit = crypto_exit_ahash_using_shash; crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); - - crt->export = shash_async_export; - crt->import = shash_async_import; - crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); return 0; } -static struct crypto_ahash * -crypto_clone_shash_ops_async(struct crypto_ahash *nhash, - struct crypto_ahash *hash) -{ - struct crypto_shash **nctx = crypto_ahash_ctx(nhash); - struct crypto_shash **ctx = crypto_ahash_ctx(hash); - struct crypto_shash *shash; - - shash = crypto_clone_shash(*ctx); - if (IS_ERR(shash)) { - crypto_free_ahash(nhash); - return ERR_CAST(shash); - } - - *nctx = shash; - - return nhash; -} - static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; @@ -290,30 +230,54 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, return -ENOSYS; } -static void ahash_set_needkey(struct crypto_ahash *tfm) +static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg) { - const struct hash_alg_common *alg = crypto_hash_alg_common(tfm); - - if (tfm->setkey != ahash_nosetkey && - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + if (alg->setkey != ahash_nosetkey && + !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); } int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - int err = tfm->setkey(tfm, key, keylen); + if (likely(tfm->using_shash)) { + struct crypto_shash *shash = ahash_to_shash(tfm); + int err; - if (unlikely(err)) { - ahash_set_needkey(tfm); - return err; + err = crypto_shash_setkey(shash, key, keylen); + if (unlikely(err)) { + crypto_ahash_set_flags(tfm, + crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); + return err; + } + } else { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + int err; + + err = alg->setkey(tfm, key, keylen); + if (unlikely(err)) { + ahash_set_needkey(tfm, alg); + return err; + } } - crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); +int crypto_ahash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_init(prepare_shash_desc(req, tfm)); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return crypto_ahash_alg(tfm)->init(req); +} 
+EXPORT_SYMBOL_GPL(crypto_ahash_init); + static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, bool has_state) { @@ -377,42 +341,67 @@ static void ahash_restore_req(struct ahash_request *req, int err) kfree_sensitive(subreq); } -int crypto_ahash_final(struct ahash_request *req) +int crypto_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + struct ahash_alg *alg; + if (likely(tfm->using_shash)) + return shash_ahash_update(req, ahash_request_ctx(req)); + + alg = crypto_ahash_alg(tfm); if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&hash_get_stat(alg)->hash_cnt); + atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen); + return crypto_ahash_errstat(alg, alg->update(req)); +} +EXPORT_SYMBOL_GPL(crypto_ahash_update); + +int crypto_ahash_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_alg *alg; + + if (likely(tfm->using_shash)) + return crypto_shash_final(ahash_request_ctx(req), req->result); - return crypto_hash_errstat(alg, tfm->final(req)); + alg = crypto_ahash_alg(tfm); + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + atomic64_inc(&ahash_get_stat(alg)->hash_cnt); + return crypto_ahash_errstat(alg, alg->final(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_final); int crypto_ahash_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + struct ahash_alg *alg; + + if (likely(tfm->using_shash)) + return shash_ahash_finup(req, ahash_request_ctx(req)); + alg = crypto_ahash_alg(tfm); if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_hash *istat = hash_get_stat(alg); + struct crypto_istat_hash *istat = ahash_get_stat(alg); atomic64_inc(&istat->hash_cnt); atomic64_add(req->nbytes, &istat->hash_tlen); } - - return crypto_hash_errstat(alg, tfm->finup(req)); + return crypto_ahash_errstat(alg, alg->finup(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + struct ahash_alg *alg; int err; + if (likely(tfm->using_shash)) + return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); + + alg = crypto_ahash_alg(tfm); if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_hash *istat = hash_get_stat(alg); + struct crypto_istat_hash *istat = ahash_get_stat(alg); atomic64_inc(&istat->hash_cnt); atomic64_add(req->nbytes, &istat->hash_tlen); @@ -421,9 +410,9 @@ int crypto_ahash_digest(struct ahash_request *req) if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) err = -ENOKEY; else - err = tfm->digest(req); + err = alg->digest(req); - return crypto_hash_errstat(alg, err); + return crypto_ahash_errstat(alg, err); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); @@ -448,7 +437,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err) subreq->base.complete = ahash_def_finup_done2; - err = crypto_ahash_reqtfm(req)->final(subreq); + err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq); if (err == -EINPROGRESS || err == -EBUSY) return err; @@ -485,13 +474,35 @@ static int ahash_def_finup(struct ahash_request *req) if (err) return err; - err = tfm->update(req->priv); + err = crypto_ahash_alg(tfm)->update(req->priv); if (err == -EINPROGRESS || err == -EBUSY) return err; return ahash_def_finup_finish1(req, err); } +int crypto_ahash_export(struct 
ahash_request *req, void *out) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_export(ahash_request_ctx(req), out); + return crypto_ahash_alg(tfm)->export(req, out); +} +EXPORT_SYMBOL_GPL(crypto_ahash_export); + +int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_import(prepare_shash_desc(req, tfm), in); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return crypto_ahash_alg(tfm)->import(req, in); +} +EXPORT_SYMBOL_GPL(crypto_ahash_import); + static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); @@ -505,25 +516,12 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); - hash->setkey = ahash_nosetkey; - crypto_ahash_set_statesize(hash, alg->halg.statesize); if (tfm->__crt_alg->cra_type == &crypto_shash_type) - return crypto_init_shash_ops_async(tfm); - - hash->init = alg->init; - hash->update = alg->update; - hash->final = alg->final; - hash->finup = alg->finup ?: ahash_def_finup; - hash->digest = alg->digest; - hash->export = alg->export; - hash->import = alg->import; - - if (alg->setkey) { - hash->setkey = alg->setkey; - ahash_set_needkey(hash); - } + return crypto_init_ahash_using_shash(tfm); + + ahash_set_needkey(hash, alg); if (alg->exit_tfm) tfm->exit = crypto_ahash_exit_tfm; @@ -641,19 +639,21 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) if (IS_ERR(nhash)) return nhash; - nhash->init = hash->init; - nhash->update = hash->update; - nhash->final = hash->final; - nhash->finup = hash->finup; - nhash->digest = hash->digest; - nhash->export = hash->export; - nhash->import = hash->import; - nhash->setkey = hash->setkey; nhash->reqsize = hash->reqsize; nhash->statesize = hash->statesize; - if (tfm->__crt_alg->cra_type != &crypto_ahash_type) - return crypto_clone_shash_ops_async(nhash, hash); + if (likely(hash->using_shash)) { + struct crypto_shash **nctx = crypto_ahash_ctx(nhash); + struct crypto_shash *shash; + + shash = crypto_clone_shash(ahash_to_shash(hash)); + if (IS_ERR(shash)) { + err = PTR_ERR(shash); + goto out_free_nhash; + } + *nctx = shash; + return nhash; + } err = -ENOSYS; alg = crypto_ahash_alg(hash); @@ -687,6 +687,11 @@ static int ahash_prepare_alg(struct ahash_alg *alg) base->cra_type = &crypto_ahash_type; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; + if (!alg->finup) + alg->finup = ahash_def_finup; + if (!alg->setkey) + alg->setkey = ahash_nosetkey; + return 0; } @@ -761,7 +766,7 @@ bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) if (alg->cra_type == &crypto_shash_type) return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); - return __crypto_ahash_alg(alg)->setkey != NULL; + return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; } EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); diff --git a/crypto/hash.h b/crypto/hash.h index de2ee2f4ae3044..93f6ba0df263e5 100644 --- a/crypto/hash.h +++ b/crypto/hash.h @@ -12,6 +12,16 @@ #include "internal.h" +static inline struct crypto_istat_hash *hash_get_stat( + struct hash_alg_common *alg) +{ +#ifdef CONFIG_CRYPTO_STATS + return &alg->stat; +#else + return NULL; +#endif +} + static inline int crypto_hash_report_stat(struct sk_buff *skb, struct crypto_alg *alg, const char *type) diff --git a/crypto/shash.c 
b/crypto/shash.c index 28092ed8415a72..d5194221c88cb9 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -23,7 +23,13 @@ static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) static inline int crypto_shash_errstat(struct shash_alg *alg, int err) { - return crypto_hash_errstat(&alg->halg, err); + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; + + if (err && err != -EINPROGRESS && err != -EBUSY) + atomic64_inc(&shash_get_stat(alg)->err_cnt); + + return err; } int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, diff --git a/include/crypto/hash.h b/include/crypto/hash.h index b00a4a36a8ec31..c7bdbece27ccbc 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -250,16 +250,7 @@ struct shash_alg { #undef HASH_ALG_COMMON_STAT struct crypto_ahash { - int (*init)(struct ahash_request *req); - int (*update)(struct ahash_request *req); - int (*final)(struct ahash_request *req); - int (*finup)(struct ahash_request *req); - int (*digest)(struct ahash_request *req); - int (*export)(struct ahash_request *req, void *out); - int (*import)(struct ahash_request *req, const void *in); - int (*setkey)(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen); - + bool using_shash; /* Underlying algorithm is shash, not ahash */ unsigned int statesize; unsigned int reqsize; struct crypto_tfm base; @@ -513,10 +504,7 @@ int crypto_ahash_digest(struct ahash_request *req); * * Return: 0 if the export was successful; < 0 if an error occurred */ -static inline int crypto_ahash_export(struct ahash_request *req, void *out) -{ - return crypto_ahash_reqtfm(req)->export(req, out); -} +int crypto_ahash_export(struct ahash_request *req, void *out); /** * crypto_ahash_import() - import message digest state @@ -529,15 +517,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) * * Return: 0 if the import was successful; < 0 if an error occurred */ -static inline int crypto_ahash_import(struct ahash_request *req, const void *in) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->import(req, in); -} +int crypto_ahash_import(struct ahash_request *req, const void *in); /** * crypto_ahash_init() - (re)initialize message digest handle @@ -550,36 +530,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) * * Return: see crypto_ahash_final() */ -static inline int crypto_ahash_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->init(req); -} - -static inline struct crypto_istat_hash *hash_get_stat( - struct hash_alg_common *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err) -{ - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&hash_get_stat(alg)->err_cnt); - - return err; -} +int crypto_ahash_init(struct ahash_request *req); /** * crypto_ahash_update() - add data to message digest for processing @@ -592,16 +543,7 @@ static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err) * * Return: see crypto_ahash_final() */ -static inline int crypto_ahash_update(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = 
crypto_hash_alg_common(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_add(req->nbytes, &hash_get_stat(alg)->hash_tlen);
-
- return crypto_hash_errstat(alg, tfm->update(req));
-}
+int crypto_ahash_update(struct ahash_request *req);

 /**
  * DOC: Asynchronous Hash Request Handle
-- cgit 1.2.3-korg

From 4b057654ebc3e071e2a95ea2edfd15b5682cedba Mon Sep 17 00:00:00 2001
From: Dimitri John Ledkov
Date: Sun, 22 Oct 2023 19:22:04 +0100
Subject: crypto: FIPS 202 SHA-3 register in hash info for IMA

Register the FIPS 202 SHA-3 hashes in hash info for IMA and other
users. Only sizes 256 and up are registered, as SHA3-224 is too weak
for any practical purpose.

Signed-off-by: Dimitri John Ledkov
Signed-off-by: Herbert Xu
---
 crypto/hash_info.c | 6 ++++++
 include/crypto/hash_info.h | 1 +
 include/uapi/linux/hash_info.h | 3 +++
 3 files changed, 10 insertions(+)
(limited to 'crypto')

diff --git a/crypto/hash_info.c b/crypto/hash_info.c
index a49ff96bde7784..9a467638c97130 100644
--- a/crypto/hash_info.c
+++ b/crypto/hash_info.c
@@ -29,6 +29,9 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
 [HASH_ALGO_SM3_256] = "sm3",
 [HASH_ALGO_STREEBOG_256] = "streebog256",
 [HASH_ALGO_STREEBOG_512] = "streebog512",
+ [HASH_ALGO_SHA3_256] = "sha3-256",
+ [HASH_ALGO_SHA3_384] = "sha3-384",
+ [HASH_ALGO_SHA3_512] = "sha3-512",
 };
 EXPORT_SYMBOL_GPL(hash_algo_name);
@@ -53,5 +56,8 @@ const int hash_digest_size[HASH_ALGO__LAST] = {
 [HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE,
 [HASH_ALGO_STREEBOG_256] = STREEBOG256_DIGEST_SIZE,
 [HASH_ALGO_STREEBOG_512] = STREEBOG512_DIGEST_SIZE,
+ [HASH_ALGO_SHA3_256] = SHA3_256_DIGEST_SIZE,
+ [HASH_ALGO_SHA3_384] = SHA3_384_DIGEST_SIZE,
+ [HASH_ALGO_SHA3_512] = SHA3_512_DIGEST_SIZE,
 };
 EXPORT_SYMBOL_GPL(hash_digest_size);
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index dd4f067850493b..d6927739f8b2c9 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -10,6 +10,7 @@
 #include <crypto/sha1.h>
 #include <crypto/sha2.h>
+#include <crypto/sha3.h>
 #include <crypto/md5.h>
 #include <crypto/streebog.h>
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
index 74a8609fcb4d38..0af23ec196d8f7 100644
--- a/include/uapi/linux/hash_info.h
+++ b/include/uapi/linux/hash_info.h
@@ -35,6 +35,9 @@ enum hash_algo {
 HASH_ALGO_SM3_256,
 HASH_ALGO_STREEBOG_256,
 HASH_ALGO_STREEBOG_512,
+ HASH_ALGO_SHA3_256,
+ HASH_ALGO_SHA3_384,
+ HASH_ALGO_SHA3_512,
 HASH_ALGO__LAST
 };
-- cgit 1.2.3-korg
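For context, consumers such as IMA resolve one of these identifiers to an
allocatable algorithm name and digest length through the two exported tables
patched above. A minimal sketch of that lookup (the demo_alloc_by_id() helper
and its pr_info() message are illustrative, not part of the patch):

	#include <linux/printk.h>
	#include <crypto/hash.h>
	#include <crypto/hash_info.h>

	/* Resolve an enum hash_algo identifier to a usable shash transform. */
	static struct crypto_shash *demo_alloc_by_id(enum hash_algo id)
	{
		const char *name = hash_algo_name[id];	/* e.g. "sha3-256" */
		int dlen = hash_digest_size[id];	/* e.g. 32 for sha3-256 */

		pr_info("allocating %s (digest length %d)\n", name, dlen);
		return crypto_alloc_shash(name, 0, 0);
	}

With the hunks above applied, HASH_ALGO_SHA3_256 resolves to "sha3-256",
which such a caller can then allocate and use for measurement.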
From ee62afb9d02dd279a7b73245614f13f8fe777a6d Mon Sep 17 00:00:00 2001
From: Dimitri John Ledkov
Date: Sun, 22 Oct 2023 19:22:05 +0100
Subject: crypto: rsa-pkcs1pad - Add FIPS 202 SHA-3 support

Add support in rsa-pkcs1pad for FIPS 202 SHA-3 hashes, sizes 256 and
up, as SHA3-224 is too weak for any practical purpose.

Signed-off-by: Dimitri John Ledkov
Signed-off-by: Herbert Xu
---
 crypto/rsa-pkcs1pad.c | 25 ++++++++++++++++++++++++-
 crypto/testmgr.c | 12 ++++++++++++
 2 files changed, 36 insertions(+), 1 deletion(-)
(limited to 'crypto')

diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 49756c6ea7a1ec..cd501195f34a1a 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -61,6 +61,24 @@ static const u8 rsa_digest_info_sha512[] = {
 0x05, 0x00, 0x04, 0x40
 };

+static const u8 rsa_digest_info_sha3_256[] = {
+ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08,
+ 0x05, 0x00, 0x04, 0x20
+};
+
+static const u8 rsa_digest_info_sha3_384[] = {
+ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09,
+ 0x05, 0x00, 0x04, 0x30
+};
+
+static const u8 rsa_digest_info_sha3_512[] = {
+ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0A,
+ 0x05, 0x00, 0x04, 0x40
+};
+
 static const struct rsa_asn1_template {
 const char *name;
 const u8 *data;
@@ -74,8 +92,13 @@ static const struct rsa_asn1_template {
 _(sha384),
 _(sha512),
 _(sha224),
- { NULL }
 #undef _
+#define _(X) { "sha3-" #X, rsa_digest_info_sha3_##X, sizeof(rsa_digest_info_sha3_##X) }
+ _(256),
+ _(384),
+ _(512),
+#undef _
+ { NULL }
 };

 static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 335449a27f7575..1dc93bf608d4a0 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -5464,6 +5464,18 @@ static const struct alg_test_desc alg_test_descs[] = {
 .alg = "pkcs1pad(rsa,sha512)",
 .test = alg_test_null,
 .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1pad(rsa,sha3-256)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1pad(rsa,sha3-384)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1pad(rsa,sha3-512)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
 }, {
 .alg = "poly1305",
 .test = alg_test_hash,
-- cgit 1.2.3-korg
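For reference, these DigestInfo blobs are what pkcs1pad prepends to the raw
digest when building the EMSA-PKCS1-v1_5 encoded message
EM = 0x00 || 0x01 || PS || 0x00 || DigestInfo || H. A self-contained sketch
of that construction (a simplified userspace model; the kernel implementation
works on scatterlists instead):

	#include <stdint.h>
	#include <string.h>

	/*
	 * Fill em[] with 0x00 0x01 FF..FF 0x00 || digest_info || hash, the
	 * RSASSA-PKCS1-v1_5 signature padding. Returns 0, or -1 if the RSA
	 * modulus is too small for the chosen hash.
	 */
	static int pkcs1_v15_encode(uint8_t *em, size_t em_len,
				    const uint8_t *digest_info, size_t di_len,
				    const uint8_t *hash, size_t hash_len)
	{
		size_t t_len = di_len + hash_len;

		if (em_len < t_len + 11)	/* PS must be >= 8 bytes */
			return -1;

		em[0] = 0x00;
		em[1] = 0x01;
		memset(em + 2, 0xff, em_len - t_len - 3);	/* PS */
		em[em_len - t_len - 1] = 0x00;
		memcpy(em + em_len - t_len, digest_info, di_len);
		memcpy(em + em_len - hash_len, hash, hash_len);
		return 0;
	}

For "pkcs1pad(rsa,sha3-256)", digest_info would be rsa_digest_info_sha3_256
above and hash the 32-byte SHA3-256 digest.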
From fdb4f66c9545f29742be5a8d325798e6016c3c4e Mon Sep 17 00:00:00 2001
From: Dimitri John Ledkov
Date: Sun, 22 Oct 2023 19:22:06 +0100
Subject: crypto: asymmetric_keys - allow FIPS 202 SHA-3 signatures

Add FIPS 202 SHA-3 hash signature support in x509 certificates, pkcs7
signatures, and authenticode signatures. This supports hashes of size
256 and up, as SHA3-224 is too weak for any practical purpose.

Signed-off-by: Dimitri John Ledkov
Signed-off-by: Herbert Xu
---
 crypto/asymmetric_keys/mscode_parser.c | 9 +++++++++
 crypto/asymmetric_keys/pkcs7_parser.c | 12 ++++++++++++
 crypto/asymmetric_keys/public_key.c | 5 ++++-
 crypto/asymmetric_keys/x509_cert_parser.c | 24 ++++++++++++++++++++++++
 4 files changed, 49 insertions(+), 1 deletion(-)
(limited to 'crypto')

diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 855cbc46a9c368..05402ef8964ed4 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -84,6 +84,15 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
 case OID_sha512:
 ctx->digest_algo = "sha512";
 break;
+ case OID_sha3_256:
+ ctx->digest_algo = "sha3-256";
+ break;
+ case OID_sha3_384:
+ ctx->digest_algo = "sha3-384";
+ break;
+ case OID_sha3_512:
+ ctx->digest_algo = "sha3-512";
+ break;

 case OID__NR:
 sprint_oid(value, vlen, buffer, sizeof(buffer));
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index ab647cb4d76689..5b08c50722d0f5 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -248,6 +248,15 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
 case OID_gost2012Digest512:
 ctx->sinfo->sig->hash_algo = "streebog512";
 break;
+ case OID_sha3_256:
+ ctx->sinfo->sig->hash_algo = "sha3-256";
+ break;
+ case OID_sha3_384:
+ ctx->sinfo->sig->hash_algo = "sha3-384";
+ break;
+ case OID_sha3_512:
+ ctx->sinfo->sig->hash_algo = "sha3-512";
+ break;
 default:
 printk("Unsupported digest algo: %u\n", ctx->last_oid);
 return -ENOPKG;
@@ -273,6 +282,9 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
 case OID_id_ecdsa_with_sha256:
 case OID_id_ecdsa_with_sha384:
 case OID_id_ecdsa_with_sha512:
+ case OID_id_ecdsa_with_sha3_256:
+ case OID_id_ecdsa_with_sha3_384:
+ case OID_id_ecdsa_with_sha3_512:
 ctx->sinfo->sig->pkey_algo = "ecdsa";
 ctx->sinfo->sig->encoding = "x962";
 break;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 5bf0452c17af21..8eeab38a3d8ae2 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -119,7 +119,10 @@ software_key_determine_akcipher(const struct public_key *pkey,
 if (strcmp(hash_algo, "sha224") != 0 &&
 strcmp(hash_algo, "sha256") != 0 &&
 strcmp(hash_algo, "sha384") != 0 &&
- strcmp(hash_algo, "sha512") != 0)
+ strcmp(hash_algo, "sha512") != 0 &&
+ strcmp(hash_algo, "sha3-256") != 0 &&
+ strcmp(hash_algo, "sha3-384") != 0 &&
+ strcmp(hash_algo, "sha3-512") != 0)
 return -EINVAL;
 } else if (strcmp(pkey->pkey_algo, "sm2") == 0) {
 if (strcmp(encoding, "raw") != 0)
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 68ef1ffbbef6b8..487204d394266e 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -214,6 +214,18 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
 ctx->cert->sig->hash_algo = "sha224";
 goto rsa_pkcs1;

+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
+ ctx->cert->sig->hash_algo = "sha3-256";
+ goto rsa_pkcs1;
+
+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_384:
+ ctx->cert->sig->hash_algo = "sha3-384";
+ goto rsa_pkcs1;
+
+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_512:
+ ctx->cert->sig->hash_algo = "sha3-512";
+ goto rsa_pkcs1;
+
 case OID_id_ecdsa_with_sha224:
 ctx->cert->sig->hash_algo = "sha224";
 goto ecdsa;
@@ -230,6 +242,18 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
 ctx->cert->sig->hash_algo = "sha512";
 goto ecdsa;

+ case OID_id_ecdsa_with_sha3_256:
+ ctx->cert->sig->hash_algo = "sha3-256";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha3_384:
+ ctx->cert->sig->hash_algo = "sha3-384";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha3_512:
+ ctx->cert->sig->hash_algo = "sha3-512";
+ goto ecdsa;
+
 case OID_gost2012Signature256:
 ctx->cert->sig->hash_algo = "streebog256";
 goto ecrdsa;
-- cgit 1.2.3-korg

From b030c45844cfe0d4c89202cd6c9f4697b3a25a6a Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Fri, 27 Oct 2023 12:52:06 -0700
Subject: crypto: testmgr - move pkcs1pad(rsa,sha3-*) to correct place

alg_test_descs[] needs to be in sorted order, since it is used for
binary search. This fixes the following boot-time warning:

  testmgr: alg_test_descs entries in wrong order: 'pkcs1pad(rsa,sha512)' before 'pkcs1pad(rsa,sha3-256)'

Fixes: ee62afb9d02d ("crypto: rsa-pkcs1pad - Add FIPS 202 SHA-3 support")
Signed-off-by: Eric Biggers
Reviewed-by: Dimitri John Ledkov
Signed-off-by: Herbert Xu
---
 crypto/testmgr.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
(limited to 'crypto')

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 1dc93bf608d4a0..15c7a3011269b7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -5457,23 +5457,23 @@ static const struct alg_test_desc alg_test_descs[] = {
 .akcipher = __VECS(pkcs1pad_rsa_tv_template)
 }
 }, {
- .alg = "pkcs1pad(rsa,sha384)",
+ .alg = "pkcs1pad(rsa,sha3-256)",
 .test = alg_test_null,
 .fips_allowed = 1,
 }, {
- .alg = "pkcs1pad(rsa,sha512)",
+ .alg = "pkcs1pad(rsa,sha3-384)",
 .test = alg_test_null,
 .fips_allowed = 1,
 }, {
- .alg = "pkcs1pad(rsa,sha3-256)",
+ .alg = "pkcs1pad(rsa,sha3-512)",
 .test = alg_test_null,
 .fips_allowed = 1,
 }, {
- .alg = "pkcs1pad(rsa,sha3-384)",
+ .alg = "pkcs1pad(rsa,sha384)",
 .test = alg_test_null,
 .fips_allowed = 1,
 }, {
- .alg = "pkcs1pad(rsa,sha3-512)",
+ .alg = "pkcs1pad(rsa,sha512)",
 .test = alg_test_null,
 .fips_allowed = 1,
 }, {
-- cgit 1.2.3-korg
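The sorted-order requirement exists because the self-test lookup is a binary
search keyed on the algorithm name. A simplified sketch of that lookup (the
demo_* names are illustrative; the real table and comparator live in
crypto/testmgr.c):

	#include <linux/bsearch.h>
	#include <linux/string.h>

	struct demo_test_desc {
		const char *alg;
	};

	/* Compare the name being looked up against one table entry. */
	static int demo_cmp(const void *key, const void *elt)
	{
		const struct demo_test_desc *desc = elt;

		return strcmp(key, desc->alg);
	}

	/* Only correct when descs[] is sorted by ->alg - hence the warning. */
	static const struct demo_test_desc *
	demo_find(const struct demo_test_desc *descs, size_t n, const char *alg)
	{
		return bsearch(alg, descs, n, sizeof(*descs), demo_cmp);
	}

Since '-' (0x2d) sorts before '8' (0x38), the "pkcs1pad(rsa,sha3-*)" names
must precede "pkcs1pad(rsa,sha384)", which is exactly how the hunk reorders
the entries.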
From a312e07a65fb598ed239b940434392721385c722 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Fri, 27 Oct 2023 13:30:17 -0700
Subject: crypto: adiantum - flush destination page before unmapping

Upon additional review, the new fast path in adiantum_finish() is
missing the call to flush_dcache_page() that scatterwalk_map_and_copy()
was doing.

It's apparently debatable whether flush_dcache_page() is actually
needed, as per the discussion at
https://lore.kernel.org/lkml/YYP1lAq46NWzhOf0@casper.infradead.org/T/#u.
However, it appears that currently all the helper functions that write
to a page, such as scatterwalk_map_and_copy(), memcpy_to_page(), and
memzero_page(), do the dcache flush. So do it to be consistent.

Fixes: dadf5e56c967 ("crypto: adiantum - add fast path for single-page messages")
Signed-off-by: Eric Biggers
Signed-off-by: Herbert Xu
---
 crypto/adiantum.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
(limited to 'crypto')

diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 9ff3376f9ed35f..60f3883b736aa8 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -300,7 +300,8 @@ static int adiantum_finish(struct skcipher_request *req)
 le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
 if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
 /* Fast path for single-page destination */
- void *virt = kmap_local_page(sg_page(dst)) + dst->offset;
+ struct page *page = sg_page(dst);
+ void *virt = kmap_local_page(page) + dst->offset;

 err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
 (u8 *)&digest);
@@ -310,6 +311,7 @@ static int adiantum_finish(struct skcipher_request *req)
 }
 le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
 memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
+ flush_dcache_page(page);
 kunmap_local(virt);
 } else {
 /* Slow path that works for any destination scatterlist */
-- cgit 1.2.3-korg
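The convention this fix restores, in isolation: when CPU stores land in a
page that may later be read through a different mapping, the writer flushes
the kernel alias before unmapping. A generic sketch of the pattern (an
illustrative helper, not the adiantum code itself):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Write into a page, keeping the D-cache coherent with other mappings. */
	static void demo_write_to_page(struct page *page, unsigned int offset,
				       const void *src, size_t len)
	{
		void *virt = kmap_local_page(page);

		memcpy(virt + offset, src, len);
		flush_dcache_page(page);	/* the step the fast path lacked */
		kunmap_local(virt);
	}

This mirrors what memcpy_to_page() does internally, which is the consistency
argument the commit message makes.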