author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-14 10:52:09 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-14 10:52:09 -0800
commit    37dc79565c4b7e735f190eaa6ed5bb6eb3d3968a (patch)
tree      4f20cc3c9240c5759f72bf560b596a809173ee29 /include
parent    894025f24bd028942da3e602b87d9f7223109b14 (diff)
parent    1d9ddde12e3c9bab7f3d3484eb9446315e3571ca (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.15:

  API:
   - Disambiguate EBUSY when queueing crypto request by adding ENOSPC.
     This change touches code outside the crypto API.
   - Reset settings when empty string is written to rng_current.

  Algorithms:
   - Add OSCCA SM3 secure hash.

  Drivers:
   - Remove old mv_cesa driver (replaced by marvell/cesa).
   - Enable rfc3686/ecb/cfb/ofb AES in crypto4xx.
   - Add ccm/gcm AES in crypto4xx.
   - Add support for BCM7278 in iproc-rng200.
   - Add hash support on Exynos in s5p-sss.
   - Fix fallback-induced error in vmx.
   - Fix output IV in atmel-aes.
   - Fix empty GCM hash in mediatek.

  Others:
   - Fix DoS potential in lib/mpi.
   - Fix potential out-of-order issues with padata"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
  lib/mpi: call cond_resched() from mpi_powm() loop
  crypto: stm32/hash - Fix return issue on update
  crypto: dh - Remove pointless checks for NULL 'p' and 'g'
  crypto: qat - Clean up error handling in qat_dh_set_secret()
  crypto: dh - Don't permit 'key' or 'g' size longer than 'p'
  crypto: dh - Don't permit 'p' to be 0
  crypto: dh - Fix double free of ctx->p
  hwrng: iproc-rng200 - Add support for BCM7278
  dt-bindings: rng: Document BCM7278 RNG200 compatible
  crypto: chcr - Replace _manual_ swap with swap macro
  crypto: marvell - Add a NULL entry at the end of mv_cesa_plat_id_table[]
  hwrng: virtio - Virtio RNG devices need to be re-registered after suspend/resume
  crypto: atmel - remove empty functions
  crypto: ecdh - remove empty exit()
  MAINTAINERS: update maintainer for qat
  crypto: caam - remove unused param of ctx_map_to_sec4_sg()
  crypto: caam - remove unneeded edesc zeroization
  crypto: atmel-aes - Reset the controller before each use
  crypto: atmel-aes - properly set IV after {en,de}crypt
  hwrng: core - Reset user selected rng by writing "" to rng_current
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/crypto/dh.h          2
-rw-r--r--  include/crypto/drbg.h        3
-rw-r--r--  include/crypto/ecdh.h        2
-rw-r--r--  include/crypto/gcm.h         8
-rw-r--r--  include/crypto/gf128mul.h    2
-rw-r--r--  include/crypto/hash.h       28
-rw-r--r--  include/crypto/if_alg.h     15
-rw-r--r--  include/crypto/sm3.h        40
-rw-r--r--  include/crypto/sm3_base.h  117
-rw-r--r--  include/linux/crypto.h      40
-rw-r--r--  include/linux/padata.h       4
11 files changed, 230 insertions, 31 deletions
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index f638998fb6d0ef..71e1bb24d79f03 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -53,7 +53,7 @@ struct dh {
*
* Return: size of the key in bytes
*/
-int crypto_dh_key_len(const struct dh *params);
+unsigned int crypto_dh_key_len(const struct dh *params);
/**
* crypto_dh_encode_key() - encode the private key
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 22f884c97387e9..8f941102af3633 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -126,8 +126,7 @@ struct drbg_state {
__u8 *ctr_null_value; /* CTR mode aligned zero buf */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
- struct completion ctr_completion; /* CTR mode async handler */
- int ctr_async_err; /* CTR mode async error */
+ struct crypto_wait ctr_wait; /* CTR mode async wait obj */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index 1aff2a8a3a6892..d696317c43a8c4 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -54,7 +54,7 @@ struct ecdh {
*
* Return: size of the key in bytes
*/
-int crypto_ecdh_key_len(const struct ecdh *params);
+unsigned int crypto_ecdh_key_len(const struct ecdh *params);
/**
* crypto_ecdh_encode_key() - encode the private key
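For context on the dh.h and ecdh.h hunks above: crypto_dh_key_len() and crypto_ecdh_key_len() report the size of the packed key blob, so an unsigned return type matches how callers use them. Below is a minimal, hypothetical caller sketch (not part of this patch) assuming the in-tree kpp API of this release; the function name, curve choice and error handling are illustrative only.

/*
 * Hypothetical illustration: size the packed key buffer with
 * crypto_ecdh_key_len() (now returning unsigned int), encode the
 * parameters and hand them to the kpp transform.
 */
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

static int example_ecdh_set_secret(struct crypto_kpp *tfm,
				   const u8 *key, unsigned int key_size)
{
	struct ecdh p = {
		.curve_id = ECC_CURVE_NIST_P256,	/* example curve */
		.key      = (char *)key,
		.key_size = key_size,
	};
	unsigned int buf_len = crypto_ecdh_key_len(&p);
	char *buf = kmalloc(buf_len, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	err = crypto_ecdh_encode_key(buf, buf_len, &p);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, buf_len);

	kzfree(buf);
	return err;
}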
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
new file mode 100644
index 00000000000000..c50e057ea17e30
--- /dev/null
+++ b/include/crypto/gcm.h
@@ -0,0 +1,8 @@
+#ifndef _CRYPTO_GCM_H
+#define _CRYPTO_GCM_H
+
+#define GCM_AES_IV_SIZE 12
+#define GCM_RFC4106_IV_SIZE 8
+#define GCM_RFC4543_IV_SIZE 8
+
+#endif
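A short, purely illustrative sketch of how this new shared header is meant to be consumed (nothing below is part of the patch): drivers previously carried their own 12/8-byte GCM IV defines, whereas a "gcm(aes)" user can now reference GCM_AES_IV_SIZE directly.

#include <crypto/aead.h>
#include <crypto/gcm.h>
#include <linux/bug.h>
#include <linux/err.h>

static int example_check_gcm_ivsize(void)
{
	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The gcm(aes) template uses a 96-bit (12 byte) IV. */
	WARN_ON(crypto_aead_ivsize(tfm) != GCM_AES_IV_SIZE);

	crypto_free_aead(tfm);
	return 0;
}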
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 0977fb18ff68c6..fa0a63d298dc48 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -227,7 +227,7 @@ struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
-
+void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
kzfree(t);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index b5727bcd233626..0ed31fd80242cf 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -75,6 +75,7 @@ struct ahash_request {
* state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point.
+ * Note: mandatory.
* @update: Push a chunk of data into the driver for transformation. This
* function actually pushes blocks of data from upper layers into the
* driver, which then passes those to the hardware as seen fit. This
@@ -84,16 +85,20 @@ struct ahash_request {
* context, as this function may be called in parallel with the same
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point.
+ * Note: mandatory.
* @final: Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
- * point.
+ * point unless hardware requires it to finish the transformation
+ * (then the data buffered by the device driver is processed).
+ * Note: mandatory.
* @finup: Combination of @update and @final. This function is effectively a
* combination of @update and @final calls issued in sequence. As some
* hardware cannot do @update and @final separately, this callback was
* added to allow such hardware to be used at least by IPsec. Data
* processing can happen synchronously [SHASH] or asynchronously [AHASH]
* at this point.
+ * Note: optional.
* @digest: Combination of @init and @update and @final. This function
* effectively behaves as the entire chain of operations, @init,
* @update and @final issued in sequence. Just like @finup, this was
@@ -416,11 +421,10 @@ static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
* needed to perform the cipher operation
*
* This function is a "short-hand" for the function calls of
- * crypto_ahash_update and crypto_shash_final. The parameters have the same
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
* meaning as discussed for those separate functions.
*
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
+ * Return: see crypto_ahash_final()
*/
int crypto_ahash_finup(struct ahash_request *req);
@@ -433,8 +437,11 @@ int crypto_ahash_finup(struct ahash_request *req);
* based on all data added to the cipher handle. The message digest is placed
* into the output buffer registered with the ahash_request handle.
*
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
+ * Return:
+ * 0 if the message digest was successfully calculated;
+ * -EINPROGRESS if data is fed into hardware (DMA) or queued for later;
+ * -EBUSY if queue is full and request should be resubmitted later;
+ * other < 0 if an error occurred
*/
int crypto_ahash_final(struct ahash_request *req);
@@ -447,8 +454,7 @@ int crypto_ahash_final(struct ahash_request *req);
* crypto_ahash_update and crypto_ahash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
+ * Return: see crypto_ahash_final()
*/
int crypto_ahash_digest(struct ahash_request *req);
@@ -493,8 +499,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
* handle. Any potentially existing state created by previous operations is
* discarded.
*
- * Return: 0 if the message digest initialization was successful; < 0 if an
- * error occurred
+ * Return: see crypto_ahash_final()
*/
static inline int crypto_ahash_init(struct ahash_request *req)
{
@@ -510,8 +515,7 @@ static inline int crypto_ahash_init(struct ahash_request *req)
* is pointed to by the scatter/gather list registered in the &ahash_request
* handle
*
- * Return: 0 if the message digest update was successful; < 0 if an error
- * occurred
+ * Return: see crypto_ahash_final()
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
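To make the newly documented return semantics concrete: a caller is expected to fold -EINPROGRESS and -EBUSY back into the final completion status, typically via the crypto_wait helpers added to include/linux/crypto.h further down in this diff. The sketch below is illustrative only (names invented here, not taken from the patch).

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_ahash_digest(const char *alg, const void *buf,
				unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int err;

	tfm = crypto_alloc_ahash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	crypto_init_wait(&wait);
	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* 0, -EINPROGRESS and -EBUSY are all resolved by crypto_wait_req(). */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}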
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 75ec9c662268bc..6abf0a3604dc39 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -40,11 +40,6 @@ struct alg_sock {
void *private;
};
-struct af_alg_completion {
- struct completion completion;
- int err;
-};
-
struct af_alg_control {
struct af_alg_iv *iv;
int op;
@@ -152,7 +147,7 @@ struct af_alg_ctx {
void *iv;
size_t aead_assoclen;
- struct af_alg_completion completion;
+ struct crypto_wait wait;
size_t used;
size_t rcvused;
@@ -177,19 +172,11 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion);
-void af_alg_complete(struct crypto_async_request *req, int err);
-
static inline struct alg_sock *alg_sk(struct sock *sk)
{
return (struct alg_sock *)sk;
}
-static inline void af_alg_init_completion(struct af_alg_completion *completion)
-{
- init_completion(&completion->completion);
-}
-
/**
* Size of available buffer for sending data from user space to kernel.
*
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
new file mode 100644
index 00000000000000..1438942dc77374
--- /dev/null
+++ b/include/crypto/sm3.h
@@ -0,0 +1,40 @@
+/*
+ * Common values for SM3 algorithm
+ */
+
+#ifndef _CRYPTO_SM3_H
+#define _CRYPTO_SM3_H
+
+#include <linux/types.h>
+
+#define SM3_DIGEST_SIZE 32
+#define SM3_BLOCK_SIZE 64
+
+#define SM3_T1 0x79CC4519
+#define SM3_T2 0x7A879D8A
+
+#define SM3_IVA 0x7380166f
+#define SM3_IVB 0x4914b2b9
+#define SM3_IVC 0x172442d7
+#define SM3_IVD 0xda8a0600
+#define SM3_IVE 0xa96f30bc
+#define SM3_IVF 0x163138aa
+#define SM3_IVG 0xe38dee4d
+#define SM3_IVH 0xb0fb0e4e
+
+extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE];
+
+struct sm3_state {
+ u32 state[SM3_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buffer[SM3_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+#endif
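As an aside, a hypothetical in-kernel user could compute an SM3 digest through the regular shash interface once the "sm3" algorithm registered by this series is available. The helper below is a sketch only (the function name is invented), assuming the synchronous shash API.

#include <crypto/hash.h>
#include <crypto/sm3.h>
#include <linux/err.h>

static int example_sm3_digest(const u8 *data, unsigned int len,
			      u8 out[SM3_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sm3", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}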
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
new file mode 100644
index 00000000000000..256948e3929641
--- /dev/null
+++ b/include/crypto/sm3_base.h
@@ -0,0 +1,117 @@
+/*
+ * sm3_base.h - core logic for SM3 implementations
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sm3.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
+
+static inline int sm3_base_init(struct shash_desc *desc)
+{
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SM3_IVA;
+ sctx->state[1] = SM3_IVB;
+ sctx->state[2] = SM3_IVC;
+ sctx->state[3] = SM3_IVD;
+ sctx->state[4] = SM3_IVE;
+ sctx->state[5] = SM3_IVF;
+ sctx->state[6] = SM3_IVG;
+ sctx->state[7] = SM3_IVH;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sm3_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sm3_block_fn *block_fn)
+{
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SM3_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ blocks = len / SM3_BLOCK_SIZE;
+ len %= SM3_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SM3_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+
+ return 0;
+}
+
+static inline int sm3_base_do_finalize(struct shash_desc *desc,
+ sm3_block_fn *block_fn)
+{
+ const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64);
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+ unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+
+ sctx->buffer[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buffer, 1);
+
+ return 0;
+}
+
+static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
+{
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sm3_state){};
+ return 0;
+}
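This header is meant to be consumed by glue drivers in the same way as sha256_base.h: the driver supplies only a block/compression function and wires it into the helpers above. The skeleton below is hypothetical (not part of the patch) and would be registered with crypto_register_shash(); the driver name and block function are placeholders.

#include <crypto/internal/hash.h>
#include <crypto/sm3_base.h>
#include <linux/module.h>

static void sm3_arch_block(struct sm3_state *sst, u8 const *src, int blocks)
{
	/* Invoke the accelerated SM3 compression function here. */
}

static int sm3_arch_update(struct shash_desc *desc, const u8 *data,
			   unsigned int len)
{
	return sm3_base_do_update(desc, data, len, sm3_arch_block);
}

static int sm3_arch_final(struct shash_desc *desc, u8 *out)
{
	sm3_base_do_finalize(desc, sm3_arch_block);
	return sm3_base_finish(desc, out);
}

static struct shash_alg sm3_arch_alg = {
	.digestsize	= SM3_DIGEST_SIZE,
	.init		= sm3_base_init,
	.update		= sm3_arch_update,
	.final		= sm3_arch_final,
	.descsize	= sizeof(struct sm3_state),
	.base		= {
		.cra_name		= "sm3",
		.cra_driver_name	= "sm3-arch-example",
		.cra_priority		= 300,
		.cra_blocksize		= SM3_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	},
};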
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 84da9978e9516a..78508ca4b10857 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/completion.h>
/*
* Autoloaded crypto modules should only use a prefixed name to avoid allowing
@@ -468,6 +469,45 @@ struct crypto_alg {
} CRYPTO_MINALIGN_ATTR;
/*
+ * A helper struct for waiting for completion of async crypto ops
+ */
+struct crypto_wait {
+ struct completion completion;
+ int err;
+};
+
+/*
+ * Macro for declaring a crypto op async wait object on stack
+ */
+#define DECLARE_CRYPTO_WAIT(_wait) \
+ struct crypto_wait _wait = { \
+ COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
+
+/*
+ * Async ops completion helper functions
+ */
+void crypto_req_done(struct crypto_async_request *req, int err);
+
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+ switch (err) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&wait->completion);
+ reinit_completion(&wait->completion);
+ err = wait->err;
+ break;
+ };
+
+ return err;
+}
+
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+ init_completion(&wait->completion);
+}
+
+/*
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
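A brief illustration of how these wait helpers are intended to be used, replacing the open-coded completion-plus-error pattern that drivers and af_alg carried before (see the drbg.h and if_alg.h hunks above). The example assumes an async skcipher consumer; the function and variable names are invented for the sketch and are not part of this patch.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_encrypt(struct crypto_skcipher *tfm,
			   struct scatterlist *src, struct scatterlist *dst,
			   unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* crypto_req_done() completes 'wait' and records the final error. */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* Folds -EINPROGRESS/-EBUSY into the real completion status. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}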
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 2f9c1f93b1ce4e..5d13d25da2c86b 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -37,6 +37,7 @@
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
* @cb_cpu: Callback cpu for serialization.
+ * @cpu: Cpu for parallelization.
* @seq_nr: Sequence number of the parallelized data object.
* @info: Used to pass information from the parallel to the serial function.
* @parallel: Parallel execution function.
@@ -46,6 +47,7 @@ struct padata_priv {
struct list_head list;
struct parallel_data *pd;
int cb_cpu;
+ int cpu;
int info;
void (*parallel)(struct padata_priv *padata);
void (*serial)(struct padata_priv *padata);
@@ -85,6 +87,7 @@ struct padata_serial_queue {
* @swork: work struct for serialization.
* @pd: Backpointer to the internal control structure.
* @work: work struct for parallelization.
+ * @reorder_work: work struct for reordering.
* @num_obj: Number of objects that are processed by this cpu.
* @cpu_index: Index of the cpu.
*/
@@ -93,6 +96,7 @@ struct padata_parallel_queue {
struct padata_list reorder;
struct parallel_data *pd;
struct work_struct work;
+ struct work_struct reorder_work;
atomic_t num_obj;
int cpu_index;
};