Diffstat (limited to 'crypto')
-rw-r--r--  crypto/af_alg.c          193
-rw-r--r--  crypto/algif_aead.c       56
-rw-r--r--  crypto/algif_hash.c      188
-rw-r--r--  crypto/algif_rng.c         2
-rw-r--r--  crypto/algif_skcipher.c   24
5 files changed, 177 insertions, 286 deletions
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 5f7252a5b7b44..6218c773d71c5 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -482,7 +482,6 @@ static const struct proto_ops alg_proto_ops = {
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
@@ -531,50 +530,25 @@ static const struct net_proto_family alg_family = {
.owner = THIS_MODULE,
};
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
-{
- size_t off;
- ssize_t n;
- int npages, i;
-
- n = iov_iter_get_pages2(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
- if (n < 0)
- return n;
-
- npages = DIV_ROUND_UP(off + n, PAGE_SIZE);
- if (WARN_ON(npages == 0))
- return -EINVAL;
- /* Add one extra for linking */
- sg_init_table(sgl->sg, npages + 1);
-
- for (i = 0, len = n; i < npages; i++) {
- int plen = min_t(int, len, PAGE_SIZE - off);
-
- sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
-
- off = 0;
- len -= plen;
- }
- sg_mark_end(sgl->sg + npages - 1);
- sgl->npages = npages;
-
- return n;
-}
-EXPORT_SYMBOL_GPL(af_alg_make_sg);
-
static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
struct af_alg_sgl *sgl_new)
{
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+ sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
+ sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl);
}
void af_alg_free_sg(struct af_alg_sgl *sgl)
{
int i;
- for (i = 0; i < sgl->npages; i++)
- put_page(sgl->pages[i]);
+ if (sgl->sgt.sgl) {
+ if (sgl->need_unpin)
+ for (i = 0; i < sgl->sgt.nents; i++)
+ unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
+ if (sgl->sgt.sgl != sgl->sgl)
+ kvfree(sgl->sgt.sgl);
+ sgl->sgt.sgl = NULL;
+ }
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);
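
The sg_table conversion above leans on fields defined in include/crypto/if_alg.h, which this crypto/-limited view does not show. Inferred from the accessors used here (sgl->sgt.sgl, sgl->sgt.nents, the inline sgl->sgl array, sgl->need_unpin), the new layout is roughly the following sketch; treat it as an illustration, not the literal header hunk:

	struct af_alg_sgl {
		struct sg_table sgt;	/* sgt.sgl/sgt.nents/sgt.orig_nents as used above */
		struct scatterlist sgl[ALG_MAX_PAGES + 1];	/* inline entries; one spare for chaining */
		bool need_unpin;	/* pages were pinned by extract_iter_to_sg() */
	};

The need_unpin flag pairs unpin_user_page() here with iov_iter_extract_will_pin() at extraction time: pages extracted from a user iterator are pinned and must be unpinned, whereas spliced kernel pages are not.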
@@ -1015,7 +989,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
while (size) {
struct scatterlist *sg;
size_t len = size;
- size_t plen;
+ ssize_t plen;
/* use the existing memory in an allocated page */
if (ctx->merge) {
@@ -1060,35 +1034,58 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
if (sgl->cur)
sg_unmark_end(sg + sgl->cur - 1);
- do {
- struct page *pg;
- unsigned int i = sgl->cur;
-
- plen = min_t(size_t, len, PAGE_SIZE);
-
- pg = alloc_page(GFP_KERNEL);
- if (!pg) {
- err = -ENOMEM;
+ if (msg->msg_flags & MSG_SPLICE_PAGES) {
+ struct sg_table sgtable = {
+ .sgl = sg,
+ .nents = sgl->cur,
+ .orig_nents = sgl->cur,
+ };
+
+ plen = extract_iter_to_sg(&msg->msg_iter, len, &sgtable,
+ MAX_SGL_ENTS - sgl->cur, 0);
+ if (plen < 0) {
+ err = plen;
goto unlock;
}
- sg_assign_page(sg + i, pg);
-
- err = memcpy_from_msg(page_address(sg_page(sg + i)),
- msg, plen);
- if (err) {
- __free_page(sg_page(sg + i));
- sg_assign_page(sg + i, NULL);
- goto unlock;
- }
-
- sg[i].length = plen;
+ for (; sgl->cur < sgtable.nents; sgl->cur++)
+ get_page(sg_page(&sg[sgl->cur]));
len -= plen;
ctx->used += plen;
copied += plen;
size -= plen;
- sgl->cur++;
- } while (len && sgl->cur < MAX_SGL_ENTS);
+ } else {
+ do {
+ struct page *pg;
+ unsigned int i = sgl->cur;
+
+ plen = min_t(size_t, len, PAGE_SIZE);
+
+ pg = alloc_page(GFP_KERNEL);
+ if (!pg) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ sg_assign_page(sg + i, pg);
+
+ err = memcpy_from_msg(
+ page_address(sg_page(sg + i)),
+ msg, plen);
+ if (err) {
+ __free_page(sg_page(sg + i));
+ sg_assign_page(sg + i, NULL);
+ goto unlock;
+ }
+
+ sg[i].length = plen;
+ len -= plen;
+ ctx->used += plen;
+ copied += plen;
+ size -= plen;
+ sgl->cur++;
+ } while (len && sgl->cur < MAX_SGL_ENTS);
+ }
if (!size)
sg_mark_end(sg + sgl->cur - 1);
@@ -1109,69 +1106,6 @@ unlock:
EXPORT_SYMBOL_GPL(af_alg_sendmsg);
/**
- * af_alg_sendpage - sendpage system call handler
- * @sock: socket of connection to user space to write to
- * @page: data to send
- * @offset: offset into page to begin sending
- * @size: length of data
- * @flags: message send/receive flags
- *
- * This is a generic implementation of sendpage to fill ctx->tsgl_list.
- */
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct af_alg_ctx *ctx = ask->private;
- struct af_alg_tsgl *sgl;
- int err = -EINVAL;
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (!size)
- goto done;
-
- if (!af_alg_writable(sk)) {
- err = af_alg_wait_for_wmem(sk, flags);
- if (err)
- goto unlock;
- }
-
- err = af_alg_alloc_tsgl(sk);
- if (err)
- goto unlock;
-
- ctx->merge = 0;
- sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
-
- if (sgl->cur)
- sg_unmark_end(sgl->sg + sgl->cur - 1);
-
- sg_mark_end(sgl->sg + sgl->cur);
-
- get_page(page);
- sg_set_page(sgl->sg + sgl->cur, page, size, offset);
- sgl->cur++;
- ctx->used += size;
-
-done:
- ctx->more = flags & MSG_MORE;
-
-unlock:
- af_alg_data_wakeup(sk);
- release_sock(sk);
-
- return err ?: size;
-}
-EXPORT_SYMBOL_GPL(af_alg_sendpage);
-
-/**
* af_alg_free_resources - release resources required for crypto request
* @areq: Request holding the TX and RX SGL
*/
@@ -1288,8 +1222,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
while (maxsize > len && msg_data_left(msg)) {
struct af_alg_rsgl *rsgl;
+ ssize_t err;
size_t seglen;
- int err;
/* limit the amount of readable buffers */
if (!af_alg_readable(sk))
@@ -1306,16 +1240,23 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
return -ENOMEM;
}
- rsgl->sgl.npages = 0;
+ rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
+ rsgl->sgl.sgt.nents = 0;
+ rsgl->sgl.sgt.orig_nents = 0;
list_add_tail(&rsgl->list, &areq->rsgl_list);
- /* make one iovec available as scatterlist */
- err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+ sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES);
+ err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt,
+ ALG_MAX_PAGES, 0);
if (err < 0) {
rsgl->sg_num_bytes = 0;
return err;
}
+ sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1);
+ rsgl->sgl.need_unpin =
+ iov_iter_extract_will_pin(&msg->msg_iter);
+
/* chain the new scatterlist with previous one */
if (areq->last_rsgl)
af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
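
With af_alg_sendpage() removed, page-donating sends now reach af_alg_sendmsg() via splice(2): splice_to_socket() sets MSG_SPLICE_PAGES internally, and the new branch above extracts the spliced pages straight into the TX SGL instead of copying. A minimal user-space sketch of that path (hash chosen for brevity; error handling omitted; the flag itself is never passed by user space):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/if_alg.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha256",
		};
		static char buf[4096] = "data to be hashed";
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		unsigned char digest[32];
		int pfd[2], tfm, op;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		op = accept(tfm, NULL, 0);

		pipe(pfd);
		vmsplice(pfd[1], &iov, 1, 0);                   /* buffer pages -> pipe */
		splice(pfd[0], NULL, op, NULL, sizeof(buf), 0); /* pipe -> socket; kernel sends with MSG_SPLICE_PAGES */
		read(op, digest, sizeof(digest));               /* collect the sha256 digest */
		return 0;
	}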
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 42493b4d8ce46..7d58cbbce4af2 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -9,10 +9,10 @@
* The following concept of the memory management is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
- * filled by user space with the data submitted via sendpage/sendmsg. Filling
- * up the TX SGL does not cause a crypto operation -- the data will only be
- * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
- * provide a buffer which is tracked with the RX SGL.
+ * filled by user space with the data submitted via sendmsg (maybe with
+ * MSG_SPLICE_PAGES). Filling up the TX SGL does not cause a crypto operation
+ * -- the data will only be tracked by the kernel. Upon receipt of one recvmsg
+ * call, the caller must provide a buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
@@ -113,19 +113,19 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
}
/*
- * Data length provided by caller via sendmsg/sendpage that has not
- * yet been processed.
+ * Data length provided by caller via sendmsg that has not yet been
+ * processed.
*/
used = ctx->used;
/*
- * Make sure sufficient data is present -- note, the same check is
- * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
- * shall provide an information to the data sender that something is
- * wrong, but they are irrelevant to maintain the kernel integrity.
- * We need this check here too in case user space decides to not honor
- * the error message in sendmsg/sendpage and still call recvmsg. This
- * check here protects the kernel integrity.
+ * Make sure sufficient data is present -- note, the same check is
+ * also present in sendmsg. The check in sendmsg informs the data
+ * sender that something is wrong, but it is irrelevant for
+ * maintaining kernel integrity. We need this check here too in case
+ * user space decides not to honor the error from sendmsg and still
+ * calls recvmsg. The check here protects kernel integrity.
*/
if (!aead_sufficient_data(sk))
return -EINVAL;
@@ -210,7 +210,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
*/
/* Use the RX SGL as source (and destination) for crypto op. */
- rsgl_src = areq->first_rsgl.sgl.sg;
+ rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
if (ctx->enc) {
/*
@@ -224,7 +224,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* RX SGL: AAD || PT || Tag
*/
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, processed);
+ areq->first_rsgl.sgl.sgt.sgl,
+ processed);
if (err)
goto free;
af_alg_pull_tsgl(sk, processed, NULL, 0);
@@ -242,7 +243,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Copy AAD || CT to RX SGL buffer for in-place operation. */
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, outlen);
+ areq->first_rsgl.sgl.sgt.sgl,
+ outlen);
if (err)
goto free;
@@ -267,10 +269,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
if (usedpages) {
/* RX SGL present */
struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
+ struct scatterlist *sg = sgl_prev->sgt.sgl;
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
- areq->tsgl);
+ sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
+ sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
rsgl_src = areq->tsgl;
@@ -278,7 +280,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
- areq->first_rsgl.sgl.sg, used, ctx->iv);
+ areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
@@ -368,7 +370,6 @@ static struct proto_ops algif_aead_ops = {
.release = af_alg_release,
.sendmsg = aead_sendmsg,
- .sendpage = af_alg_sendpage,
.recvmsg = aead_recvmsg,
.poll = af_alg_poll,
};
@@ -420,18 +421,6 @@ static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
return aead_sendmsg(sock, msg, size);
}
-static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- int err;
-
- err = aead_check_key(sock);
- if (err)
- return err;
-
- return af_alg_sendpage(sock, page, offset, size, flags);
-}
-
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -459,7 +448,6 @@ static struct proto_ops algif_aead_ops_nokey = {
.release = af_alg_release,
.sendmsg = aead_sendmsg_nokey,
- .sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
.poll = af_alg_poll,
};
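
For context, the sendmsg()/recvmsg() round trip that the comment block above describes looks like this from user space. A minimal sketch with placeholder key and IV values and no error handling; the ALG_SET_* constants and struct af_alg_iv come from linux/if_alg.h:

	#include <linux/if_alg.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <unistd.h>

	#ifndef SOL_ALG
	#define SOL_ALG 279
	#endif

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "aead",
			.salg_name   = "gcm(aes)",
		};
		unsigned char key[16] = { 0 }, iv_buf[12] = { 0 };	/* placeholders */
		unsigned char pt[64] = { 0 }, ct[64 + 16];		/* output = PT + 16-byte tag */
		char cbuf[CMSG_SPACE(4) +
			  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv_buf))] = { 0 };
		struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cmsg;
		struct af_alg_iv *ivp;
		int tfm, op;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
		op = accept(tfm, NULL, 0);

		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type  = ALG_SET_OP;
		cmsg->cmsg_len   = CMSG_LEN(4);
		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

		cmsg = CMSG_NXTHDR(&msg, cmsg);
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type  = ALG_SET_IV;
		cmsg->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv_buf));
		ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
		ivp->ivlen = sizeof(iv_buf);
		memcpy(ivp->iv, iv_buf, sizeof(iv_buf));

		sendmsg(op, &msg, 0);		/* plaintext fills the TX SGL; no crypto yet */
		read(op, ct, sizeof(ct));	/* recvmsg builds the RX SGL and runs the cipher */
		return 0;
	}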
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 63af72e19fa8c..0ab43e149f0e4 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -63,122 +63,114 @@ static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
- int limit = ALG_MAX_PAGES * PAGE_SIZE;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
- long copied = 0;
+ ssize_t copied = 0;
+ size_t len, max_pages, npages;
+ bool continuing = ctx->more, need_init = false;
int err;
- if (limit > sk->sk_sndbuf)
- limit = sk->sk_sndbuf;
+ max_pages = min_t(size_t, ALG_MAX_PAGES,
+ DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
lock_sock(sk);
- if (!ctx->more) {
- if ((msg->msg_flags & MSG_MORE))
- hash_free_result(sk, ctx);
-
- err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
- if (err)
- goto unlock;
- }
-
- ctx->more = false;
-
- while (msg_data_left(msg)) {
- int len = msg_data_left(msg);
-
- if (len > limit)
- len = limit;
-
- len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
- if (len < 0) {
- err = copied ? 0 : len;
- goto unlock;
- }
-
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
-
- err = crypto_wait_req(crypto_ahash_update(&ctx->req),
- &ctx->wait);
- af_alg_free_sg(&ctx->sgl);
- if (err) {
- iov_iter_revert(&msg->msg_iter, len);
- goto unlock;
+ if (!continuing) {
+ /* Discard a previous request that wasn't marked MSG_MORE. */
+ hash_free_result(sk, ctx);
+ if (!msg_data_left(msg))
+ goto done; /* Zero-length; don't start new req */
+ need_init = true;
+ } else if (!msg_data_left(msg)) {
+ /*
+ * No data - finalise the previous MSG_MORE request if this
+ * send lacks MSG_MORE, so that any error comes out here.
+ */
+ if (!(msg->msg_flags & MSG_MORE)) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock_free;
+ ahash_request_set_crypt(&ctx->req, NULL,
+ ctx->result, 0);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
+ if (err)
+ goto unlock_free;
}
-
- copied += len;
+ goto done_more;
}
- err = 0;
+ while (msg_data_left(msg)) {
+ ctx->sgl.sgt.sgl = ctx->sgl.sgl;
+ ctx->sgl.sgt.nents = 0;
+ ctx->sgl.sgt.orig_nents = 0;
- ctx->more = msg->msg_flags & MSG_MORE;
- if (!ctx->more) {
- err = hash_alloc_result(sk, ctx);
- if (err)
- goto unlock;
+ err = -EIO;
+ npages = iov_iter_npages(&msg->msg_iter, max_pages);
+ if (npages == 0)
+ goto unlock_free;
- ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- err = crypto_wait_req(crypto_ahash_final(&ctx->req),
- &ctx->wait);
- }
+ sg_init_table(ctx->sgl.sgl, npages);
-unlock:
- release_sock(sk);
+ ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
- return err ?: copied;
-}
+ err = extract_iter_to_sg(&msg->msg_iter, LONG_MAX,
+ &ctx->sgl.sgt, npages, 0);
+ if (err < 0)
+ goto unlock_free;
+ len = err;
+ sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents - 1);
-static ssize_t hash_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct hash_ctx *ctx = ask->private;
- int err;
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
-
- lock_sock(sk);
- sg_init_table(ctx->sgl.sg, 1);
- sg_set_page(ctx->sgl.sg, page, size, offset);
-
- if (!(flags & MSG_MORE)) {
- err = hash_alloc_result(sk, ctx);
- if (err)
- goto unlock;
- } else if (!ctx->more)
- hash_free_result(sk, ctx);
+ if (!msg_data_left(msg)) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock_free;
+ }
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl,
+ ctx->result, len);
- if (!(flags & MSG_MORE)) {
- if (ctx->more)
- err = crypto_ahash_finup(&ctx->req);
- else
+ if (!msg_data_left(msg) && !continuing &&
+ !(msg->msg_flags & MSG_MORE)) {
err = crypto_ahash_digest(&ctx->req);
- } else {
- if (!ctx->more) {
- err = crypto_ahash_init(&ctx->req);
- err = crypto_wait_req(err, &ctx->wait);
- if (err)
- goto unlock;
+ } else {
+ if (need_init) {
+ err = crypto_wait_req(
+ crypto_ahash_init(&ctx->req),
+ &ctx->wait);
+ if (err)
+ goto unlock_free;
+ need_init = false;
+ }
+
+ if (msg_data_left(msg) || (msg->msg_flags & MSG_MORE))
+ err = crypto_ahash_update(&ctx->req);
+ else
+ err = crypto_ahash_finup(&ctx->req);
+ continuing = true;
}
- err = crypto_ahash_update(&ctx->req);
- }
-
- err = crypto_wait_req(err, &ctx->wait);
- if (err)
- goto unlock;
+ err = crypto_wait_req(err, &ctx->wait);
+ if (err)
+ goto unlock_free;
- ctx->more = flags & MSG_MORE;
+ copied += len;
+ af_alg_free_sg(&ctx->sgl);
+ }
+done_more:
+ ctx->more = msg->msg_flags & MSG_MORE;
+done:
+ err = 0;
unlock:
release_sock(sk);
+ return copied ?: err;
- return err ?: size;
+unlock_free:
+ af_alg_free_sg(&ctx->sgl);
+ hash_free_result(sk, ctx);
+ ctx->more = false;
+ goto unlock;
}
static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
@@ -296,7 +288,6 @@ static struct proto_ops algif_hash_ops = {
.release = af_alg_release,
.sendmsg = hash_sendmsg,
- .sendpage = hash_sendpage,
.recvmsg = hash_recvmsg,
.accept = hash_accept,
};
@@ -348,18 +339,6 @@ static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
return hash_sendmsg(sock, msg, size);
}
-static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- int err;
-
- err = hash_check_key(sock);
- if (err)
- return err;
-
- return hash_sendpage(sock, page, offset, size, flags);
-}
-
static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -398,7 +377,6 @@ static struct proto_ops algif_hash_ops_nokey = {
.release = af_alg_release,
.sendmsg = hash_sendmsg_nokey,
- .sendpage = hash_sendpage_nokey,
.recvmsg = hash_recvmsg_nokey,
.accept = hash_accept_nokey,
};
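
One behavioural detail of the rewrite is visible in the zero-length branch above: a stream built up with MSG_MORE is now finalised by an empty sendmsg() without MSG_MORE, and any final/finup error is reported there rather than being deferred. A fragment illustrating the pattern, assuming an op socket bound to "sha256" as in the earlier splice sketch (part1/part2 are placeholder buffers):

	/* Assumes <sys/socket.h> and <unistd.h>. */
	static void hash_in_pieces(int op, const void *part1, size_t len1,
				   const void *part2, size_t len2)
	{
		unsigned char digest[32];

		send(op, part1, len1, MSG_MORE);	/* first call: ahash_init + update */
		send(op, part2, len2, MSG_MORE);	/* further update */
		send(op, NULL, 0, 0);			/* empty, no MSG_MORE: final runs here */
		read(op, digest, sizeof(digest));	/* fetch the result */
	}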
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 407408c437308..10c41adac3b1f 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -174,7 +174,6 @@ static struct proto_ops algif_rng_ops = {
.bind = sock_no_bind,
.accept = sock_no_accept,
.sendmsg = sock_no_sendmsg,
- .sendpage = sock_no_sendpage,
.release = af_alg_release,
.recvmsg = rng_recvmsg,
@@ -192,7 +191,6 @@ static struct proto_ops __maybe_unused algif_rng_test_ops = {
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
- .sendpage = sock_no_sendpage,
.release = af_alg_release,
.recvmsg = rng_test_recvmsg,
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ee8890ee8f332..9ada9b741af8d 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -9,10 +9,10 @@
* The following concept of the memory management is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
- * filled by user space with the data submitted via sendpage/sendmsg. Filling
- * up the TX SGL does not cause a crypto operation -- the data will only be
- * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
- * provide a buffer which is tracked with the RX SGL.
+ * filled by user space with the data submitted via sendmsg. Filling up the TX
+ * SGL does not cause a crypto operation -- the data will only be tracked by
+ * the kernel. Upon receipt of one recvmsg call, the caller must provide a
+ * buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
@@ -105,7 +105,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
- areq->first_rsgl.sgl.sg, len, ctx->iv);
+ areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
@@ -194,7 +194,6 @@ static struct proto_ops algif_skcipher_ops = {
.release = af_alg_release,
.sendmsg = skcipher_sendmsg,
- .sendpage = af_alg_sendpage,
.recvmsg = skcipher_recvmsg,
.poll = af_alg_poll,
};
@@ -246,18 +245,6 @@ static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
return skcipher_sendmsg(sock, msg, size);
}
-static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- int err;
-
- err = skcipher_check_key(sock);
- if (err)
- return err;
-
- return af_alg_sendpage(sock, page, offset, size, flags);
-}
-
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -285,7 +272,6 @@ static struct proto_ops algif_skcipher_ops_nokey = {
.release = af_alg_release,
.sendmsg = skcipher_sendmsg_nokey,
- .sendpage = skcipher_sendpage_nokey,
.recvmsg = skcipher_recvmsg_nokey,
.poll = af_alg_poll,
};
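
algif_skcipher keeps the same TX/RX SGL flow, just without the sendpage entry points. A minimal one-shot encryption sketch over the surviving sendmsg path; ecb(aes) is chosen purely so that no IV control message is needed (placeholder all-zero key, no error handling):

	#include <linux/if_alg.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <unistd.h>

	#ifndef SOL_ALG
	#define SOL_ALG 279
	#endif

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "skcipher",
			.salg_name   = "ecb(aes)",
		};
		unsigned char key[16] = { 0 }, pt[32] = { 0 }, ct[32];
		char cbuf[CMSG_SPACE(4)] = { 0 };
		struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cmsg;
		int tfm, op;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
		op = accept(tfm, NULL, 0);

		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type  = ALG_SET_OP;
		cmsg->cmsg_len   = CMSG_LEN(4);
		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

		sendmsg(op, &msg, 0);		/* plaintext -> TX SGL */
		read(op, ct, sizeof(ct));	/* ciphertext comes back via the RX SGL */
		return 0;
	}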